diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8e47bcbd382..207c765837b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,33 +1,44 @@ -* @sougou - +bootstrap.sh @deepthi /config/mycnf/ @askdba @shlomi-noach /docker/ @derekperkins @dkhenry /examples/compose @shlomi-noach +/examples/demo @sougou +/examples/legacy_local @deepthi /examples/local @rohit-nayak-ps +/examples/operator @askdba +/examples/region_sharding @deepthi +/java/ @harshit-gangal +/go/cache @vmg /go/cmd/vtadmin @ajm188 @doeg /go/cmd/vtctldclient @ajm188 @doeg /go/mysql @harshit-gangal @systay +/go/protoutil @ajm188 /go/test/endtoend/onlineddl @shlomi-noach /go/test/endtoend/orchestrator @deepthi @shlomi-noach /go/test/endtoend/vtgate @harshit-gangal @systay /go/vt/discovery @deepthi /go/vt/mysqlctl @deepthi -/go/vt/proto/vtadmin @ajm188 @doeg /go/vt/orchestrator @deepthi @shlomi-noach +/go/vt/proto/vtadmin @ajm188 @doeg /go/vt/schema @shlomi-noach -/go/vt/sqlparser @harshit-gangal @systay -/go/vt/vtadmin @ajm188 @doeg +/go/vt/servenv @deepthi +/go/vt/sqlparser @harshit-gangal @systay @GuptaManan100 +/go/vt/srvtopo @rafael +/go/vt/topo @deepthi @rafael +/go/vt/vtadmin @ajm188 @doeg @rohit-nayak-ps /go/vt/vtctl @deepthi /go/vt/vtctl/vtctl.go @ajm188 @doeg /go/vt/vtctl/grpcvtctldclient @ajm188 @doeg /go/vt/vtctl/grpcvtctldserver @ajm188 @doeg /go/vt/vtctl/vtctldclient @ajm188 @doeg +/go/vt/vtctld @ajm188 @doeg @rohit-nayak-ps @deepthi /go/vt/vtgate @harshit-gangal @systay /go/vt/vttablet/tabletmanager @deepthi @shlomi-noach /go/vt/vttablet/tabletmanager/vreplication @rohit-nayak-ps /go/vt/vttablet/tabletmanager/vstreamer @rohit-nayak-ps /go/vt/vttablet/tabletserver @harshit-gangal @systay @shlomi-noach /go/vt/wrangler @deepthi @rohit-nayak-ps +/go/vt/workflow @rohit-nayak-ps /helm/ @derekperkins @dkhenry /proto/vtadmin.proto @ajm188 @doeg /proto/vtctldata.proto @ajm188 @doeg diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 
aa5370ac3a7..4df6788fa69 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,3 +1,9 @@ + + ## Description @@ -22,4 +28,5 @@ Components that this PR will affect: - [ ] Query Serving - [ ] VReplication - [ ] Cluster Management -- [ ] Build +- [ ] Build/CI +- [ ] VTAdmin diff --git a/.github/workflows/check_make_sizegen.yml b/.github/workflows/check_make_sizegen.yml new file mode 100644 index 00000000000..b8eef13acf4 --- /dev/null +++ b/.github/workflows/check_make_sizegen.yml @@ -0,0 +1,35 @@ +name: check_make_sizegen +on: [push, pull_request] +jobs: + + build: + name: Check Make Sizegen + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: check_make_sizegen + run: | + tools/check_make_sizegen.sh + diff --git a/.github/workflows/check_make_visitor.yml b/.github/workflows/check_make_visitor.yml index b72fbbaf514..dbb1e42d3de 100644 --- a/.github/workflows/check_make_visitor.yml +++ b/.github/workflows/check_make_visitor.yml @@ -31,5 +31,5 @@ jobs: - name: check_make_visitor run: | - misc/git/hooks/visitorgen + misc/git/hooks/asthelpers diff --git a/.github/workflows/check_runs_analysis.yaml b/.github/workflows/check_runs_analysis.yaml new file mode 100644 index 00000000000..bab9061c561 --- /dev/null +++ b/.github/workflows/check_runs_analysis.yaml @@ -0,0 +1,59 @@ +on: + schedule: + - cron: '*/10 * * * *' + +jobs: + analyze: + if: github.repository == 'vitessio/vitess' + name: analyze_check_runs + runs-on: 
ubuntu-latest + steps: + + - uses: actions/checkout@v2 + with: + ref: metrics + - name: analyze check runs + run: | + FAILED_RUNS_JSON="/tmp/failed_runs.json" + FAILED_RUNS_CSV="/tmp/failed_runs.csv" + WORKFLOW_FAILURES_FILE=".metrics/workflow-failures" + + # Get latest failed check runs + curl \ + -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Content-type: application/json" \ + "https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/runs?per_page=100&status=failure" \ + > "$FAILED_RUNS_JSON" + + # extract .created_at, .name, .html_url for each failed check run + cat "$FAILED_RUNS_JSON" | + jq -r '.workflow_runs[] | (.created_at + "," + .name + "," + .html_url)' > "$FAILED_RUNS_CSV" + + # now combine back into $WORKFLOW_FAILURES_FILE, + # - skipping duplicates + # - limit file size to X last lines + # - encode with bas64 + tmpfile="$(mktemp)" + cat $WORKFLOW_FAILURES_FILE $FAILED_RUNS_CSV | sort | uniq | tail -10000 | base64 -w 0 > $tmpfile + # sha of the file we're replacing: + sha="$(git hash-object ${WORKFLOW_FAILURES_FILE})" + + json=" + { + \"message\": \"automated merge of failed check runs\", + \"branch\": \"metrics\", + \"sha\": \"$sha\", + \"content\": \"$(cat $tmpfile)\" + } + " + json_tmpfile="$(mktemp)" + echo "$json" > $json_tmpfile + + curl \ + -X PUT \ + -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Content-type: application/json" \ + "https://api.github.com/repos/${GITHUB_REPOSITORY}/contents/${WORKFLOW_FAILURES_FILE}" \ + -d "@${json_tmpfile}" diff --git a/.github/workflows/cluster_endtoend_11.yml b/.github/workflows/cluster_endtoend_11.yml new file mode 100644 index 00000000000..5ec5d45cc72 --- /dev/null +++ b/.github/workflows/cluster_endtoend_11.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (11) +on: [push, pull_request] +jobs: + + build: + name: 
Run endtoend tests on Cluster (11) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 11 diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml new file mode 100644 index 00000000000..77934b909bd --- /dev/null +++ b/.github/workflows/cluster_endtoend_12.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (12) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (12) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + 
go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 12 diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml new file mode 100644 index 00000000000..c3604cef464 --- /dev/null +++ b/.github/workflows/cluster_endtoend_13.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (13) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (13) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # 
TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 13 diff --git a/.github/workflows/cluster_endtoend_14.yml b/.github/workflows/cluster_endtoend_14.yml new file mode 100644 index 00000000000..e5873512672 --- /dev/null +++ b/.github/workflows/cluster_endtoend_14.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (14) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (14) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP 
address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 14 diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml new file mode 100644 index 00000000000..0661e084b26 --- /dev/null +++ b/.github/workflows/cluster_endtoend_15.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (15) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (15) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk 
'{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 15 diff --git a/.github/workflows/cluster_endtoend_16.yml b/.github/workflows/cluster_endtoend_16.yml new file mode 100644 index 00000000000..a21072bf604 --- /dev/null +++ b/.github/workflows/cluster_endtoend_16.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (16) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (16) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN 
ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 16 diff --git a/.github/workflows/cluster_endtoend_17.yml b/.github/workflows/cluster_endtoend_17.yml new file mode 100644 index 00000000000..38944c257f1 --- /dev/null +++ b/.github/workflows/cluster_endtoend_17.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (17) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (17) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 17 diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml new file mode 100644 index 00000000000..85b25419b48 --- /dev/null +++ b/.github/workflows/cluster_endtoend_18.yml @@ -0,0 +1,54 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (18) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (18) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Installing zookeeper and consul + run: | + make tools + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 18 diff --git a/.github/workflows/cluster_endtoend_19.yml b/.github/workflows/cluster_endtoend_19.yml new file mode 100644 index 00000000000..582b7db0227 --- /dev/null +++ b/.github/workflows/cluster_endtoend_19.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (19) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (19) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 19 diff --git a/.github/workflows/cluster_endtoend_20.yml b/.github/workflows/cluster_endtoend_20.yml new file mode 100644 index 00000000000..d53175a88f3 --- /dev/null +++ b/.github/workflows/cluster_endtoend_20.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (20) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (20) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 20 diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml new file mode 100644 index 00000000000..10f52f04178 --- /dev/null +++ b/.github/workflows/cluster_endtoend_21.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (21) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (21) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 21 diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml new file mode 100644 index 00000000000..c1bcb675bae --- /dev/null +++ b/.github/workflows/cluster_endtoend_22.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (22) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (22) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 22 diff --git a/.github/workflows/cluster_endtoend_23.yml b/.github/workflows/cluster_endtoend_23.yml new file mode 100644 index 00000000000..f7105bc96cd --- /dev/null +++ b/.github/workflows/cluster_endtoend_23.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (23) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (23) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 23 diff --git a/.github/workflows/cluster_endtoend_24.yml b/.github/workflows/cluster_endtoend_24.yml new file mode 100644 index 00000000000..ef3eaa00ce8 --- /dev/null +++ b/.github/workflows/cluster_endtoend_24.yml @@ -0,0 +1,54 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (24) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (24) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Installing zookeeper and consul + run: | + make tools + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 24 diff --git a/.github/workflows/cluster_endtoend_26.yml b/.github/workflows/cluster_endtoend_26.yml new file mode 100644 index 00000000000..fdee5988cf0 --- /dev/null +++ b/.github/workflows/cluster_endtoend_26.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (26) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (26) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 26 diff --git a/.github/workflows/cluster_endtoend_27.yml b/.github/workflows/cluster_endtoend_27.yml new file mode 100644 index 00000000000..dea2e7336b9 --- /dev/null +++ b/.github/workflows/cluster_endtoend_27.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (27) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (27) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard 27 diff --git a/.github/workflows/cluster_endtoend.yml b/.github/workflows/cluster_endtoend_mysql80.yml similarity index 67% rename from .github/workflows/cluster_endtoend.yml rename to .github/workflows/cluster_endtoend_mysql80.yml index 4ec4671d62f..4f075015c7e 100644 --- a/.github/workflows/cluster_endtoend.yml +++ b/.github/workflows/cluster_endtoend_mysql80.yml @@ -1,12 +1,11 @@ -name: cluster_endtoend +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (mysql80) on: [push, pull_request] jobs: build: - runs-on: ubuntu-18.04 - strategy: - matrix: - name: [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27] + runs-on: ubuntu-20.04 steps: - name: Set up Go @@ -26,22 +25,15 @@ jobs: sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld go mod download + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update sudo apt-get install percona-xtrabackup-24 - - name: Installing zookeeper and consul - run: | 
- # Only running for shard 18 and 24 where we need to install consul and zookeeper - if [[ ${{matrix.name}} == 18 || ${{matrix.name}} == 24 ]]; then - make tools - fi - - - name: sharded cluster_endtoend + - name: Run cluster endtoend test timeout-minutes: 30 run: | source build.env - eatmydata -- go run test.go -docker=false -print-log -follow -shard ${{matrix.name}} - + eatmydata -- go run test.go -docker=false -print-log -follow -shard mysql80 diff --git a/.github/workflows/cluster_endtoend_onlineddl_declarative.yml b/.github/workflows/cluster_endtoend_onlineddl_declarative.yml new file mode 100644 index 00000000000..3a7381b37f2 --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_declarative.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_declarative) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (onlineddl_declarative) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard onlineddl_declarative diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml new file mode 100644 index 00000000000..22b049e9347 --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_ghost) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (onlineddl_ghost) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard onlineddl_ghost diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml new file mode 100644 index 00000000000..a6937a9eb85 --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_revert) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (onlineddl_revert) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard onlineddl_revert diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml new file mode 100644 index 00000000000..5bd8a46a61c --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_vrepl) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (onlineddl_vrepl) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard onlineddl_vrepl diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml new file mode 100644 index 00000000000..56dd0231a4b --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_vrepl_stress) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard onlineddl_vrepl_stress diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml new file mode 100644 index 00000000000..7caa3ccde42 --- /dev/null +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (tabletmanager_throttler) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (tabletmanager_throttler) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard tabletmanager_throttler diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml new file mode 100644 index 00000000000..875efbed6da --- /dev/null +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (tabletmanager_throttler_custom_config) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (tabletmanager_throttler_custom_config) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a 
/etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard tabletmanager_throttler_custom_config diff --git a/.github/workflows/cluster_endtoend_upgrade.yml b/.github/workflows/cluster_endtoend_upgrade.yml index e7ad8653838..85528a34425 100644 --- a/.github/workflows/cluster_endtoend_upgrade.yml +++ b/.github/workflows/cluster_endtoend_upgrade.yml @@ -3,8 +3,9 @@ on: [push, pull_request] jobs: build: + if: github.repository == 'vitessio/vitess' name: Run endtoend tests on Cluster (upgrade) - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go @@ -12,15 +13,25 @@ jobs: with: go-version: 1.15 - - name: Check out v8.0.0 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out v9.0.0 uses: actions/checkout@v2 with: - ref: v8.0.0 + ref: v9.0.0 - name: Get dependencies run: | # This prepares general purpose binary dependencies - # as well as v8.0.0 specific go modules + # as well as v9.0.0 specific go modules sudo apt-get update sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata sudo service mysql stop @@ -35,28 +46,28 @@ jobs: sudo apt-get update sudo apt-get install percona-xtrabackup-24 - - name: Building v8.0.0 binaries + - name: Building v9.0.0 binaries timeout-minutes: 10 run: | - # We build v8.0.0 binaries and save them in a temporary location + # We build v9.0.0 binaries and save them in a temporary location source build.env make build - mkdir -p /tmp/vitess-build-v8.0.0/ - cp -R bin /tmp/vitess-build-v8.0.0/ + mkdir -p /tmp/vitess-build-v9.0.0/ + cp -R bin /tmp/vitess-build-v9.0.0/ - name: Check out HEAD uses: actions/checkout@v2 - - name: Run cluster endtoend test v8.0.0 (create cluster) + - name: Run cluster endtoend test v9.0.0 (create cluster) timeout-minutes: 5 run: | - # By checking out we deleted bin/ directory. We now restore our pre-built v8.0.0 binaries - cp -R /tmp/vitess-build-v8.0.0/bin . + # By checking out we deleted bin/ directory. We now restore our pre-built v9.0.0 binaries + cp -R /tmp/vitess-build-v9.0.0/bin . 
# create the directory where we store test data; ensure it is empty: rm -rf /tmp/vtdataroot mkdir -p /tmp/vtdataroot source build.env - # We pass -skip-build so that we use the v8.0.0 binaries, not HEAD binaries + # We pass -skip-build so that we use the v9.0.0 binaries, not HEAD binaries eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -shard 28 - name: Check out HEAD @@ -73,7 +84,7 @@ jobs: mkdir -p /tmp/vitess-build-head/ cp -R bin /tmp/vitess-build-head/ - - name: Run cluster endtoend test HEAD based on v8.0.0 data (upgrade path) + - name: Run cluster endtoend test HEAD based on v9.0.0 data (upgrade path) timeout-minutes: 5 run: | # /tmp/vtdataroot exists from previous test @@ -94,11 +105,11 @@ jobs: eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -shard 28 - - name: Run cluster endtoend test v8.0.0 based on HEAD data (downgrade path) + - name: Run cluster endtoend test v9.0.0 based on HEAD data (downgrade path) timeout-minutes: 5 run: | # /tmp/vtdataroot exists from previous test - cp -R /tmp/vitess-build-v8.0.0/bin . + cp -R /tmp/vitess-build-v9.0.0/bin . source build.env # We again built manually and there's no need for the test to build. 
diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml new file mode 100644 index 00000000000..427d8675d06 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_basic) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vreplication_basic) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vreplication_basic diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml new file mode 100644 index 00000000000..efdf95423d6 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_cellalias) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vreplication_cellalias) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vreplication_cellalias diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate.yml b/.github/workflows/cluster_endtoend_vreplication_migrate.yml new file mode 100644 index 00000000000..1e9f02b0153 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_migrate.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_migrate) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vreplication_migrate) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vreplication_migrate diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml new file mode 100644 index 00000000000..72c92ebf610 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_multicell) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vreplication_multicell) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vreplication_multicell diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml new file mode 100644 index 00000000000..a80745f35c7 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_v2) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vreplication_v2) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vreplication_v2 diff --git a/.github/workflows/cluster_initial_sharding_multi.yml b/.github/workflows/cluster_initial_sharding_multi.yml index 494dfca2bb6..e6e0ee36a60 100644 --- a/.github/workflows/cluster_initial_sharding_multi.yml +++ b/.github/workflows/cluster_initial_sharding_multi.yml @@ -12,6 +12,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/docker_test_1.yml b/.github/workflows/docker_test_1.yml index 11a53e1a48b..aff66aefa73 100644 --- a/.github/workflows/docker_test_1.yml +++ b/.github/workflows/docker_test_1.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.13 + go-version: 1.15 - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/docker_test_2.yml b/.github/workflows/docker_test_2.yml index 46d04889bbd..6da16d0ac9a 100644 --- a/.github/workflows/docker_test_2.yml +++ b/.github/workflows/docker_test_2.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.13 + go-version: 1.15 - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/docker_test_3.yml b/.github/workflows/docker_test_3.yml index 6cc880e992f..7c82dc4efe8 100644 --- a/.github/workflows/docker_test_3.yml +++ b/.github/workflows/docker_test_3.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.13 + go-version: 1.15 - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml index 1b66cdcbe13..d1da9666662 100644 --- a/.github/workflows/e2e_race.yml +++ b/.github/workflows/e2e_race.yml @@ -12,6 +12,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
+ - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml index 8be2dbb3179..7e3181aaa4e 100644 --- a/.github/workflows/endtoend.yml +++ b/.github/workflows/endtoend.yml @@ -12,6 +12,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED! + - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/ensure_bootstrap_updated.yml b/.github/workflows/ensure_bootstrap_updated.yml index 6bb1af2eb76..174995ad838 100644 --- a/.github/workflows/ensure_bootstrap_updated.yml +++ b/.github/workflows/ensure_bootstrap_updated.yml @@ -10,7 +10,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.13 + go-version: 1.15 - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/golangci-linter.yml b/.github/workflows/golangci-linter.yml index bc6799331ff..98bdd56ea7b 100644 --- a/.github/workflows/golangci-linter.yml +++ b/.github/workflows/golangci-linter.yml @@ -2,8 +2,8 @@ name: golangci-lint on: [push, pull_request] jobs: build: - name: Build - runs-on: ubuntu-18.04 + name: Lint using golangci-lint + runs-on: ubuntu-latest steps: - name: Set up Go 1.15 uses: actions/setup-go@v1 @@ -12,7 +12,7 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v1 + uses: actions/checkout@v2 - name: Install golangci-lint run: curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(go env
GOPATH)/bin v1.31.0 diff --git a/.github/workflows/gomod-tidy.yml b/.github/workflows/gomod-tidy.yml new file mode 100644 index 00000000000..34dbdd2d5ca --- /dev/null +++ b/.github/workflows/gomod-tidy.yml @@ -0,0 +1,28 @@ +name: gomod-tidy +on: [push, pull_request] +jobs: + build: + name: Check go mod tidy + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.15 + uses: actions/setup-go@v1 + with: + go-version: 1.15 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Run go mod tidy + run: | + set -e + go mod tidy + output=$(git status -s) + if [ -z "${output}" ]; then + exit 0 + fi + echo 'We wish to maintain a tidy state for go mod. Please run `go mod tidy` on your branch, commit and push again.' + echo 'Running `go mod tidy` on this CI test yields with the following changes:' + echo "$output" + exit 1 diff --git a/.github/workflows/legacy_local_example.yml b/.github/workflows/legacy_local_example.yml index 2033da389af..83f680c0406 100644 --- a/.github/workflows/legacy_local_example.yml +++ b/.github/workflows/legacy_local_example.yml @@ -17,6 +17,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
+ - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml index acddcc357fa..5f899bd761f 100644 --- a/.github/workflows/local_example.yml +++ b/.github/workflows/local_example.yml @@ -17,6 +17,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED! + - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml index b31b9af6151..ba09878b1f0 100644 --- a/.github/workflows/region_example.yml +++ b/.github/workflows/region_example.yml @@ -17,6 +17,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
+ - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/sonar_analysis.yml b/.github/workflows/sonar_analysis.yml index af59daa03b7..de80804b174 100644 --- a/.github/workflows/sonar_analysis.yml +++ b/.github/workflows/sonar_analysis.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.13 + go-version: 1.15 - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml deleted file mode 100644 index e40b4921ab1..00000000000 --- a/.github/workflows/unit.yml +++ /dev/null @@ -1,90 +0,0 @@ -name: unit -on: [push, pull_request] -jobs: - - build: - runs-on: ubuntu-18.04 - strategy: - matrix: - name: [percona56, mysql57, mysql80, mariadb101, mariadb102, mariadb103] - - steps: - - name: Set up Go - uses: actions/setup-go@v1 - with: - go-version: 1.15 - - - name: Check out code - uses: actions/checkout@v2 - - - name: Get dependencies - run: | - export DEBIAN_FRONTEND="noninteractive" - sudo apt-get update - - if [ ${{matrix.name}} = "mysql57" ]; then - sudo apt-get install -y mysql-server mysql-client - else - # Uninstall likely installed MySQL first - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - if [ ${{matrix.name}} = "percona56" ]; then - sudo rm -rf /var/lib/mysql - sudo apt install -y gnupg2 - wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb - sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb - sudo apt update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y percona-server-server-5.6 percona-server-client-5.6 - elif [ ${{matrix.name}} = "mysql80" ]; then - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb - echo mysql-apt-config 
mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client - elif [ ${{matrix.name}} = "mariadb101" ]; then - sudo apt-get install -y software-properties-common - sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 - sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.1/ubuntu bionic main' - sudo apt update - sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server - elif [ ${{matrix.name}} = "mariadb102" ]; then - sudo apt-get install -y software-properties-common - sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 - sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.2/ubuntu bionic main' - sudo apt update - sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server - elif [ ${{matrix.name}} = "mariadb103" ]; then - sudo apt-get install -y software-properties-common - sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 - sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.3/ubuntu bionic main' - sudo apt update - sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server - fi - fi - - sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata - sudo service mysql stop - sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" - - mkdir -p dist bin - curl -L 
https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist - mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ - - go mod download - - - name: Run make tools - run: | - make tools - - - name: unit - timeout-minutes: 30 - run: | - eatmydata -- make unit_test diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml index d280afe62a6..b362b2b714e 100644 --- a/.github/workflows/unit_race.yml +++ b/.github/workflows/unit_race.yml @@ -12,6 +12,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADDRESSED!
+ - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/unit_test_mariadb101.yml b/.github/workflows/unit_test_mariadb101.yml new file mode 100644 index 00000000000..c6120a09303 --- /dev/null +++ b/.github/workflows/unit_test_mariadb101.yml @@ -0,0 +1,71 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Unit Test (mariadb101) +on: [push, pull_request] +jobs: + + test: + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + # !mysql57 + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # mariadb101 + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.1/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: Run test + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/.github/workflows/unit_test_mariadb102.yml b/.github/workflows/unit_test_mariadb102.yml new file mode 100644 index 00000000000..17ebdba01a5 --- /dev/null +++ b/.github/workflows/unit_test_mariadb102.yml @@ -0,0 +1,71 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Unit Test (mariadb102) 
+on: [push, pull_request] +jobs: + + test: + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + # !mysql57 + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # mariadb102 + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.2/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist 
bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: Run test + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/.github/workflows/unit_test_mariadb103.yml b/.github/workflows/unit_test_mariadb103.yml new file mode 100644 index 00000000000..f30d035eaa9 --- /dev/null +++ b/.github/workflows/unit_test_mariadb103.yml @@ -0,0 +1,71 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Unit Test (mariadb103) +on: [push, pull_request] +jobs: + + test: + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + # !mysql57 + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # mariadb103 + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.3/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: Run test + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml new file mode 100644 index 00000000000..3c7e077ad32 --- /dev/null +++ b/.github/workflows/unit_test_mysql57.yml @@ -0,0 +1,56 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Unit Test (mysql57) +on: [push, 
pull_request] +jobs: + + test: + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + # mysql57 + sudo apt-get install -y mysql-server mysql-client + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: Run test + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml new file mode 100644 index 00000000000..80f224e77c2 --- /dev/null +++ b/.github/workflows/unit_test_mysql80.yml @@ -0,0 +1,71 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Unit Test (mysql80) +on: 
[push, pull_request] +jobs: + + test: + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + # !mysql57 + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L 
https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: Run test + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/.github/workflows/unit_test_percona56.yml b/.github/workflows/unit_test_percona56.yml new file mode 100644 index 00000000000..72bc5a24d61 --- /dev/null +++ b/.github/workflows/unit_test_percona56.yml @@ -0,0 +1,72 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Unit Test (percona56) +on: [push, pull_request] +jobs: + + test: + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + # !mysql57 + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # percona56 + sudo rm -rf /var/lib/mysql + sudo apt install -y gnupg2 + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y percona-server-server-5.6 percona-server-client-5.6 + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: Run test + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/.golangci.yml b/.golangci.yml index 48721a08dcb..ca7b6e25c3d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,7 @@ run: timeout: 10m + skip-dirs: + - go/vt/topo/k8stopo/client linters-settings: goimports: @@ -23,6 +25,12 @@ linters: - gofmt - goimports +issues: + exclude-rules: + - path: '^go/vt/proto/' + linters: + - goimports + # 
https://github.com/golangci/golangci/wiki/Configuration service: golangci-lint-version: 1.31.0 # use the fixed version to not introduce new linters unexpectedly diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 46a675327bb..125cd85560f 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -4,6 +4,7 @@ The following is the full list, alphabetically ordered. * Alkin Tezuysal ([askdba](https://github.com/askdba)) alkin@planetscale.com * Andres Taylor ([systay](https://github.com/systay)) andres@planetscale.com +* Andrew Mason ([amason](https://github.com/ajm188)) amason@slack-corp.com * Anthony Yeh ([enisoc](https://github.com/enisoc)) enisoc@planetscale.com * Dan Kozlowski ([dkhenry](https://github.com/dkhenry)) dan.kozlowski@gmail.com * David Weitzman ([dweitzman](https://github.com/dweitzman)) dweitzman@pinterest.com @@ -25,7 +26,7 @@ The following is the full list, alphabetically ordered. sougou, demmer, rafael, dweitzman, tirsen, askdba, enisoc ### Builds -dkhenry, shlomi-noach +dkhenry, shlomi-noach, ajm188 ### Resharding sougou, rafael, tirsen, dweitzman, systay, rohit-nayak-ps @@ -34,10 +35,13 @@ sougou, rafael, tirsen, dweitzman, systay, rohit-nayak-ps sougou, dweitzman, deepthi, systay, harshit-gangal ### Cluster Management -deepthi, rafael, enisoc, shlomi-noach +deepthi, rafael, enisoc, shlomi-noach, ajm188 ### Java mpawliszyn, pH14, harshit-gangal ### Kubernetes derekperkins, dkhenry, enisoc + +### VTAdmin +ajm188 diff --git a/Makefile b/Makefile index b499fc76775..c58a70223ec 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,7 @@ # limitations under the License. MAKEFLAGS = -s +GIT_STATUS := $(shell git status --porcelain) export GOBIN=$(PWD)/bin export GO111MODULE=on @@ -22,7 +23,7 @@ export REWRITER=go/vt/sqlparser/rewriter.go # Since we are not using this Makefile for compilation, limiting parallelism will not increase build time. 
.NOTPARALLEL: -.PHONY: all build install test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools web_bootstrap web_build web_start +.PHONY: all build install test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools web_bootstrap web_build web_start generate_ci_workflows all: build @@ -53,13 +54,27 @@ embed_config: go run github.com/GeertJohan/go.rice/rice embed-go go build . +# build the vitess binaries with dynamic dependency on libc +build-dyn: +ifndef NOBANNER + echo $$(date): Building source tree +endif + bash ./build.env + go install $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) -ldflags "$(shell tools/build_version_flags.sh)" ./go/... + (cd go/cmd/vttablet && go run github.com/GeertJohan/go.rice/rice append --exec=../../../bin/vttablet) + +# build the vitess binaries statically build: ifndef NOBANNER echo $$(date): Building source tree endif bash ./build.env - go install $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) -ldflags "$(shell tools/build_version_flags.sh)" ./go/... && \ - (cd go/cmd/vttablet && go run github.com/GeertJohan/go.rice/rice append --exec=../../../bin/vttablet) + # build all the binaries by default with CGO disabled + CGO_ENABLED=0 go install $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) -ldflags "$(shell tools/build_version_flags.sh)" ./go/... + # embed local resources in the vttablet executable + (cd go/cmd/vttablet && go run github.com/GeertJohan/go.rice/rice append --exec=../../../bin/vttablet) + # build vtorc with CGO, because it depends on sqlite + CGO_ENABLED=1 go install $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) -ldflags "$(shell tools/build_version_flags.sh)" ./go/cmd/vtorc/... 
debug: ifndef NOBANNER @@ -74,7 +89,7 @@ endif install: build # binaries mkdir -p "$${PREFIX}/bin" - cp "$${VTROOT}/bin/"{mysqlctld,vtorc,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtworker,vtbackup} "$${PREFIX}/bin/" + cp "$${VTROOT}/bin/"{mysqlctl,mysqlctld,vtorc,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtworker,vtbackup} "$${PREFIX}/bin/" # Install local install the binaries needed to run vitess locally # Usage: make install-local PREFIX=/path/to/install/root @@ -102,8 +117,23 @@ grpcvtctldclient: go/vt/proto/vtctlservice/vtctlservice.pb.go parser: make -C go/vt/sqlparser -visitor: - go generate go/vt/sqlparser/rewriter.go +codegen: asthelpergen sizegen parser + +visitor: asthelpergen + echo "make visitor has been replaced by make asthelpergen" + +asthelpergen: + go run ./go/tools/asthelpergen/main -in ./go/vt/sqlparser -iface vitess.io/vitess/go/vt/sqlparser.SQLNode -except "*ColName" + +sizegen: + go run ./go/tools/sizegen/sizegen.go \ + -in ./go/... \ + -gen vitess.io/vitess/go/vt/vtgate/engine.Plan \ + -gen vitess.io/vitess/go/vt/vttablet/tabletserver.TabletPlan \ + -gen vitess.io/vitess/go/sqltypes.Result + +astfmtgen: + go run ./go/tools/astfmtgen/main.go vitess.io/vitess/go/vt/sqlparser/... # To pass extra flags, run test.go manually. # For example: go run test.go -docker=false -- --extra-flag @@ -117,7 +147,6 @@ clean: go clean -i ./go/... 
rm -rf third_party/acolyte rm -rf go/vt/.proto.tmp - rm -rf ./visitorgen # Remove everything including stuff pulled down by bootstrap.sh cleanall: clean @@ -166,7 +195,7 @@ java_test: VTROOT=${PWD} mvn -f java/pom.xml -B clean verify install_protoc-gen-go: - go install github.com/golang/protobuf/protoc-gen-go + go install github.com/gogo/protobuf/protoc-gen-gofast PROTO_SRCS = $(wildcard proto/*.proto) PROTO_SRC_NAMES = $(basename $(notdir $(PROTO_SRCS))) @@ -179,10 +208,11 @@ ifndef NOBANNER echo $$(date): Compiling proto definitions endif -$(PROTO_GO_OUTS): install_protoc-gen-go proto/*.proto +$(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto for name in $(PROTO_SRC_NAMES); do \ - $(VTROOT)/bin/protoc --go_out=plugins=grpc:. -Iproto proto/$${name}.proto && \ - goimports -local vitess.io/vitess -w vitess.io/vitess/go/vt/proto/$${name}/$${name}.pb.go; \ + $(VTROOT)/bin/protoc --gofast_out=plugins=grpc:. --plugin protoc-gen-gofast="${GOBIN}/protoc-gen-gofast" \ + -I${PWD}/dist/vt-protoc-3.6.1/include:proto proto/$${name}.proto && \ + goimports -w vitess.io/vitess/go/vt/proto/$${name}/$${name}.pb.go; \ done cp -Rf vitess.io/vitess/go/vt/proto/* go/vt/proto rm -rf vitess.io/vitess/go/vt/proto/ @@ -245,6 +275,12 @@ docker_local: docker_mini: ${call build_docker_image,docker/mini/Dockerfile,vitess/mini} +DOCKER_VTTESTSERVER_SUFFIX = mysql57 mysql80 +DOCKER_VTTESTSERVER_TARGETS = $(addprefix docker_vttestserver_,$(DOCKER_VTTESTSERVER_SUFFIX)) +$(DOCKER_VTTESTSERVER_TARGETS): docker_vttestserver_%: + ${call build_docker_image,docker/vttestserver/Dockerfile.$*,vitess/vttestserver:$*} + +docker_vttestserver: $(DOCKER_VTTESTSERVER_TARGETS) # This rule loads the working copy of the code into a bootstrap image, # and then runs the tests inside Docker. 
# Example: $ make docker_test flavor=mariadb @@ -258,7 +294,7 @@ docker_unit_test: # This will generate a tar.gz file into the releases folder with the current source release: docker_base @if [ -z "$VERSION" ]; then \ - echo "Set the env var VERSION with the release version"; exit 1;\ + echo "Set the env var VERSION with the release version"; exit 1;\ fi mkdir -p releases docker build -f docker/Dockerfile.release -t vitess/release . @@ -268,6 +304,43 @@ release: docker_base echo "git push origin v$(VERSION)" echo "Also, don't forget the upload releases/v$(VERSION).tar.gz file to GitHub releases" +do_release: +ifndef RELEASE_VERSION + echo "Set the env var RELEASE_VERSION with the release version" + exit 1 +endif +ifndef DEV_VERSION + echo "Set the env var DEV_VERSION with the version the dev branch should have after release" + exit 1 +endif +ifeq ($(strip $(GIT_STATUS)),) + echo so much clean +else + echo cannot do release with dirty git state + exit 1 + echo so much win +endif +# Pre checks passed. Let's change the current version + cd java && mvn versions:set -DnewVersion=$(RELEASE_VERSION) + echo package servenv > go/vt/servenv/version.go + echo >> go/vt/servenv/version.go + echo const versionName = \"$(RELEASE_VERSION)\" >> go/vt/servenv/version.go + echo -n Pausing so release notes can be added. Press enter to continue + read line + git add --all + git commit -n -s -m "Release commit for $(RELEASE_VERSION)" + git tag -m Version\ $(RELEASE_VERSION) v$(RELEASE_VERSION) + cd java && mvn versions:set -DnewVersion=$(DEV_VERSION) + echo package servenv > go/vt/servenv/version.go + echo >> go/vt/servenv/version.go + echo const versionName = \"$(DEV_VERSION)\" >> go/vt/servenv/version.go + git add --all + git commit -n -s -m "Back to dev mode" + echo "Release preparations successful" + echo "A git tag was created, you can push it with:" + echo " git push upstream v$(RELEASE_VERSION)" + echo "The git branch has also been updated. 
You need to push it and get it merged" + tools: echo $$(date): Installing dependencies ./bootstrap.sh @@ -279,34 +352,70 @@ minimaltools: dependency_check: ./tools/dependency_check.sh -GEN_BASE_DIR ?= ./go/vt/topo/k8stopo +install_k8s-code-generator: tools.go go.mod + go install k8s.io/code-generator/cmd/deepcopy-gen + go install k8s.io/code-generator/cmd/client-gen + go install k8s.io/code-generator/cmd/lister-gen + go install k8s.io/code-generator/cmd/informer-gen + +DEEPCOPY_GEN=$(GOBIN)/deepcopy-gen +CLIENT_GEN=$(GOBIN)/client-gen +LISTER_GEN=$(GOBIN)/lister-gen +INFORMER_GEN=$(GOBIN)/informer-gen -client_go_gen: +GEN_BASE_DIR ?= vitess.io/vitess/go/vt/topo/k8stopo + +client_go_gen: install_k8s-code-generator echo $$(date): Regenerating client-go code # Delete and re-generate the deepcopy types - find $(GEN_BASE_DIR)/apis/topo/v1beta1 -type f -name 'zz_generated*' -exec rm '{}' \; - deepcopy-gen -i $(GEN_BASE_DIR)/apis/topo/v1beta1 -O zz_generated.deepcopy -o ./ --bounding-dirs $(GEN_BASE_DIR)/apis --go-header-file $(GEN_BASE_DIR)/boilerplate.go.txt + find $(VTROOT)/go/vt/topo/k8stopo/apis/topo/v1beta1 -name "zz_generated.deepcopy.go" -delete - # Delete, generate, and move the client libraries - rm -rf go/vt/topo/k8stopo/client + # We output to ./ and then copy over the generated files to the appropriate path + # This is done so we don't have rely on the repository being cloned to `$GOPATH/src/vitess.io/vitess` - # There is no way to get client-gen to automatically put files in the right place and still have the right import path so we generate and move them + $(DEEPCOPY_GEN) -o ./ \ + --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ + -O zz_generated.deepcopy \ + --bounding-dirs $(GEN_BASE_DIR)/apis \ + --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - # Generate client, informers, and listers - client-gen -o ./ --input 'topo/v1beta1' --clientset-name versioned --input-base 'vitess.io/vitess/go/vt/topo/k8stopo/apis/' -i vitess.io/vitess 
--output-package vitess.io/vitess/go/vt/topo/k8stopo/client/clientset --go-header-file $(GEN_BASE_DIR)/boilerplate.go.txt - lister-gen -o ./ --input-dirs vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1 --output-package vitess.io/vitess/go/vt/topo/k8stopo/client/listers --go-header-file $(GEN_BASE_DIR)/boilerplate.go.txt - informer-gen -o ./ --input-dirs vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1 --versioned-clientset-package vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned --listers-package vitess.io/vitess/go/vt/topo/k8stopo/client/listers --output-package vitess.io/vitess/go/vt/topo/k8stopo/client/informers --go-header-file $(GEN_BASE_DIR)/boilerplate.go.txt + # Delete existing code + rm -rf go/vt/topo/k8stopo/client + + # Generate clientset + $(CLIENT_GEN) -o ./ \ + --clientset-name versioned \ + --input-base $(GEN_BASE_DIR)/apis \ + --input 'topo/v1beta1' \ + --output-package $(GEN_BASE_DIR)/client/clientset \ + --fake-clientset=true \ + --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt + + # Generate listers + $(LISTER_GEN) -o ./ \ + --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ + --output-package $(GEN_BASE_DIR)/client/listers \ + --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt + + # Generate informers + $(INFORMER_GEN) -o ./ \ + --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ + --output-package $(GEN_BASE_DIR)/client/informers \ + --versioned-clientset-package $(GEN_BASE_DIR)/client/clientset/versioned \ + --listers-package $(GEN_BASE_DIR)/client/listers \ + --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt # Move and cleanup mv vitess.io/vitess/go/vt/topo/k8stopo/client go/vt/topo/k8stopo/ - rmdir -p vitess.io/vitess/go/vt/topo/k8stopo/ + mv vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go + rm -rf vitess.io/vitess/go/vt/topo/k8stopo/ # Check prerequisites and install dependencies web_bootstrap: 
./tools/web_bootstrap.sh # Do a production build of the vtctld UI. -# This target needs to be manually run every time any file within web/vtctld2/app +# This target needs to be manually run every time any file within web/vtctld2/app # is modified to regenerate rice-box.go web_build: web_bootstrap ./tools/web_build.sh @@ -317,7 +426,7 @@ web_build: web_bootstrap web_start: web_bootstrap cd web/vtctld2 && npm run start -vtadmin_web_install: +vtadmin_web_install: cd web/vtadmin && npm install # Generate JavaScript/TypeScript bindings for vtadmin-web from the Vitess .proto files. @@ -325,3 +434,9 @@ vtadmin_web_install: # While vtadmin-web is new and unstable, however, we can keep it out of the critical build path. vtadmin_web_proto_types: vtadmin_web_install ./web/vtadmin/bin/generate-proto-types.sh + +# Generate github CI actions workflow files for unit tests and cluster endtoend tests based on templates in the test/templates directory +# Needs to be called if the templates change or if a new test "shard" is created. We do not need to rebuild tests if only the test/config.json +# is changed by adding a new test to an existing shard. Any new or modified files need to be committed into git +generate_ci_workflows: + cd test && go run ci_workflow_gen.go && cd .. diff --git a/README.md b/README.md index fe2abf35fc2..e5c81599223 100644 --- a/README.md +++ b/README.md @@ -24,13 +24,13 @@ For more about Vitess, please visit [vitess.io](https://vitess.io). Vitess has a growing community. You can view the list of adopters [here](https://github.com/vitessio/vitess/blob/master/ADOPTERS.md). -## Reporting a Problem, Issue ,or Bug -To report a problem the best way to get attention is to create a GitHub [issue](.https://github.com/vitessio/vitess/issues ). 
+## Reporting a Problem, Issue, or Bug +To report a problem, the best way to get attention is to create a GitHub [issue](https://github.com/vitessio/vitess/issues) using proper severity level based on this [guide](https://github.com/vitessio/vitess/blob/master/SEVERITY.md). For topics that are better discussed live, please join the [Vitess Slack](https://vitess.io/slack) workspace. You may post any questions on the #general channel or join some of the special-interest channels. -Follow [Vitess Blog](https://blog.vitess.io/) for low-frequency updates like new features and releases. +Follow [Vitess Blog](https://blog.vitess.io/) for low-frequency updates like new features and releases. ## Security diff --git a/SECURITY.md b/SECURITY.md index c524bdc11de..18380130301 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -38,14 +38,14 @@ best protect our users. If a security vulnerability affects master, but not a currently supported branch, then the following process will apply: * The fix will land in master. -* A courtesy email will be sent to [vitess@googlegroups.com](https://groups.google.com/forum/#!forum/vitess) along with a posted notice in #developers on Slack. +* A courtesy notice will be posted in #developers on Vitess Slack. #### Policy for unsupported releases If a security vulnerability affects only a stable release which is no longer under active support, then the following process will apply: * A fix **will not** be issued (exceptions may be made for extreme circumstances) -* An email will be sent to [vitess-announce@googlegroups.com](https://groups.google.com/forum/#!forum/vitess-announce) identifying the threat, and encouraging users to upgrade. +* A notice will be posted to Vitess Slack in #general channel identifying the threat, and encouraging users to upgrade. #### Policy for supported releases @@ -73,9 +73,7 @@ or mitigation so that a realistic timeline can be communicated to users. 
**Disclosure of Forthcoming Fix to Users** (Completed within 1-7 days of Disclosure) -- The Fix Lead will email [vitess-announce@googlegroups.com](https://groups.google.com/forum/#!forum/vitess-announce) - informing users that a security vulnerability has been disclosed and that a fix will be made - available at YYYY-MM-DD HH:MM UTC in the future via this list. This time is the Release Date. +- The Fix Lead will update Vitess Slack informing users that a security vulnerability has been disclosed and that a fix will be made available at YYYY-MM-DD HH:MM UTC in the future via this list. This time is the Release Date. - The Fix Lead will include any mitigating steps users can take until a fix is available. The communication to users should be actionable. They should know when to block time to apply @@ -83,10 +81,8 @@ patches, understand exact mitigation steps, etc. **Disclosure of Fixed Vulnerability** -- The Fix Lead will email [vitess-announce@googlegroups.com](https://groups.google.com/forum/#!forum/vitess-announce) - informing users that there are new releases available to address an identified vulnerability. -- As much as possible this email should be actionable and include links to CVEs, and how to apply - the fix to user's environments; this can include links to external distributor documentation. +- The Fix Lead will post a notice on Vitess Slack informing users that there are new releases available to address an identified vulnerability. +- As much as possible this notice should be actionable and include links to CVEs, and how to apply the fix to user's environments; this can include links to external distributor documentation. 
### Embargo Policy diff --git a/SEVERITY.md b/SEVERITY.md new file mode 100644 index 00000000000..0626c9806e8 --- /dev/null +++ b/SEVERITY.md @@ -0,0 +1,61 @@ +Please search the existing issues for relevant feature requests, and use the [reaction feature](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to add upvotes to pre-existing requests. + +#### Feature Description + +In order to improve the quality of the issue queue handling and automation purposes, we’d like to open this RFC. Tags are provided to rate both the severity and priority of issues for the Vitess maintainers’ team: + + +**Severity** is used to indicate how "critical" a problem is. It's a more objective rating that in theory should typically stay constant throughout the life of the issue. + +**Priority** is used as a project management tool to indicate how important an issue is for the current release, milestone, and/or sprint. Because it's used for planning, the priority value is more likely than severity to be dynamic over the life of the issue. + +Severity + +The available tags for severity are as follows: + +### Severity 1 + +Critical feature impact. This indicates you are unable to use the overall product resulting in a critical impact on operations. This condition requires an immediate solution. Your cluster is down or unable to serve traffic. The Vitess is unable to operate or the product caused other critical software to fail and there is no acceptable way to work around the problem. You have a significant security breach or data leak. You have data corruption, both on disk, and in the results from the cluster. + +### Severity 2 + +Significant feature impact. This indicates the Vitess is usable but is severely limited or degraded. Severely limited can mean that a task is unable to operate, the task caused other critical software to fail, or the task is usable but not without severe difficulty. 
+A serious performance degradation might fall into this category unless it was so bad as to render the system completely inoperative. This can mean that function which you were attempting to use failed to function, but a temporary workaround is available hence needs immediate attention. + +### Severity 3 + +Some feature impact. This indicates the feature is usable but a Vitess cluster runs with minor issues/limitations. The task that you were attempting to use behaved in a manner that is incorrect and/or unexpected, or presented misleading or confusing information. +This can include documentation that was incomplete or incorrect, making it difficult to know how to use a task. This can include poor or unexplained log messages where no clear error was evident. This can include situations where some side effect is observed which does not significantly harm operations. +Documentation that causes the customer to perform some operation that damaged data (unintentional deletion, corruption, etc.) would more likely be listed as a severity 2 problem. +This can not include cases where customer data is inaccurately stored, or retrieved. Data integrity problems require a severity of 2 or higher. + +### Severity 4 + +Minimal feature impact. This indicates the problem causes little impact on operations or that a reasonable circumvention to the problem has been implemented. +The function you were attempting to use suffers from usability quirks, requires minor documentation updates, or could be enhanced with some minor changes to the function. +This is also the place for general Help/DOC suggestions where data is NOT missing or incorrect. + + +**Priority** + +The available tags for priority are as follows: + +**P-1 Priority Critical** +Cannot ship the release/milestone until completed. +Should be addressed immediately before any lower priority items. 
+ +**P-2 Priority High** +Part of the "must-include" plan for the given milestone + +**P-3 Priority Medium** +Highly desirable but not essential for the given milestone. + +**P-4 Priority Low** +Desirable for given milestone but not essential, should be pursued if an opportunity arises to address in a safe and timely manner + + + +#### Use Case(s) + +Any relevant use-cases that you see. + diff --git a/dev.env b/dev.env index f1e6e71b896..781f54be7ba 100644 --- a/dev.env +++ b/dev.env @@ -1,13 +1,13 @@ # No shebang line as this script is sourced from an external shell. # Copyright 2019 The Vitess Authors. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -34,6 +34,6 @@ export PATH # According to https://github.com/etcd-io/etcd/blob/a621d807f061e1dd635033a8d6bc261461429e27/Documentation/op-guide/supported-platform.md, # currently, etcd is unstable on arm64, so ETCD_UNSUPPORTED_ARCH should be set. -if [ "$(uname -m)" = aarch64 ]; then +if [ "$(uname -m)" = aarch64 ]; then export ETCD_UNSUPPORTED_ARCH=arm64 fi diff --git a/doc/releasenotes/10_0_0_release_notes.md b/doc/releasenotes/10_0_0_release_notes.md new file mode 100644 index 00000000000..ace6e57d215 --- /dev/null +++ b/doc/releasenotes/10_0_0_release_notes.md @@ -0,0 +1,263 @@ +This release complies with VEP-3 which removes the upgrade order requirement. Components can be upgraded in any order. It is recommended that the upgrade order should still be followed if possible, except to canary test the new version of VTGate before upgrading the rest of the components.
+ +## Known Issues +* Running binaries with `--version` or calling @@version from a MySQL client still shows `10.0.0-RC1` (Note: fixed in v10.0.1) +* Online DDL [cannot be used](https://github.com/vitessio/vitess/pull/7873#issuecomment-822798180) if you are using the keyspace filtering feature of VTGate +* VReplication errors when a fixed-length binary column is used as the sharding key #8080 + +## Bugs Fixed + +### VTGate / MySQL compatibility +* Remove printing of ENFORCED word so that statements remain compatible with mysql 5.7 #7458 +* Allow any ordering of generic options in column definitions #7459 +* Corrects the comment handling in vitess #7581 +* Fix regression - should be able to plan subquery on top of subquery #7682 +* Nullable Timestamp Column Fix #7740 +* VTGate: Fix the error messages in drop, create and alter database commands #7397 +* VTGate: Fix information_schema query with system schema in table_schema filter #7430 +* VTGate: Fix Set Statement in Tablet when executed with bindvars #7431 +* VTGate: Fix for Query Serving when Toposerver is Down #7484 +* VTGate: Add necessary bindvars when preparing queries #7493 +* VTGate: Show anywhere plan fix to consider default keyspace #7531 +* VTGate: Fix table parsing on VSchema generation #7511 +* VTGate: Use the emulated MySQL version for special comments #7510 +* VTGate: Reset Session for Reserved Connection when the connection id is not found #7539 +* VTGate: Healthcheck: update healthy tablets correctly when a stream returns an error or timeout #7732 +* VTGate: Fix for reserved connection usage with transaction #7646 +* VTGate: Fix MySQL Workbench failure on login with `select current_user()` #7705 +* VTGate: Constraint names and database names with spaces. 
#7745 +* VTGate: Fix dual table query when system schema is selected database #7734 + +### Other +* VTTablet: Correctly initialize statsTabletTypeCounts during startup #7390 +* Backup/Restore: Respect -disable_active_reparents in backup/restore #7576 +* Backup/Restore: check disable_active_reparents properly before waiting for position update #7703 + + +## Functionality Added or Changed + +### VTGate / MySQL compatibility / Query Serving + +* VTGate: Gen4 Planner: AxB vs BxA #7274 +* VTGAte: Gen4 fallback planning #7370 +* VTGate: Support for CALL procedures #7287 +* VTGate: Set default for @@workload to OLTP #7288 +* VTGate: Change @@version and @@version_comment #7337 +* VTGate: Fix VitessAware system variables of type boolean return NULL when MySQL is not involved #7353 +* VTGate: Add stats for RowsAffected similar to RowsReturned #7380 +* VTGate: Added information_schema_stats_expiry to allowed list of set vars #7435 +* VTGate: LFU Cache Implementation #7439 +* VTGate: Describe table to route based on table name and qualifier #7445 +* VTGate: Olap error message fix #7448 +* VTGate: Temporary Table support in unsharded keyspace #7411 +* VTGate: Publish table size on schema #7444 +* VTGate: Support for caching_sha2_password plugin in mysql/client #6716 +* VTGate: Moving Show plan from executor to planbuilder #7475 +* VTGate: Adds another case to merge routes for information_schema queries #7504 +* VTGate: Add innodb_read_rows as vttablet metric #7520 +* VTGate: Adds support for Show variables #7547 +* VTGate: gen4: fail unsupported queries #7409 +* VTGate: Fix Metadata in SHOW queries #7540 +* VTGate: Update AST helper generation #7558 +* VTGate: Avoiding addition of redundant unary operators #7579 +* VTGate: Optimise AST rewriting #7583 +* VTGate: Add Show Status query to vtexplain and make asthelpergen/sizegen quiet #7590 +* VTGate: Add support for SELECT ALL #7593 +* VTGate: Empty statement error code change in sql parsing #7618 +* VTGate: Socket system 
variable to return vitess mysql socket #7637 +* VTGate: Make DROP/CREATE DATABASE pluggable #7381 +* VTGate: Allow Select with lock to pass through in vttablet #7584 +* VTGate: Fix ordering in SELECT INTO and printing of strings #7655 +* VTGate: AST Equals code generator #7672 +* VTGate: [tabletserver] More resilient wait for schema changes #7684 +* VTGate: Fix flush statement planner #7695 +* VTGate: Produce query warnings for using features not supported when sharded #7538 +* VTGate: Support for ALTER VITESS_MIGRATION statements #7663 +* VTGate: Solve I_S queries using CNF rewriting #7677 +* VTGate: System schema queries #7685 +* VTGate: Make the AST visitor faster #7701 +* VTGate: COM_PREPARE - Single TCP response packet with all MySQL Packets #7713 +* VTGate: Replace the database name in result fields only if needed #7714 +* VTGate: Split ast_helper into individual gen files #7727 +* VTGate: Adds support for ordering on character fields for sharded keyspace queries #7678 +* VTGate: Show columns query on system schema #7729 +* VTGate: Disallow foreign key constraint on ddl #7780 +* VTGate: VTGate: support -enable_online_ddl flag #7694 + +### Testing +* Fuzzing: Add vtctl fuzzer #7605 +* Fuzzing: Add more fuzzers #7622 +* Fuzzing: Add 3 fuzzers for mysql endpoints #7639 +* Fuzzing: Add oss-fuzz build script #7591 +* Fuzzing: Add requirement for minimum length of input data #7722 +* Fuzzing: Add new mysql fuzzer #7660 +* Fuzzing: Add [grpcvtgateconn] fuzzer #7689 +* Fuzzing: Make mysql fuzzer more calls during each iteration #7766 + +### Performance +* VTGate: [perf] zero-copy tokenizer #7619 +* VTGate: [perf: sqlparser faster formatting #7710 +* VTGate :[perf] Cache reserved bind variables in queries #7698 +* VTGate: [perf] sqlparser yacc codegen #7669 +* VTGate: Making fast AST rewriter faster #7726 +* VTGate: Cached Size Implementation #7387 +* VTGate: Plan remove mutexes #7468 +* LFU Cache Bug Fixes #7479 +* [cache] Handle all possible initialization cases 
#7556 +* VTGate: [servenv] provide a global flag for profiling #7496 +* VTGate: [vttablet] Benchmarks and two small optimizations #7560 +* [pprof]: allow stopping profiling early with a signal #7594 +* perf: RPC Serialization #7519 +* perf: keyword lookups in the tokenizer #7606 + +### Cluster Management +* [vtctld] Migrate topo management RPCs #7395 +* [vtctldclient] Set `SilenceErrors` on the root command, so we don't double-log #7404 +* [vtctldclient] Command line flags: dashes and underscores synonyms #7406 +* Extract the `maxReplPosSearch` struct out to `topotools` #7420 +* Add protoutil package, refactor ISP to use it #7421 +* Add `ErrorGroup` to package concurrency, use in `waitOnNMinusOneTablets` #7429 +* [vtctld / wrangler] Extract some reparent methods out to functions for shared use between wrangler and VtctldServer #7434 +* [vtctld/wrangler] Extract `EmergencyReparentShard` logic to dedicated struct and add unit tests #7464 +* Provide named function for squashing usage errors; start using it #7451 +* [concurrency] Add guard against potentially blocking forever in ErrorGroup.Wait() when NumGoroutines is 0 #7463 +* Add hook for statsd integration #7417 +* [concurrency] Add guard against potentially blocking forever in ErrorGroup.Wait() when NumGoroutines is 0 #7463 +* Resilient rebuild keyspace graph check, tablet controls not in `RebuildKeyspaceGraph` command #7442 +* [reparentutil / ERS] confirm at least one replica succeeded to `SetMaster`, or fail #7486 +* [reparentutil / wrangler] Extract PlannedReparentShard logic from wrangler to PlannedReparenter struct #7501 +* Add backup/restore duration stats #7512 +* Refresh replicas and rdonly after MigrateServedTypes except on skipRefreshState. 
#7327 +* [eparentutil] ERS should not attempt to WaitForRelayLogsToApply on primary tablets that were not running replication #7523 +* [orchestrator] prevent XSS attack via 'orchestrator-msg' params #7526 +* [vtctld] Add remaining reparent commands to VtctldServer #7536 +* [reparentutil] ERS should not attempt to WaitForRelayLogsToApply on primary tablets that were not running replication #7523 +* Shutdown vttablet gracefully if tablet record disappears #7563 +* ApplySchema: -skip_preflight #7587 +* Table GC: disable binary logging on best effort basis #7588 +* Addition of waitSig pprof argument to start recording on USR1 #7616 +* Add combine TLS certs feature #7609 +* Check error response before attempting to access InitShardPrimary response #7658 +* [vtctld] Migrate GetSrvKeyspace as GetSrvKeyspaces in VtctldServer #7680 +* [vtctld] Migrate ShardReplicationPositions #7690 +* [reparentutil | ERS] Bind status variable to each goroutine in WaitForRelayLogsToApply #7692 +* [servenv] Fix var shadowing caused by short variable declaration #7702 +* [vtctl|vtctldserver] List/Get Tablets timeouts #7715 +* vtctl ApplySchema supports '-request_context' flag #7777 + +### VReplication + +* VReplication: vstreamer to throttle on source endpoint #7324 +* VReplication: Throttle on target tablet #7364 +* VReplication: Throttler: fix to client usage in vreplication and table GC #7422 +* VReplication: MoveTables/Reshard add flags to start workflows in a stopped state and to stop workflow once copy is completed #7449 +* VReplication: Support for caching_sha2_password plugin in mysql/client #6716 +* VReplication: Validate SrvKeyspace during Reshard/SwitchReads #7481 +* VReplication: [MoveTables] Refresh SrvVSchema (for Routing Rules) and source tablets (for Blacklisted Tables) on completion #7505 +* VReplication : Data migration from another Vitess cluster #7546 +* VReplication : [materialize] Add cells and tablet_types parameters #7562 +* VReplication: JSON Columns: fix bug where 
vreplication of update statements were failing #7640 +* VReplication: Make the frequency at which heartbeats update the _vt.vreplication table configurable #7659 +* VReplication: Error out if binlog compression is turned on #7670 +* VReplication: Tablet throttler: support for custom query & threshold #7541 +* VStream API: allow aligning streams from different shards to minimize skews across the streams #7626 + +### OnlineDDL + +* OnlineDDL: update gh-ost binary to v1.1.1 #7394 +* Online DDL via VReplication #7419 +* Online DDL: VReplicatoin based mini stress test CI #7492 +* OnlineDDL: Revert for VReplication based migrations #7478 +* Online DDL: Internal support for eta_seconds #7630 +* Online DDL: Support 'SHOW VITESS_MIGRATIONS' queries #7638 +* Online DDL: Support for REVERT VITESS_MIGRATION statement #7656 +* Online DDL: Declarative Online DDL #7725 + +### VTAdmin + +* VTAdmin: Add vtadmin-web build flag for configuring fetch credentials #7414 +* VTAdmin: Add `cluster` field to vtadmin-api's /api/gates response #7425 +* VTAdmin: Add /api/clusters endpoint to vtadmin-api #7426 +* VTAdmin: Add /api/schemas endpoint to vtadmin-api #7432 +* VTAdmin: [vtadmin-web] Add routes and simple tables for all entities #7440 +* VTAdmin: [vtadmin-web] Set document.title from route components #7450 +* VTAdmin: [vtadmin-web] Add DataTable component with URL pagination #7487 +* VTAdmin: [vtadmin-api] Add shards to /api/keyspaces response #7453 +* VTAdmin: [vtadmin-web] Add replaceQuery + pushQuery to useURLQuery hook #7507 +* VTAdmin: [vtadmin-web] An initial pass for tablet filters #7515 +* VTAdmin: [vtadmin-web] Add a Select component #7524 +* VTAdmin: [vtadmin-api] Add /vtexplain endpoint #7528 +* VTAdmin: [vtadmin-api] Reorganize tablet-related functions into vtadmin/cluster/cluster.go #7553 +* VTAdmin: Three small bugfixes in Tablets table around stable sort order, display type lookup, and filtering by type #7568 +* VTAdmin: [vtadmin] Add GetSchema endpoint #7596 +* 
VTAdmin: [vtadmin/testutil] Add testutil helper to manage the complexity of recursively calling WithTestServer #7601 +* VTAdmin: [vtadmin] Add FindSchema route #7610 +* VTAdmin: [vtadmin-web] Add simple /schema view with table definition #7615 +* VTAdmin: [vtadmin] vschemas api endpoints #7625 +* VTAdmin: [vtadmin] Add support for additional service healthchecks in grpcserver #7635 +* VTAdmin: [vtadmin] test refactors #7641 +* VTAdmin: [vtadmin] propagate error contexts #7642 +* VTAdmin: [vtadmin] tracing refactor #7649 +* VTAdmin: [vtadmin] GetWorkflow(s) endpoints #7662 +* VTAdmin: [vitessdriver|vtadmin] Support Ping in vitessdriver, use in vtadmin to healthcheck connections during Dial #7709 +* VTAdmin: [vtadmin] Add to local example #7699 +* VTAdmin: vtexplain lock #7724 +* VTAdmin: [vtadmin] Aggregate schema sizes #7751 +* VTAdmin: [vtadmin-web] Add comments + 'options' parameter to API hooks #7754 +* VTAdmin: [vtadmin-web] Add common max-width to infrastructure table views #7760 +* VTAdmin: [vtadmin-web] Add hooks + skeleton view for workflows #7762 +* VTAdmin: [vtadmin-web] Add a hasty filter input to the /schemas view #7779 + +### Other / Tools + +* [rulesctl] Implements CLI tool for rule management #7712 + +## Examples / Tutorials + +* Source correct shell script in README #7749 + +## Documentation + +* Add Severity Labels document #7542 +* Remove Google Groups references #7664 +* Move some commas around in README.md :) #7671 +* Add Andrew Mason to Maintainers List #7757 + +## Build/CI Environment Changes + +* Update java build versions to vitess 10.0.0 #7383 +* CI: check run analysis to post JSON from file #7386 +* Fix Dockerfiles for vtexplain and vtctlclient #7418 +* CI: Add descriptive names to vrep shards. 
Update test generator script #7454 +* CI: adding 'go mod tidy' test #7461 +* Docker builds vitess/vtctlclient to install curl #7466 +* Add VT_BASE_VER to vtexplain/Dockerfile #7467 +* Enable -mysql_server_version in vttestserver, and utilize it in vttestserver container images #7474 +* [vtctld | tests only] testtmclient refactor #7518 +* CI: skip some tests on forked repos #7527 +* Workflow to check make sizegen #7535 +* Add mysqlctl docker image #7557 +* Restore CI workflow shard 26, accidentally dropped #7569 +* Update CODEOWNERS #7586 +* CI: ci-workflow-gen turn string to array to reduce conflicts #7582 +* Add percona-toolkit (for pt-osc/pt-online-schema-change) to the docker/lite images #7603 +* CI: Use ubuntu-18.04 in tests #7614 +* [vttestserver] Fix to work with sharded keyspaces #7617 +* Add tools.go #7517 +* Make vttestserver compatible with persistent data directories #7718 +* Add vtorc binary for rpm,deb builds #7750 +* Fixes bug that prevents creation of logs directory #7761 +* [Java] Guava update to 31.1.1 #7764 + +## Functionality Neutral Changes +* VTGate: Remove unused key.Destination.IsUnique() #7565 +* VTGate: Add information_schema query on prepare statement #7746 +* VTGate: Tests for numeric_precision and numeric_scale columns in information_schema #7763 +* Disable flaky test until it can be fixed #7623 +* Tests: reset stat at the beginning of test #7644 +* Cleanup mysql server_test #7645 +* vttablet: fix flaky tests #7543 +* Removed unused tests for Wordpress installation #7516 +* Fix unit test fail after merge #7550 +* Add test with NULL input values for vindexes that did not have any. 
#7552 + diff --git a/doc/releasenotes/10_0_1_release_notes.md b/doc/releasenotes/10_0_1_release_notes.md new file mode 100644 index 00000000000..6c37ad54f2d --- /dev/null +++ b/doc/releasenotes/10_0_1_release_notes.md @@ -0,0 +1,2 @@ +## Bugs Fixed +* Running binaries with `--version` or calling @@version from a MySQL client still shows `10.0.0-RC1` diff --git a/doc/releasenotes/10_0_2_release_notes.md b/doc/releasenotes/10_0_2_release_notes.md new file mode 100644 index 00000000000..5ff40ef3fee --- /dev/null +++ b/doc/releasenotes/10_0_2_release_notes.md @@ -0,0 +1,24 @@ +## Bug fixes +### Query Serving + * Fixes encoding of sql strings #8029 + * Fix for issue with information_schema queries with both table name and schema name predicates #8099 + * PRIMARY in index hint list for release 10.0 #8159 +### VReplication + * VReplication: Pad binlog values for binary() columns to match the value returned by mysql selects #8137 +## CI/Build +### Build/CI + * update release notes with known issue #8081 +## Documentation +### Other + * Post v10.0.1 updates #8045 +## Enhancement +### Build/CI + * Added release script to the makefile #8030 +### Other + * Add optional TLS feature to gRPC servers #8176 +## Other +### Build/CI + * Release 10.0.1 #8031 + +The release includes 14 commits (excluding merges) +Thanks to all our contributors: @GuptaManan100, @askdba, @deepthi, @harshit-gangal, @noxiouz, @rohit-nayak-ps, @systay diff --git a/doc/releasenotes/9_0_0_release_notes.md b/doc/releasenotes/9_0_0_release_notes.md index 20c18b0a32f..43c5510b845 100644 --- a/doc/releasenotes/9_0_0_release_notes.md +++ b/doc/releasenotes/9_0_0_release_notes.md @@ -7,9 +7,6 @@ The following PRs made changes to behaviors that clients might rely on. They sho Vitess 9.0 is not compatible with the previous release of the Vitess Kubernetes Operator (2.2.0). A new version of the Operator (2.3.0) is available that is compatible. 
-## Known Issue(s) -* VReplication errors when a fixed-length binary column is used as the sharding key #8080 - ## Bugs Fixed ### VTGate / MySQL compatibility @@ -159,7 +156,6 @@ Automatically terminate migrations run by a failed tablet * Online DDL: ignore errors if extracted gh-ost binary is identical to installed binary #6928 * Online DDL: Table lifecycle: skip time hint for unspecified states #7151 - ### VTadmin * VTadmin: Initial vtadmin-api, clusters, and service discovery #7187 @@ -245,4 +241,3 @@ Automatically terminate migrations run by a failed tablet * action_repository: no need for http.Request #7124 * Testing version upgrade/downgrade path from/to 8.0 #7323 * Use `context` from Go's standard library #7235 - diff --git a/doc/releasenotes/9_0_2_release_notes.md b/doc/releasenotes/9_0_2_release_notes.md new file mode 100644 index 00000000000..14749b0069b --- /dev/null +++ b/doc/releasenotes/9_0_2_release_notes.md @@ -0,0 +1,20 @@ +## Bug fixes +### Cluster management + * Restore: Check disable_active_reparents properly before waiting for position update #8114 +### Query Serving + * Fix information_schema query with system schema in table_schema filter #8095 + * Fix for issue with information_schema queries with both table name and schema name predicates #8096 + * Fix for transactions not allowed to finish during PlannedReparentShard #8098 + * PRIMARY in index hint list #8158 +## CI/Build +### Build/CI + * Release 9.0.1 #8065 + * 9.0.0: update release notes with known issue #8080 #8082 + * Added release script to the makefile #8182 + * Update do_release to work in the 9.0 branch #8184 +## Performance +### Cluster management + * Revert "backup: Use pargzip instead of pgzip for compression." 
#8174 + +The release includes 17 commits (excluding merges) +Thanks to all our contributors: @GuptaManan100, @deepthi, @Harshit, @rafael, @systay diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile index c6d28a79b6c..a8665ed148f 100644 --- a/docker/k8s/Dockerfile +++ b/docker/k8s/Dockerfile @@ -42,6 +42,7 @@ COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificat # Copy binaries COPY --from=base /vt/bin/mysqlctld /vt/bin/ +COPY --from=base /vt/bin/mysqlctl /vt/bin/ COPY --from=base /vt/bin/vtctld /vt/bin/ COPY --from=base /vt/bin/vtctl /vt/bin/ COPY --from=base /vt/bin/vtctlclient /vt/bin/ diff --git a/docker/k8s/mysqlctl/Dockerfile b/docker/k8s/mysqlctl/Dockerfile new file mode 100644 index 00000000000..45abdfda5dc --- /dev/null +++ b/docker/k8s/mysqlctl/Dockerfile @@ -0,0 +1,42 @@ +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ARG VT_BASE_VER=latest + +FROM vitess/k8s:${VT_BASE_VER} AS k8s + +FROM debian:buster-slim + +# Set up Vitess environment (just enough to run pre-built Go binaries) +ENV VTROOT /vt +ENV VTDATAROOT /vtdataroot + +# Prepare directory structure. 
+RUN mkdir -p /vt/bin && \ + mkdir -p /vt/config && mkdir -p /vtdataroot + +# Copy binaries +COPY --from=k8s /vt/bin/mysqlctl /vt/bin/ + +# Copy certs to allow https calls +COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +# copy vitess config +COPY --from=k8s /vt/config /vt/config + +# add vitess user/group and add permissions +RUN groupadd -r --gid 2000 vitess && \ + useradd -r -g vitess --uid 1000 vitess && \ + chown -R vitess:vitess /vt && \ + chown -R vitess:vitess /vtdataroot diff --git a/docker/k8s/vtctlclient/Dockerfile b/docker/k8s/vtctlclient/Dockerfile index 4f50a333ea9..5ed9ba0d613 100644 --- a/docker/k8s/vtctlclient/Dockerfile +++ b/docker/k8s/vtctlclient/Dockerfile @@ -20,7 +20,7 @@ FROM debian:buster-slim RUN apt-get update && \ apt-get upgrade -qq && \ - apt-get install jq -qq --no-install-recommends && \ + apt-get install jq curl -qq --no-install-recommends && \ apt-get autoremove && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57 index 985d8fb89e7..a98bda48381 100644 --- a/docker/lite/Dockerfile.ubi7.mysql57 +++ b/docker/lite/Dockerfile.ubi7.mysql57 @@ -61,7 +61,7 @@ RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ mysql-community-client mysql-community-server \ # Have to use hacks to ignore conflicts on /etc/my.cnf install && mkdir -p /tmp/1 \ - && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql57-community --disablerepo mysql80-community percona-xtrabackup-24 \ + && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql57-community --disablerepo mysql80-community percona-xtrabackup-24 percona-toolkit \ && rpm -Uvh --replacefiles /tmp/1/*rpm \ && rm -rf /tmp/1 \ && yum clean all \ diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80 index b38b04c0c84..fce90776a2e 100644 --- 
a/docker/lite/Dockerfile.ubi7.mysql80 +++ b/docker/lite/Dockerfile.ubi7.mysql80 @@ -61,7 +61,7 @@ RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ mysql-community-client mysql-community-server \ # Have to use hacks to ignore conflicts on /etc/my.cnf install && mkdir -p /tmp/1 \ - && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community --disablerepo mysql57-community percona-xtrabackup-80 \ + && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community --disablerepo mysql57-community percona-xtrabackup-80 percona-toolkit \ && rpm -Uvh --replacefiles /tmp/1/*rpm \ && rm -rf /tmp/1 \ && yum clean all \ diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57 index 497ca8f790d..a8911e23a9d 100644 --- a/docker/lite/Dockerfile.ubi7.percona57 +++ b/docker/lite/Dockerfile.ubi7.percona57 @@ -55,7 +55,7 @@ RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \ # Can't use alwaysprompt=no here, since we need to pick up deps # No way to separate key imports and accept deps separately in yum/dnf - && yum install -y --setopt=tsflags=nodocs Percona-Server-server-57 percona-xtrabackup-24 \ + && yum install -y --setopt=tsflags=nodocs Percona-Server-server-57 percona-xtrabackup-24 percona-toolkit \ && yum clean all \ && rm -rf /etc/my.cnf /var/lib/mysql /tmp/gpg /sbin/mysqld-debug diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80 index 629073ef481..e1ed4c89a1b 100644 --- a/docker/lite/Dockerfile.ubi7.percona80 +++ b/docker/lite/Dockerfile.ubi7.percona80 @@ -59,7 +59,7 @@ RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ && sleep 5 \ # Can't use alwaysprompt=no here, since we need to pick up deps # No way to separate key imports and accept deps separately in yum/dnf - && yum 
install -y --setopt=tsflags=nodocs percona-server-server percona-xtrabackup-80 \ + && yum install -y --setopt=tsflags=nodocs percona-server-server percona-xtrabackup-80 percona-toolkit \ && yum clean all \ && rm -rf /etc/my.cnf /var/lib/mysql /tmp/gpg /sbin/mysqld-debug diff --git a/docker/lite/install_dependencies.sh b/docker/lite/install_dependencies.sh index 8ed522d77c6..45a7b53dc4a 100755 --- a/docker/lite/install_dependencies.sh +++ b/docker/lite/install_dependencies.sh @@ -25,6 +25,19 @@ add_apt_key() { done } +# Set number of times to retry a download +MAX_RETRY=20 + +do_fetch() { + wget \ + --tries=$MAX_RETRY\ + --read-timeout=30\ + --timeout=30\ + --retry-connrefused\ + --waitretry=1\ + --no-dns-cache \ + $1 -O $2 +} # Install base packages that are common to all flavors. BASE_PACKAGES=( @@ -45,6 +58,7 @@ BASE_PACKAGES=( sysstat wget curl + percona-toolkit ) apt-get update @@ -63,11 +77,11 @@ mysql56) ;; mysql57) mysql57_version=5.7.31 - wget https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/libmysqlclient20_${mysql57_version}-1debian10_amd64.deb -O /tmp/libmysqlclient20_${mysql57_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-community-client_${mysql57_version}-1debian10_amd64.deb -O /tmp/mysql-community-client_${mysql57_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-client_${mysql57_version}-1debian10_amd64.deb -O /tmp/mysql-client_${mysql57_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-community-server_${mysql57_version}-1debian10_amd64.deb -O /tmp/mysql-community-server_${mysql57_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-server_${mysql57_version}-1debian10_amd64.deb -O /tmp/mysql-server_${mysql57_version}-1debian10_amd64.deb + do_fetch 
https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/libmysqlclient20_${mysql57_version}-1debian10_amd64.deb /tmp/libmysqlclient20_${mysql57_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-community-client_${mysql57_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql57_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-client_${mysql57_version}-1debian10_amd64.deb /tmp/mysql-client_${mysql57_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-community-server_${mysql57_version}-1debian10_amd64.deb /tmp/mysql-community-server_${mysql57_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-5.7/m/mysql-community/mysql-server_${mysql57_version}-1debian10_amd64.deb /tmp/mysql-server_${mysql57_version}-1debian10_amd64.deb PACKAGES=( /tmp/libmysqlclient20_${mysql57_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql57_version}-1debian10_amd64.deb @@ -79,13 +93,13 @@ mysql57) ;; mysql80) mysql8_version=8.0.21 - wget https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb -O /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb -O /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian10_amd64.deb -O /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian10_amd64.deb -O /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb - wget 
https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb -O /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian10_amd64.deb -O /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb - wget https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian10_amd64.deb -O /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb 
PACKAGES=( /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb @@ -200,6 +214,8 @@ percona57) debconf debconf/frontend select Noninteractive percona-server-server-5.7 percona-server-server/root_password password 'unused' percona-server-server-5.7 percona-server-server/root_password_again password 'unused' +percona-server-server-5.7 percona-server-server-5.7/root-pass password 'unused' +percona-server-server-5.7 percona-server-server-5.7/re-root-pass password 'unused' EOF ;; percona80) @@ -213,7 +229,7 @@ esac # Install flavor-specific packages apt-get update -apt-get install -y --no-install-recommends "${PACKAGES[@]}" +for i in $(seq 1 $MAX_RETRY); do apt-get install -y --no-install-recommends "${PACKAGES[@]}" && break; done # Clean up files we won't need in the final image. rm -rf /var/lib/apt/lists/* diff --git a/docker/local/run.sh b/docker/local/run.sh index 217052d4304..fb847be80c8 100755 --- a/docker/local/run.sh +++ b/docker/local/run.sh @@ -1,3 +1,3 @@ #!/bin/bash -docker run -p 15000:15000 -p 15001:15001 -p 15991:15991 --rm -it vitess/local +docker run -p 15000:15000 -p 15001:15001 -p 15991:15991 -p 15999:15999 --rm -it vitess/local diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57 new file mode 100644 index 00000000000..20b25c8ae09 --- /dev/null +++ b/docker/vttestserver/Dockerfile.mysql57 @@ -0,0 +1,61 @@ +# Copyright 2021 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: We have to build the Vitess binaries from scratch instead of sharing +# a base image because Docker Hub dropped the feature we relied upon to +# ensure images contain the right binaries. + +# Use a temporary layer for the build stage. +ARG bootstrap_version=1 +ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" + +FROM "${image}" AS builder + +# Allows docker builds to set the BUILD_NUMBER +ARG BUILD_NUMBER + +# Re-copy sources from working tree. +COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess + +# Build and install Vitess in a temporary output directory. +USER vitess +RUN make install-testing PREFIX=/vt/install + +# Start over and build the final image. +FROM debian:buster-slim + +# Install dependencies +COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh +RUN /vt/dist/install_dependencies.sh mysql57 + +# Set up Vitess user and directory tree. +RUN groupadd -r vitess && useradd -r -g vitess vitess +RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt + +# Set up Vitess environment (just enough to run pre-built Go binaries) +ENV VTROOT /vt +ENV VTDATAROOT /vt/vtdataroot +ENV PATH $VTROOT/bin:$PATH + +# Copy artifacts from builder layer. +COPY --from=builder --chown=vitess:vitess /vt/install /vt + +# Create mount point for actual data (e.g. MySQL data dir) +VOLUME /vt/vtdataroot +USER vitess + +COPY docker/vttestserver/setup_vschema_folder.sh /vt/setup_vschema_folder.sh +COPY docker/vttestserver/run.sh /vt/run.sh + +CMD /vt/run.sh "5.7.9-vitess" diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80 new file mode 100644 index 00000000000..10c68e24e2b --- /dev/null +++ b/docker/vttestserver/Dockerfile.mysql80 @@ -0,0 +1,61 @@ +# Copyright 2021 The Vitess Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: We have to build the Vitess binaries from scratch instead of sharing +# a base image because Docker Hub dropped the feature we relied upon to +# ensure images contain the right binaries. + +# Use a temporary layer for the build stage. +ARG bootstrap_version=1 +ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" + +FROM "${image}" AS builder + +# Allows docker builds to set the BUILD_NUMBER +ARG BUILD_NUMBER + +# Re-copy sources from working tree. +COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess + +# Build and install Vitess in a temporary output directory. +USER vitess +RUN make install-testing PREFIX=/vt/install + +# Start over and build the final image. +FROM debian:buster-slim + +# Install dependencies +COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh +RUN /vt/dist/install_dependencies.sh mysql80 + +# Set up Vitess user and directory tree. +RUN groupadd -r vitess && useradd -r -g vitess vitess +RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt + +# Set up Vitess environment (just enough to run pre-built Go binaries) +ENV VTROOT /vt +ENV VTDATAROOT /vt/vtdataroot +ENV PATH $VTROOT/bin:$PATH + +# Copy artifacts from builder layer. +COPY --from=builder --chown=vitess:vitess /vt/install /vt + +# Create mount point for actual data (e.g. 
MySQL data dir) +VOLUME /vt/vtdataroot +USER vitess + +COPY docker/vttestserver/setup_vschema_folder.sh /vt/setup_vschema_folder.sh +COPY docker/vttestserver/run.sh /vt/run.sh + +CMD /vt/run.sh "8.0.21-vitess" diff --git a/docker/vttestserver/run.sh b/docker/vttestserver/run.sh new file mode 100755 index 00000000000..23de153459e --- /dev/null +++ b/docker/vttestserver/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright 2021 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Setup the Vschema Folder +/vt/setup_vschema_folder.sh "$KEYSPACES" "$NUM_SHARDS" +# Run the vttestserver binary +/vt/bin/vttestserver -port "$PORT" -keyspaces "$KEYSPACES" -num_shards "$NUM_SHARDS" -mysql_bind_host "${MYSQL_BIND_HOST:-127.0.0.1}" -mysql_server_version "${MYSQL_SERVER_VERSION:-$1}" -vschema_ddl_authorized_users=% -schema_dir="/vt/schema/" \ No newline at end of file diff --git a/docker/vttestserver/setup_vschema_folder.sh b/docker/vttestserver/setup_vschema_folder.sh new file mode 100755 index 00000000000..fcf1bf746f1 --- /dev/null +++ b/docker/vttestserver/setup_vschema_folder.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# Copyright 2021 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# getLength gets the number of elements in a comma separated string +function getLength() { + COUNT=0 + for _ in ${1//,/ } + do + ((COUNT++)) + done + echo "$COUNT" +} + +# The first argument to the file is a comma separated list of keyspaces and the second argument is the comma separated list of number of shards. + +KEYSPACES="$1" +NUM_SHARDS="$2" + +COUNT_KEYSPACES=$(getLength "$KEYSPACES") +COUNT_NUM_SHARDS=$(getLength "$NUM_SHARDS") + +# In case the number of keyspaces and num_shards do not match, throw an error +if [ "$COUNT_KEYSPACES" != "$COUNT_NUM_SHARDS" ]; then + echo "Incompatible list of keyspaces and number of shards" + exit 1 +fi + +# Convert the strings to lists +read -ra KEYSPACES_LIST <<<"${KEYSPACES//,/ }" +read -ra NUM_SHARDS_LIST <<<"${NUM_SHARDS//,/ }" + +# create the main schema directory +mkdir /vt/schema/ + +i=0; +for keyspace in "${KEYSPACES_LIST[@]}"; +do + # create a directory for each keyspace + mkdir "/vt/schema/$keyspace" + num_shard=${NUM_SHARDS_LIST[$i]} + # Create a vschema.json file only if the number of shards is more than 1 + if [[ $num_shard -gt "1" ]]; then + printf "{\n\t\"sharded\": true\n}" > "/vt/schema/$keyspace/vschema.json" + fi + ((i++)) +done diff --git a/examples/common/select_commerce_data.sql b/examples/common/select_commerce_data.sql index d7494950b2d..dd7f624d7ca 100644 --- a/examples/common/select_commerce_data.sql +++ b/examples/common/select_commerce_data.sql @@ -1,5 +1,5 @@ -\! echo 'Using commerce/0' -use commerce/0; +\! echo 'Using commerce' +use commerce; \!
echo 'Customer' select * from customer; \! echo 'Product' diff --git a/examples/demo/demo.go b/examples/demo/demo.go index 101d49d430e..19c4677beef 100644 --- a/examples/demo/demo.go +++ b/examples/demo/demo.go @@ -19,6 +19,7 @@ package main import ( "bufio" "context" + "encoding/hex" "encoding/json" "flag" "fmt" @@ -60,14 +61,14 @@ func runCluster() { Config: vttest.Config{ Topology: &vttestpb.VTTestTopology{ Keyspaces: []*vttestpb.Keyspace{{ - Name: "user", + Name: "customer", Shards: []*vttestpb.Shard{{ Name: "-80", }, { Name: "80-", }}, }, { - Name: "lookup", + Name: "product", Shards: []*vttestpb.Shard{{ Name: "0", }}, @@ -75,6 +76,8 @@ func runCluster() { }, SchemaDir: path.Join(os.Getenv("VTROOT"), "examples/demo/schema"), MySQLBindHost: "0.0.0.0", + // VSchemaDDLAuthorizedUsers allows you to experiment with vschema DDLs. + VSchemaDDLAuthorizedUsers: "%", }, } env, err := vttest.NewLocalTestEnv("", 12345) @@ -140,21 +143,17 @@ func exec(w http.ResponseWriter, req *http.Request) { } response["queries"] = queries - execQuery(conn, "user0", "select * from user", "user", "-80", response) - execQuery(conn, "user1", "select * from user", "user", "80-", response) - execQuery(conn, "user_extra0", "select * from user_extra", "user", "-80", response) - execQuery(conn, "user_extra1", "select * from user_extra", "user", "80-", response) - execQuery(conn, "music0", "select * from music", "user", "-80", response) - execQuery(conn, "music1", "select * from music", "user", "80-", response) - execQuery(conn, "music_extra0", "select * from music_extra", "user", "-80", response) - execQuery(conn, "music_extra1", "select * from music_extra", "user", "80-", response) - execQuery(conn, "name_info0", "select * from name_info", "user", "-80", response) - execQuery(conn, "name_info1", "select * from name_info", "user", "80-", response) - execQuery(conn, "music_keyspace_idx0", "select music_id, hex(keyspace_id) from music_keyspace_idx", "user", "-80", response) - execQuery(conn, 
"music_keyspace_idx1", "select music_id, hex(keyspace_id) from music_keyspace_idx", "user", "80-", response) - execQuery(conn, "user_seq", "select * from user_seq", "lookup", "0", response) - execQuery(conn, "music_seq", "select * from music_seq", "lookup", "0", response) - execQuery(conn, "name_keyspace_idx", "select name, hex(keyspace_id) from name_keyspace_idx", "lookup", "0", response) + execQuery(conn, "customer0", "select * from customer", "customer", "-80", response) + execQuery(conn, "customer1", "select * from customer", "customer", "80-", response) + execQuery(conn, "corder0", "select * from corder", "customer", "-80", response) + execQuery(conn, "corder1", "select * from corder", "customer", "80-", response) + execQuery(conn, "corder_event0", "select * from corder_event", "customer", "-80", response) + execQuery(conn, "corder_event1", "select * from corder_event", "customer", "80-", response) + execQuery(conn, "oname_keyspace_idx0", "select * from oname_keyspace_idx", "customer", "-80", response) + execQuery(conn, "oname_keyspace_idx1", "select * from oname_keyspace_idx", "customer", "80-", response) + execQuery(conn, "product", "select * from product", "product", "0", response) + execQuery(conn, "customer_seq", "select * from customer_seq", "product", "0", response) + execQuery(conn, "corder_keyspace_idx", "select * from corder_keyspace_idx", "product", "0", response) enc.Encode(response) } @@ -181,6 +180,9 @@ func execQuery(conn *mysql.Conn, key, query, keyspace, shard string, response ma } qr, err := conn.ExecuteFetch(query, 10000, true) if err != nil { + if strings.Contains(err.Error(), "doesn't exist") { + return + } response[key] = map[string]interface{}{ "title": title, "error": err.Error(), @@ -200,7 +202,11 @@ func resultToMap(title string, qr *sqltypes.Result) map[string]interface{} { for _, row := range qr.Rows { srow := make([]string, 0, len(row)) for _, value := range row { - srow = append(srow, value.ToString()) + if value.Type() == 
sqltypes.VarBinary { + srow = append(srow, hex.EncodeToString(value.ToBytes())) + } else { + srow = append(srow, value.ToString()) + } } rows = append(rows, srow) } diff --git a/examples/demo/index.html b/examples/demo/index.html index 7f86833da99..3144c4501a6 100644 --- a/examples/demo/index.html +++ b/examples/demo/index.html @@ -5,67 +5,73 @@ href="https://stackpath.bootstrapcdn.com/bootswatch/4.3.1/minty/bootstrap.min.css"> -Vitess V3 demo +Vitess VSchema demo
-

Vitess V3 demo

+

Vitess VSchema demo

-
+

customer

-
-
-
-
+
-
-
-
-
+
-
-
+
+
+
-
+
+
-
+
+
+
+
-
-
+
-
+
+
+
+
-
+
+
-
+
-

lookup

+

product

-
-
-
diff --git a/examples/demo/index.js b/examples/demo/index.js index 846d02a67ec..549f15f3c4b 100644 --- a/examples/demo/index.js +++ b/examples/demo/index.js @@ -19,49 +19,25 @@ function DemoController($scope, $http) { function init() { $scope.samples = [ - "insert into user(name) values('test1') /* run this at least 6 times with different values of name */", - "insert into user(name) values(null) /* error: name must be supplied */", - "select user_id, name from user where user_id=6 /* unique select */", - "select user_id, name from user where name='test1' /* non-unique select */", - "select user_id, name from user where user_id in (1, 6) /* unique multi-select */", - "select user_id, name from user where name in ('test1', 'test2') /* non-unique multi-select */", - "select user_id, name from user /* scatter */", - "select count(*) from user where user_id=1 /* aggregation on unique vindex */", - "select count(*) from user where name='foo' /* error: aggregation on non-unique vindex */", - "select user_id, name from user where user_id=1 limit 1 /* limit on unique vindex */", - "update user set user_id=1 where user_id=2 /* error: cannot change vindex columns */", - "delete from user where user_id=1 /* other 'test1' in name_user_idx unaffected */", - "delete from user where name='test1' /* error: cannot delete by non-unique vindex */", - "", - "insert into user_extra(user_id, extra) values(1, 'extra1')", - "insert into user_extra(user_id, extra) values(2, 'test1')", - "insert into user_extra(user_id, extra) values(6, 'test2')", - "insert into user_extra(extra) values('extra1') /* error: must supply value for user_id */", - "select user_id, extra from user_extra where extra='extra1' /* scatter */", - "update user_extra set extra='extra2' where user_id=1 /* allowed */", - "delete from user_extra where user_id=1 /* vindexes are unchanged */", - "", - "insert into music(user_id) values(1) /* auto-inc on music_id */", - "select user_id, music_id from music where user_id=1", - 
"delete from music where music_id=6 /* one row deleted */", - "delete from music where user_id=1 /* multiple rows deleted */", - "", - "insert into name_info(name, info) values('test1', 'test info')", - "insert into name_info(name, info) values('test4', 'test info 4')", - "select u.user_id, n.info from user u join name_info n on u.name=n.name where u.user_id=1", - "", - "select u.user_id, u.name, e.user_id, e.extra from user u join user_extra e on u.user_id = e.user_id where u.user_id = 2 /* simple, single row join */", - "select u.user_id, u.name, e.user_id, e.extra from user u join user_extra e on u.user_id = e.user_id /* simple, scatter join */", - "select u.user_id, u.name, e.user_id, e.extra from user u join user_extra e on u.name != e.extra where u.user_id = 2 /* simple, cross-shard complex join */", - "select u1.user_id, u1.name, u2.user_id, u2.name from user u1 join user u2 on u1.name = u2.name where u1.user_id = 2 /* self-join */", - "select u.user_id, u.name, e.user_id, e.extra from user u left join user_extra e on u.user_id = e.user_id /* left join */", - "select u.user_id, u.name, e.user_id, e.extra from user u left join user_extra e on u.name != e.extra where u.user_id = 2 /* cross-shard left join */", - "select user_id, name from user where name in (select extra from user_extra where user_id = user.user_id) /* correlated subquery */", - "select count(*), u.user_id, u.name, e.extra from user u join user_extra e on u.user_id = e.user_id /* aggregates */", - "select u.user_id, u.name, m.music_id, m.user_id from user u join music m on u.user_id = m.music_id where u.user_id = 1 order by u.user_id, u.name, m.user_id /* order by, in spite of odd join */", - "", - "insert into music_extra(music_id) values(1) /* keyspace_id back-computed */", - "insert into music_extra(music_id, keyspace_id) values(1, 1) /* invalid keyspace id */", + "insert into product(pname) values ('monitor'), ('keyboard')", + "select * from product /* pass-through to unsharded keyspace 
*/", + "--", + "insert into customer(uname) values('alice'),('bob'),('charlie'),('dan'),('eve') /* multi-shard insert */", + "select * from customer where customer_id=1 /* use hash vindex */", + "select * from customer where uname='bob' /* scatter */", + "update customer set customer_id=10 where customer_id=1 /* error: cannot change primary vindex column */", + "delete from customer where uname='eve' /* scatter DML */", + "--", + "insert into corder(customer_id, product_id, oname) values (1,1,'gift'),(1,2,'gift'),(2,1,'work'),(3,2,'personal'),(4,1,'personal') /* orders are grouped with their customer, observe lookup table changes */", + "select * from corder where customer_id=1 /* single-shard select */", + "select * from corder where corder_id=1 /* use unique lookup */", + "select * from corder where oname='gift' /* use non-unique lookup, also try with 'work' and 'personal' */", + "select c.uname, o.oname, o.product_id from customer c join corder o on c.customer_id = o.customer_id where c.customer_id=1 /* local join */", + "select c.uname, o.oname, o.product_id from customer c join corder o on c.customer_id = o.customer_id /* scatter local join */", + "select c.uname, o.oname, p.pname from customer c join corder o on c.customer_id = o.customer_id join product p on o.product_id = p.product_id /* cross-shard join */", + "delete from corder where corder_id=3 /* delete also deletes lookup entries */", + "--", + "insert into corder_event(corder_id, ename) values(1, 'paid'), (5, 'delivered') /* automatic population of keyspace_id */", ]; $scope.submitQuery() } diff --git a/examples/demo/schema/customer/customer_schema.sql b/examples/demo/schema/customer/customer_schema.sql new file mode 100644 index 00000000000..aeabb800b10 --- /dev/null +++ b/examples/demo/schema/customer/customer_schema.sql @@ -0,0 +1,4 @@ +create table customer(customer_id bigint, uname varchar(128), primary key(customer_id)); +create table corder(corder_id bigint, customer_id bigint, product_id 
bigint, oname varchar(128), primary key(corder_id)); +create table corder_event(corder_event_id bigint, corder_id bigint, ename varchar(128), keyspace_id varbinary(10), primary key(corder_id, corder_event_id)); +create table oname_keyspace_idx(oname varchar(128), corder_id bigint, keyspace_id varbinary(10), primary key(oname, corder_id)); diff --git a/examples/demo/schema/customer/vschema.json b/examples/demo/schema/customer/vschema.json new file mode 100644 index 00000000000..9c361475227 --- /dev/null +++ b/examples/demo/schema/customer/vschema.json @@ -0,0 +1,79 @@ +{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + }, + "corder_keyspace_idx": { + "type": "consistent_lookup_unique", + "params": { + "table": "product.corder_keyspace_idx", + "from": "corder_id", + "to": "keyspace_id" + }, + "owner": "corder" + }, + "oname_keyspace_idx": { + "type": "consistent_lookup", + "params": { + "table": "customer.oname_keyspace_idx", + "from": "oname,corder_id", + "to": "keyspace_id" + }, + "owner": "corder" + }, + "unicode_loose_md5": { + "type": "unicode_loose_md5" + }, + "binary": { + "type": "binary" + } + }, + "tables": { + "customer": { + "column_vindexes": [{ + "column": "customer_id", + "name": "hash" + }], + "auto_increment": { + "column": "customer_id", + "sequence": "product.customer_seq" + } + }, + "corder": { + "column_vindexes": [{ + "column": "customer_id", + "name": "hash" + }, { + "column": "corder_id", + "name": "corder_keyspace_idx" + }, { + "columns": ["oname", "corder_id"], + "name": "oname_keyspace_idx" + }], + "auto_increment": { + "column": "corder_id", + "sequence": "product.corder_seq" + } + }, + "corder_event": { + "column_vindexes": [{ + "column": "corder_id", + "name": "corder_keyspace_idx" + }, { + "column": "keyspace_id", + "name": "binary" + }], + "auto_increment": { + "column": "corder_event_id", + "sequence": "product.corder_event_seq" + } + }, + "oname_keyspace_idx": { + "column_vindexes": [{ + "column": "oname", + "name": 
"unicode_loose_md5" + }] + } + } +} diff --git a/examples/demo/schema/lookup/lookup_schema.sql b/examples/demo/schema/lookup/lookup_schema.sql deleted file mode 100644 index 10ef4d3d16a..00000000000 --- a/examples/demo/schema/lookup/lookup_schema.sql +++ /dev/null @@ -1,5 +0,0 @@ -create table user_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; -insert into user_seq(id, next_id, cache) values(0, 1, 3); -create table music_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; -insert into music_seq(id, next_id, cache) values(0, 1, 2); -create table name_keyspace_idx(name varchar(128), keyspace_id binary(8), primary key(name, keyspace_id)); diff --git a/examples/demo/schema/lookup/vschema.json b/examples/demo/schema/lookup/vschema.json deleted file mode 100644 index b65f1f337ca..00000000000 --- a/examples/demo/schema/lookup/vschema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "sharded": false, - "tables": { - "user_seq": { - "type": "sequence" - }, - "music_seq": { - "type": "sequence" - }, - "name_keyspace_idx": {} - } -} diff --git a/examples/demo/schema/product/product_schema.sql b/examples/demo/schema/product/product_schema.sql new file mode 100644 index 00000000000..766da1d5838 --- /dev/null +++ b/examples/demo/schema/product/product_schema.sql @@ -0,0 +1,8 @@ +create table product(product_id bigint auto_increment, pname varchar(128), primary key(product_id)); +create table customer_seq(id bigint, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; +insert into customer_seq(id, next_id, cache) values(0, 1, 3); +create table corder_seq(id bigint, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; +insert into corder_seq(id, next_id, cache) values(0, 1, 3); +create table corder_event_seq(id bigint, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; +insert into corder_event_seq(id, next_id, cache) values(0, 1, 3); +create table 
corder_keyspace_idx(corder_id bigint not null auto_increment, keyspace_id varbinary(10), primary key(corder_id)); diff --git a/examples/demo/schema/product/vschema.json b/examples/demo/schema/product/vschema.json new file mode 100644 index 00000000000..712e8c1a277 --- /dev/null +++ b/examples/demo/schema/product/vschema.json @@ -0,0 +1,10 @@ +{ + "sharded": false, + "tables": { + "product": {}, + "customer_seq": { "type": "sequence" }, + "corder_seq": { "type": "sequence" }, + "corder_event_seq": { "type": "sequence" }, + "corder_keyspace_idx": {} + } +} diff --git a/examples/demo/schema/user/user_schema.sql b/examples/demo/schema/user/user_schema.sql deleted file mode 100644 index 08c79a9f732..00000000000 --- a/examples/demo/schema/user/user_schema.sql +++ /dev/null @@ -1,6 +0,0 @@ -create table user(user_id bigint, name varchar(128), primary key(user_id)); -create table user_extra(user_id bigint, extra varchar(128), primary key(user_id)); -create table music(user_id bigint, music_id bigint, primary key(user_id, music_id)); -create table music_extra(music_id bigint, keyspace_id bigint unsigned, primary key(music_id)); -create table name_info(name varchar(128), info varchar(128), primary key(name)); -create table music_keyspace_idx(music_id bigint not null auto_increment, keyspace_id binary(8), primary key(music_id)); diff --git a/examples/demo/schema/user/vschema.json b/examples/demo/schema/user/vschema.json deleted file mode 100644 index 0cf0f42e6d1..00000000000 --- a/examples/demo/schema/user/vschema.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - }, - "unicode_loose_md5": { - "type": "unicode_loose_md5" - }, - "name_keyspace_idx": { - "type": "lookup", - "params": { - "table": "name_keyspace_idx", - "from": "name", - "to": "keyspace_id" - }, - "owner": "user" - }, - "music_keyspace_idx": { - "type": "lookup_unique", - "params": { - "table": "music_keyspace_idx", - "from": "music_id", - "to": 
"keyspace_id" - }, - "owner": "music" - }, - "keyspace_idx": { - "type": "numeric" - } - }, - "tables": { - "user": { - "column_vindexes": [ - { - "column": "user_id", - "name": "hash" - }, - { - "column": "name", - "name": "name_keyspace_idx" - } - ], - "auto_increment": { - "column": "user_id", - "sequence": "user_seq" - } - }, - "user_extra": { - "column_vindexes": [ - { - "column": "user_id", - "name": "hash" - } - ] - }, - "music": { - "column_vindexes": [ - { - "column": "user_id", - "name": "hash" - }, - { - "column": "music_id", - "name": "music_keyspace_idx" - } - ], - "auto_increment": { - "column": "music_id", - "sequence": "music_seq" - } - }, - "music_extra": { - "column_vindexes": [ - { - "column": "music_id", - "name": "music_keyspace_idx" - }, - { - "column": "keyspace_id", - "name": "keyspace_idx" - } - ] - }, - "name_info": { - "column_vindexes": [ - { - "column": "name", - "name": "unicode_loose_md5" - } - ] - }, - "music_keyspace_idx": { - "column_vindexes": [ - { - "column": "music_id", - "name": "hash" - } - ] - } - } -} diff --git a/examples/demo/vschema_ddls.sql b/examples/demo/vschema_ddls.sql new file mode 100644 index 00000000000..3347fbe2b86 --- /dev/null +++ b/examples/demo/vschema_ddls.sql @@ -0,0 +1,29 @@ +-- Unsharded Keyspace +alter vschema add table product.product; + +-- Sharded Keyspace +alter vschema on customer.customer add vindex hash(customer_id) using hash; + +-- Sequences +alter vschema add sequence product.customer_seq; +alter vschema on customer.customer add auto_increment customer_id using product.customer_seq; + +-- Shared Vindexes and Foreign Keys +alter vschema on customer.corder add vindex hash(customer_id); +alter vschema add sequence product.corder_seq; +alter vschema on customer.corder add auto_increment corder_id using product.corder_seq; + +-- Unique Lookup Vindexes +alter vschema add table product.corder_keyspace_idx; +alter vschema on customer.corder add vindex corder_keyspace_idx(corder_id) using 
consistent_lookup_unique with owner=`corder`, table=`product.corder_keyspace_idx`, from=`corder_id`, to=`keyspace_id`; + +-- Non-Unique Lookup Vindexes +alter vschema on customer.oname_keyspace_idx add vindex unicode_loose_md5(oname) using unicode_loose_md5; +alter vschema on customer.corder add vindex oname_keyspace_idx(oname,corder_id) using consistent_lookup with owner=`corder`, table=`customer.oname_keyspace_idx`, from=`oname,corder_id`, to=`keyspace_id`; + +-- Lookup as Primary Vindex +alter vschema add sequence product.corder_event_seq; +alter vschema on customer.corder_event add vindex corder_keyspace_idx(corder_id); +alter vschema on customer.corder_event add auto_increment corder_event_id using product.corder_event_seq; +-- Reversible Vindexes +alter vschema on customer.corder_event add vindex `binary`(keyspace_id) using `binary`; diff --git a/examples/local/101_initial_cluster.sh b/examples/local/101_initial_cluster.sh index 5d42d908f6e..40c83ad8ec8 100755 --- a/examples/local/101_initial_cluster.sh +++ b/examples/local/101_initial_cluster.sh @@ -38,7 +38,7 @@ for i in 100 101 102; do done # set one of the replicas to master -vtctlclient InitShardMaster -force commerce/0 zone1-100 +vtctldclient InitShardPrimary --force commerce/0 zone1-100 # create the schema vtctlclient ApplySchema -sql-file create_commerce_schema.sql commerce diff --git a/examples/local/201_customer_tablets.sh b/examples/local/201_customer_tablets.sh index b53ffb6cd16..bb7ef6d531f 100755 --- a/examples/local/201_customer_tablets.sh +++ b/examples/local/201_customer_tablets.sh @@ -25,4 +25,4 @@ for i in 200 201 202; do CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh done -vtctlclient InitShardMaster -force customer/0 zone1-200 +vtctldclient InitShardPrimary --force customer/0 zone1-200 diff --git a/examples/local/302_new_shards.sh b/examples/local/302_new_shards.sh index 33f2ec294e1..3aede2b3d6e 100755 --- a/examples/local/302_new_shards.sh +++ 
b/examples/local/302_new_shards.sh @@ -29,5 +29,5 @@ for i in 400 401 402; do SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh done -vtctlclient InitShardMaster -force customer/-80 zone1-300 -vtctlclient InitShardMaster -force customer/80- zone1-400 +vtctldclient InitShardPrimary --force customer/-80 zone1-300 +vtctldclient InitShardPrimary --force customer/80- zone1-400 diff --git a/examples/local/README.md b/examples/local/README.md index c6d8510cc52..030b7299ce5 100644 --- a/examples/local/README.md +++ b/examples/local/README.md @@ -5,12 +5,12 @@ This document contains the summary of the commands to be run. ``` +# Setup environment and aliases +source env.sh + # Bring up initial cluster and commerce keyspace ./101_initial_cluster.sh -# Setup aliases -source alias.source - # Insert and verify data mysql < ../common/insert_commerce_data.sql mysql --table < ../common/select_commerce_data.sql diff --git a/examples/local/env.sh b/examples/local/env.sh index c0b982a3dbe..e66871478ce 100644 --- a/examples/local/env.sh +++ b/examples/local/env.sh @@ -76,6 +76,7 @@ mkdir -p "${VTDATAROOT}/tmp" alias mysql="command mysql -h 127.0.0.1 -P 15306" alias vtctlclient="command vtctlclient -server localhost:15999 -log_dir ${VTDATAROOT}/tmp -alsologtostderr" +alias vtctldclient="command vtctldclient --server localhost:15999" # Make sure aliases are expanded in non-interactive shell shopt -s expand_aliases diff --git a/examples/local/scripts/vtadmin-up.sh b/examples/local/scripts/vtadmin-up.sh new file mode 100755 index 00000000000..de554b7f880 --- /dev/null +++ b/examples/local/scripts/vtadmin-up.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +source ./env.sh + +log_dir="${VTDATAROOT}/tmp" +vtadmin_api_port=14200 + +vtadmin \ + --addr ":${vtadmin_api_port}" \ + --http-origin "http://localhost:3000" \ + --logtostderr \ + --alsologtostderr \ + --cluster "id=local,name=local,discovery=staticfile,discovery-staticfile-path=./vtadmin/discovery.json" \ + > 
"${log_dir}/vtadmin-api.out" 2>&1 & +vtadmin_pid=$! + +function cleanup() { + kill -9 "${vtadmin_pid}" + + echo + echo "Shutdown complete!" +} + +trap cleanup INT QUIT TERM + +echo "vtadmin-api is up! Logs are in ${log_dir}/vtadmin-api.out, and its PID is ${vtadmin_pid}" + +( + cd ../../web/vtadmin && + npm install && + REACT_APP_VTADMIN_API_ADDRESS="http://127.0.0.1:${vtadmin_api_port}" \ + npm run start +) diff --git a/examples/local/vtadmin/discovery.json b/examples/local/vtadmin/discovery.json new file mode 100644 index 00000000000..7aa60f530ff --- /dev/null +++ b/examples/local/vtadmin/discovery.json @@ -0,0 +1,16 @@ +{ + "vtctlds": [ + { + "host": { + "hostname": "localhost:15999" + } + } + ], + "vtgates": [ + { + "host": { + "hostname": "localhost:15991" + } + } + ] +} diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml index 2fe2b020de9..93bae05a60b 100644 --- a/examples/operator/101_initial_cluster.yaml +++ b/examples/operator/101_initial_cluster.yaml @@ -8,12 +8,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v9.0.0 - vtgate: vitess/lite:v9.0.0 - vttablet: vitess/lite:v9.0.0 - vtbackup: vitess/lite:v9.0.0 + vtctld: vitess/lite:v10.0.1 + vtgate: vitess/lite:v10.0.1 + vttablet: vitess/lite:v10.0.1 + vtbackup: vitess/lite:v10.0.1 mysqld: - mysql56Compatible: vitess/lite:v9.0.0 + mysql56Compatible: vitess/lite:v10.0.1 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml index 81ec63e8e78..0e9d8f0d893 100644 --- a/examples/operator/201_customer_tablets.yaml +++ b/examples/operator/201_customer_tablets.yaml @@ -4,12 +4,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v9.0.0 - vtgate: vitess/lite:v9.0.0 - vttablet: vitess/lite:v9.0.0 - vtbackup: vitess/lite:v9.0.0 + vtctld: vitess/lite:v10.0.1 + vtgate: vitess/lite:v10.0.1 + vttablet: vitess/lite:v10.0.1 + vtbackup: 
vitess/lite:v10.0.1 mysqld: - mysql56Compatible: vitess/lite:v9.0.0 + mysql56Compatible: vitess/lite:v10.0.1 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml index d02c2e41a25..c600aa2edc7 100644 --- a/examples/operator/302_new_shards.yaml +++ b/examples/operator/302_new_shards.yaml @@ -4,12 +4,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v9.0.0 - vtgate: vitess/lite:v9.0.0 - vttablet: vitess/lite:v9.0.0 - vtbackup: vitess/lite:v9.0.0 + vtctld: vitess/lite:v10.0.1 + vtgate: vitess/lite:v10.0.1 + vttablet: vitess/lite:v10.0.1 + vtbackup: vitess/lite:v10.0.1 mysqld: - mysql56Compatible: vitess/lite:v9.0.0 + mysql56Compatible: vitess/lite:v10.0.1 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml index 28a4d71d160..c4d4b1c4dbe 100644 --- a/examples/operator/306_down_shard_0.yaml +++ b/examples/operator/306_down_shard_0.yaml @@ -4,12 +4,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v9.0.0 - vtgate: vitess/lite:v9.0.0 - vttablet: vitess/lite:v9.0.0 - vtbackup: vitess/lite:v9.0.0 + vtctld: vitess/lite:v10.0.1 + vtgate: vitess/lite:v10.0.1 + vttablet: vitess/lite:v10.0.1 + vtbackup: vitess/lite:v10.0.1 mysqld: - mysql56Compatible: vitess/lite:v9.0.0 + mysql56Compatible: vitess/lite:v10.0.1 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/README.md b/examples/operator/README.md index 7bdf3d2b808..83f3013dc22 100644 --- a/examples/operator/README.md +++ b/examples/operator/README.md @@ -14,7 +14,7 @@ vtctlclient ApplyVSchema -vschema="$(cat vschema_commerce_initial.json)" commerc # Insert and verify data mysql < ../common/insert_commerce_data.sql -mysql --table < select_commerce_data.sql +mysql --table < ../common/select_commerce_data.sql # Bring up customer keyspace 
kubectl apply -f 201_customer_tablets.yaml diff --git a/examples/operator/operator.yaml b/examples/operator/operator.yaml index 916102603c6..9bbadc8221e 100644 --- a/examples/operator/operator.yaml +++ b/examples/operator/operator.yaml @@ -5773,7 +5773,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: vitess-operator - image: planetscale/vitess-operator:v2.3.0 + image: planetscale/vitess-operator:v2.4.1 imagePullPolicy: IfNotPresent name: vitess-operator resources: diff --git a/examples/operator/select_commerce_data.sql b/examples/operator/select_commerce_data.sql deleted file mode 100644 index dd7f624d7ca..00000000000 --- a/examples/operator/select_commerce_data.sql +++ /dev/null @@ -1,8 +0,0 @@ -\! echo 'Using commerce' -use commerce; -\! echo 'Customer' -select * from customer; -\! echo 'Product' -select * from product; -\! echo 'COrder' -select * from corder; diff --git a/examples/operator/vtorc_example.yaml b/examples/operator/vtorc_example.yaml index 63d01861267..ac62a7a296d 100644 --- a/examples/operator/vtorc_example.yaml +++ b/examples/operator/vtorc_example.yaml @@ -8,13 +8,13 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v9.0.0 - vtorc: vitess/lite:v9.0.0 - vtgate: vitess/lite:v9.0.0 - vttablet: vitess/lite:v9.0.0 - vtbackup: vitess/lite:v9.0.0 + vtctld: vitess/lite:v10.0.1 + vtorc: vitess/lite:v10.0.1 + vtgate: vitess/lite:v10.0.1 + vttablet: vitess/lite:v10.0.1 + vtbackup: vitess/lite:v10.0.1 mysqld: - mysql56Compatible: vitess/lite:v9.0.0 + mysql56Compatible: vitess/lite:v10.0.1 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/region_sharding/README.md b/examples/region_sharding/README.md index 21c53f1b497..a83bf1e54c4 100644 --- a/examples/region_sharding/README.md +++ b/examples/region_sharding/README.md @@ -9,14 +9,12 @@ This document contains the summary of the commands to be run. 
# Example: "region_map": "/home/user/vitess/examples/region_sharding/countries.json", +# setup environment and aliases +source env.sh # Bring up initial cluster and main keyspace (unsharded) ./101_initial_cluster.sh -# setup aliases -alias mysql="command mysql -h 127.0.0.1 -P 15306" -alias vtctlclient="command vtctlclient -server localhost:15999 -log_dir ${VTDATAROOT}/tmp -alsologtostderr" - # Insert and verify data mysql < insert_customers.sql mysql --table < show_initial_data.sql diff --git a/go.mod b/go.mod index f2a1bab366f..fb655e991ed 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.15 require ( cloud.google.com/go/storage v1.0.0 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20210330150358-dbd898e17899 github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/azure-storage-blob-go v0.10.0 github.com/Azure/go-autorest/autorest v0.10.0 // indirect @@ -22,17 +23,18 @@ require ( github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/corpix/uarand v0.1.1 // indirect github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 + github.com/dave/jennifer v1.4.1 github.com/evanphx/json-patch v4.5.0+incompatible github.com/fsnotify/fsnotify v1.4.9 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab - github.com/go-sql-driver/mysql v1.5.0 - github.com/gogo/protobuf v1.3.1 // indirect + github.com/go-sql-driver/mysql v1.5.1-0.20210202043019-fe2230a8b20c + github.com/gogo/protobuf v1.3.1 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect github.com/golang/mock v1.3.1 - github.com/golang/protobuf v1.3.2 + github.com/golang/protobuf v1.3.3 github.com/golang/snappy v0.0.1 - github.com/google/go-cmp v0.4.0 + github.com/google/go-cmp v0.5.2 
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.1.1 github.com/googleapis/gnostic v0.2.0 // indirect @@ -46,24 +48,21 @@ require ( github.com/hashicorp/go-msgpack v0.5.5 github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/hashicorp/serf v0.9.2 // indirect github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 github.com/imdario/mergo v0.3.6 // indirect + github.com/jmoiron/sqlx v1.2.0 github.com/klauspost/compress v1.4.1 // indirect github.com/klauspost/cpuid v1.2.0 // indirect github.com/klauspost/pgzip v1.2.4 - github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/krishicks/yaml-patch v0.0.10 - github.com/looplab/fsm v0.2.0 github.com/magiconair/properties v1.8.1 github.com/martini-contrib/auth v0.0.0-20150219114609-fa62c19b7ae8 github.com/martini-contrib/gzip v0.0.0-20151124214156-6c035326b43f github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 github.com/mattn/go-sqlite3 v1.14.0 github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 - github.com/mitchellh/go-ps v1.0.0 // indirect github.com/mitchellh/go-testing-interface v1.14.0 // indirect github.com/mitchellh/mapstructure v1.2.3 // indirect github.com/montanaflynn/stats v0.6.3 @@ -84,6 +83,7 @@ require ( github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e github.com/satori/go.uuid v1.2.0 // indirect github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1 + github.com/skeema/tengo v0.0.0-00010101000000-000000000000 github.com/soheilhy/cmux v0.1.4 
github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 @@ -100,12 +100,12 @@ require ( golang.org/x/lint v0.0.0-20190930215403-16217165b5de golang.org/x/net v0.0.0-20201021035429-f5854403a974 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a golang.org/x/text v0.3.3 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/tools v0.0.0-20201202200335-bef1c476418a google.golang.org/api v0.13.0 - google.golang.org/grpc v1.24.0 + google.golang.org/grpc v1.29.1 gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/gcfg.v1 v1.2.3 @@ -117,6 +117,12 @@ require ( k8s.io/apiextensions-apiserver v0.17.3 k8s.io/apimachinery v0.17.3 k8s.io/client-go v0.17.3 - k8s.io/utils v0.0.0-20191114184206-e782cd3c129f + k8s.io/code-generator v0.17.3 sigs.k8s.io/yaml v1.1.0 ) + +replace github.com/skeema/tengo => github.com/planetscale/tengo v0.9.6-ps.v1 + +// (NOTE:@ajm188) Something we depend on depends on moby/term, and that version +// of moby/term has this issue: https://github.com/moby/term/issues/15. 
+replace golang.org/x/sys => golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 diff --git a/go.sum b/go.sum index b2bd789c34f..5815ed537bd 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,10 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= @@ -18,17 +18,18 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210330150358-dbd898e17899 h1:Cm0cjER/2C+3BEuRBARZ+1HG+jwU5jbVkYysA7zE2H8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210330150358-dbd898e17899/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= 
github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs= github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY= github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -51,13 +52,17 @@ github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e h1:4ZrkT/RzpnRO github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e/go.mod h1:uw9h2sd4WWHOPdJ13MQpwK5qYWKYDumDqxWWIknEQ+k= github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= 
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o= +github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE= @@ -67,8 +72,9 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 
+github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f h1:HR5nRmUQgXrwqZOwZ2DAc/aCi3Bu3xENpspW935vxu0= +github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f/go.mod h1:f3HiCrHjHBdcm6E83vGaXh1KomZMA2P6aeo3hKx/wg0= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= @@ -81,7 +87,6 @@ github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5z github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/aquarapid/vaultlib v0.5.1 h1:vuLWR6bZzLHybjJBSUYPgZlIp6KZ+SXeHLRRYTuk6d4= github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -89,7 +94,6 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod 
h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -97,12 +101,10 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.28.8 h1:kPGnElMdW0GDc54Giy1lcE/3gAr2Gzl6cMjYKoBNFhw= github.com/aws/aws-sdk-go v1.28.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -110,29 +112,39 @@ 
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13 h1:+qUNY4VRkEH46bLUwxCyUU+iOGJMQBVibAaYzWiwWcg= github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod 
h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk= +github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= @@ -150,10 +162,14 @@ github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM= github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= github.com/daaku/go.zipexe v1.0.0/go.mod 
h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw= +github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -161,8 +177,15 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible h1:ZxJX4ZSNg1LORBsStUojbrLfkrE3Ut122XhzyZnN110= +github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 
h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -170,6 +193,9 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -178,10 +204,11 @@ github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.4.7 
h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsouza/go-dockerclient v1.6.6 h1:9e3xkBrVkPb81gzYq23i7iDUEd6sx2ooeJA/gnYU6R4= +github.com/fsouza/go-dockerclient v1.6.6/go.mod h1:3/oRIWoe7uT6bwtAayj/EmJmepBjeL4pYvt7ZxC7Rnk= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -238,11 +265,12 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.1-0.20210202043019-fe2230a8b20c h1:yUT3Ygm3yXBD2qLPxYRDBcnEz0MHgQ4TJ/87C/wKnWA= +github.com/go-sql-driver/mysql v1.5.1-0.20210202043019-fe2230a8b20c/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod 
h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -260,21 +288,21 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 
h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v27 v27.0.4/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= @@ -286,7 +314,6 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -296,7 +323,6 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= @@ -306,7 +332,6 @@ github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -328,20 +353,17 @@ github.com/hashicorp/consul/sdk v0.5.0 h1:WC4594Wp/LkEeML/OdQKEC1yqBmEYkRp6i7X5u github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -349,27 +371,21 @@ github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= 
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl 
v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= @@ -390,10 +406,11 @@ github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -404,14 +421,12 @@ github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGn github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -421,10 +436,7 @@ github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences 
v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -434,9 +446,9 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E= github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= -github.com/looplab/fsm v0.2.0 h1:M8hf5EF4AYLcT1FNKVUX8nu7D0xfp291iGeuigSxfrw= -github.com/looplab/fsm v0.2.0/go.mod h1:p+IElwgCnAByqr2DWMuNbPjgMwqcHvTRZZn3dvKEke0= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 
@@ -452,7 +464,6 @@ github.com/martini-contrib/gzip v0.0.0-20151124214156-6c035326b43f h1:wVDxEVZP1e github.com/martini-contrib/gzip v0.0.0-20151124214156-6c035326b43f/go.mod h1:jhUB0rZB2TPWqy0yGugKRRictO591eSO7If7O4MfCaA= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -460,37 +471,31 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mch1307/vaultlib v0.5.0 h1:+tI8YCG033aVI+kAKwo0fwrUylFs+wO6DB7DM5qXJzU= -github.com/mch1307/vaultlib v0.5.0/go.mod h1:phFbO1oIDL1xTqUrNXbrAG0VdcYEKP8TNa9FJd7hFic= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 h1:jw16EimP5oAEM/2wt+SiEUov/YDyTCTDuPtIKgQIvk0= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0 
h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI= github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= @@ -498,10 +503,15 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.3 h1:f/MjBEBDLttYCGfRaKBbKSRVF5aV2O6fnBpzknuE3jU= github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/sys/mount v0.1.0 h1:Ytx78EatgFKtrqZ0BvJ0UtJE472ZvawVmil6pIfuCCU= +github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74= 
+github.com/moby/sys/mountinfo v0.1.0 h1:r8vMRbMAFEAfiNptYVokP+nfxPJzvRuia5e2vzXtENo= +github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= +github.com/moby/term v0.0.0-20200429084858-129dac9f73f6 h1:3Y9aosU6S5Bo8GYH0s+t1ej4m30GuUKvQ3c9ZLqdL28= +github.com/moby/term v0.0.0-20200429084858-129dac9f73f6/go.mod h1:or9wGItza1sRcM4Wd3dIv8DsFHYQuFsMHEdxUIlUxms= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -511,15 +521,17 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.6.3 h1:F8446DrvIF5V5smZfZ8K9nrmmix0AFgevPdLruGOmzk= github.com/montanaflynn/stats v0.6.3/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod 
h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5-0.20200416053754-163badb3bac6 h1:F721VBMijn0OBFZ5wUSuMVVLQj2IJiiupn6UNd7UbBE= github.com/olekukonko/tablewriter v0.0.5-0.20200416053754-163badb3bac6/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= @@ -532,6 +544,15 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= 
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= @@ -545,7 +566,6 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= @@ -553,38 +573,37 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pires/go-proxyproto v0.0.0-20191211124218-517ecdf5bb2b h1:JPLdtNmpXbWytipbGwYz7zXZzlQNASEiFw5aGAM75us= 
github.com/pires/go-proxyproto v0.0.0-20191211124218-517ecdf5bb2b/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/tengo v0.9.6-ps.v1 h1:sVudRi2EKEJuPHchj8Ap6uFDGyybi0amP6OLc4Bao9s= +github.com/planetscale/tengo v0.9.6-ps.v1/go.mod h1:zrvIPs4+lw2VBJ2XX/tQj+gPbFheMht4AZFKGEhihYI= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang 
v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -599,7 +618,6 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -609,7 +627,9 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus 
v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1 h1:acClJNSOjUrAUKW+ZneCZymCFDWtSaJG5YQl8FoOlyI= @@ -617,27 +637,23 @@ github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1/go.mod h1:Pgf1sZ2 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY= github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w= github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -646,10 +662,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spyzhov/ajson v0.4.2 h1:JMByd/jZApPKDvNsmO90X2WWGbmT2ahDFp73QhZbg3s= 
github.com/spyzhov/ajson v0.4.2/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -667,7 +681,6 @@ github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= @@ -676,10 +689,9 @@ github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod 
h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -705,6 +717,7 @@ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -718,10 +731,9 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -738,7 +750,6 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= @@ -773,11 +784,8 @@ golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= @@ -791,50 +799,15 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -862,6 +835,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -874,7 +848,6 @@ golang.org/x/tools v0.0.0-20201202200335-bef1c476418a h1:TYqOq/v+Ri5aADpldxXOj6P golang.org/x/tools 
v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -884,7 +857,6 @@ gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmK google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -907,11 +879,14 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0 
h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= @@ -926,6 +901,7 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -945,12 +921,13 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 
h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -966,9 +943,11 @@ k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0 k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= k8s.io/client-go v0.17.3 h1:deUna1Ksx05XeESH6XGCyONNFfiQmDdqeqUvicvP6nU= k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ= +k8s.io/code-generator v0.17.3 h1:q/hDMk2cvFzSxol7k/VA1qCssR7VSMXHQHhzuX29VJ8= k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= diff --git 
a/go/cache/cache.go b/go/cache/cache.go new file mode 100644 index 00000000000..2bbf8011537 --- /dev/null +++ b/go/cache/cache.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +// Cache is a generic interface type for a data structure that keeps recently used +// objects in memory and evicts them when it becomes full. +type Cache interface { + Get(key string) (interface{}, bool) + Set(key string, val interface{}) bool + ForEach(callback func(interface{}) bool) + + Delete(key string) + Clear() + + // Wait waits for all pending operations on the cache to settle. Since cache writes + // are asynchronous, a write may not be immediately accessible unless the user + // manually calls Wait. + Wait() + + Len() int + Evictions() int64 + UsedCapacity() int64 + MaxCapacity() int64 + SetCapacity(int64) +} + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +// NewDefaultCacheImpl returns the default cache implementation for Vitess. The options in the +// Config struct control the memory and entry limits for the cache, and the underlying cache +// implementation. 
+func NewDefaultCacheImpl(cfg *Config) Cache { + switch { + case cfg == nil: + return &nullCache{} + + case cfg.LFU: + if cfg.MaxEntries == 0 || cfg.MaxMemoryUsage == 0 { + return &nullCache{} + } + return NewRistrettoCache(cfg.MaxEntries, cfg.MaxMemoryUsage, func(val interface{}) int64 { + return val.(cachedObject).CachedSize(true) + }) + + default: + if cfg.MaxEntries == 0 { + return &nullCache{} + } + return NewLRUCache(cfg.MaxEntries, func(_ interface{}) int64 { + return 1 + }) + } +} + +// Config is the configuration options for a cache instance +type Config struct { + // MaxEntries is the estimated amount of entries that the cache will hold at capacity + MaxEntries int64 + // MaxMemoryUsage is the maximum amount of memory the cache can handle + MaxMemoryUsage int64 + // LFU toggles whether to use a new cache implementation with a TinyLFU admission policy + LFU bool +} + +// DefaultConfig is the default configuration for a cache instance in Vitess +var DefaultConfig = &Config{ + MaxEntries: 5000, + MaxMemoryUsage: 32 * 1024 * 1024, + LFU: true, +} diff --git a/go/cache/cache_test.go b/go/cache/cache_test.go new file mode 100644 index 00000000000..911a3bb207b --- /dev/null +++ b/go/cache/cache_test.go @@ -0,0 +1,47 @@ +package cache + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/cache/ristretto" +) + +func TestNewDefaultCacheImpl(t *testing.T) { + assertNullCache := func(t *testing.T, cache Cache) { + _, ok := cache.(*nullCache) + require.True(t, ok) + } + + assertLFUCache := func(t *testing.T, cache Cache) { + _, ok := cache.(*ristretto.Cache) + require.True(t, ok) + } + + assertLRUCache := func(t *testing.T, cache Cache) { + _, ok := cache.(*LRUCache) + require.True(t, ok) + } + + tests := []struct { + cfg *Config + verify func(t *testing.T, cache Cache) + }{ + {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: false}, assertNullCache}, + {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: true}, 
assertNullCache}, + {&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: false}, assertLRUCache}, + {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: false}, assertNullCache}, + {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: false}, assertLRUCache}, + {&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: true}, assertNullCache}, + {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: true}, assertLFUCache}, + {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: true}, assertNullCache}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%d.%d.%v", tt.cfg.MaxEntries, tt.cfg.MaxMemoryUsage, tt.cfg.LFU), func(t *testing.T) { + cache := NewDefaultCacheImpl(tt.cfg) + tt.verify(t, cache) + }) + } +} diff --git a/go/cache/lru_cache.go b/go/cache/lru_cache.go index cf33235670a..9175f942e94 100644 --- a/go/cache/lru_cache.go +++ b/go/cache/lru_cache.go @@ -25,59 +25,55 @@ package cache import ( "container/list" - "fmt" "sync" "time" ) +var _ Cache = &LRUCache{} + // LRUCache is a typical LRU cache implementation. If the cache // reaches the capacity, the least recently used item is deleted from // the cache. Note the capacity is not the number of items, but the -// total sum of the Size() of each item. +// total sum of the CachedSize() of each item. type LRUCache struct { mu sync.Mutex // list & table contain *entry objects. list *list.List table map[string]*list.Element + cost func(interface{}) int64 size int64 capacity int64 evictions int64 } -// Value is the interface values that go into LRUCache need to satisfy -type Value interface { - // Size returns how big this value is. If you want to just track - // the cache by number of objects, you may return the size as 1. - Size() int -} - // Item is what is stored in the cache type Item struct { Key string - Value Value + Value interface{} } type entry struct { key string - value Value + value interface{} size int64 timeAccessed time.Time } // NewLRUCache creates a new empty cache with the given capacity. 
-func NewLRUCache(capacity int64) *LRUCache { +func NewLRUCache(capacity int64, cost func(interface{}) int64) *LRUCache { return &LRUCache{ list: list.New(), table: make(map[string]*list.Element), capacity: capacity, + cost: cost, } } // Get returns a value from the cache, and marks the entry as most // recently used. -func (lru *LRUCache) Get(key string) (v Value, ok bool) { +func (lru *LRUCache) Get(key string) (v interface{}, ok bool) { lru.mu.Lock() defer lru.mu.Unlock() @@ -89,20 +85,8 @@ func (lru *LRUCache) Get(key string) (v Value, ok bool) { return element.Value.(*entry).value, true } -// Peek returns a value from the cache without changing the LRU order. -func (lru *LRUCache) Peek(key string) (v Value, ok bool) { - lru.mu.Lock() - defer lru.mu.Unlock() - - element := lru.table[key] - if element == nil { - return nil, false - } - return element.Value.(*entry).value, true -} - // Set sets a value in the cache. -func (lru *LRUCache) Set(key string, value Value) { +func (lru *LRUCache) Set(key string, value interface{}) bool { lru.mu.Lock() defer lru.mu.Unlock() @@ -111,23 +95,12 @@ func (lru *LRUCache) Set(key string, value Value) { } else { lru.addNew(key, value) } -} - -// SetIfAbsent will set the value in the cache if not present. If the -// value exists in the cache, we don't set it. -func (lru *LRUCache) SetIfAbsent(key string, value Value) { - lru.mu.Lock() - defer lru.mu.Unlock() - - if element := lru.table[key]; element != nil { - lru.moveToFront(element) - } else { - lru.addNew(key, value) - } + // the LRU cache cannot fail to insert items; it always returns true + return true } // Delete removes an entry from the cache, and returns if the entry existed. 
-func (lru *LRUCache) Delete(key string) bool { +func (lru *LRUCache) delete(key string) bool { lru.mu.Lock() defer lru.mu.Unlock() @@ -142,6 +115,11 @@ func (lru *LRUCache) Delete(key string) bool { return true } +// Delete removes an entry from the cache +func (lru *LRUCache) Delete(key string) { + lru.delete(key) +} + // Clear will clear the entire cache. func (lru *LRUCache) Clear() { lru.mu.Lock() @@ -152,6 +130,13 @@ func (lru *LRUCache) Clear() { lru.size = 0 } +// Len returns the size of the cache (in entries) +func (lru *LRUCache) Len() int { + lru.mu.Lock() + defer lru.mu.Unlock() + return lru.list.Len() +} + // SetCapacity will set the capacity of the cache. If the capacity is // smaller, and the current cache size exceed that capacity, the cache // will be shrank. @@ -163,75 +148,40 @@ func (lru *LRUCache) SetCapacity(capacity int64) { lru.checkCapacity() } -// Stats returns a few stats on the cache. -func (lru *LRUCache) Stats() (length, size, capacity, evictions int64, oldest time.Time) { - lru.mu.Lock() - defer lru.mu.Unlock() - if lastElem := lru.list.Back(); lastElem != nil { - oldest = lastElem.Value.(*entry).timeAccessed - } - return int64(lru.list.Len()), lru.size, lru.capacity, lru.evictions, oldest -} - -// StatsJSON returns stats as a JSON object in a string. -func (lru *LRUCache) StatsJSON() string { - if lru == nil { - return "{}" - } - l, s, c, e, o := lru.Stats() - return fmt.Sprintf("{\"Length\": %v, \"Size\": %v, \"Capacity\": %v, \"Evictions\": %v, \"OldestAccess\": \"%v\"}", l, s, c, e, o) -} - -// Length returns how many elements are in the cache -func (lru *LRUCache) Length() int64 { - lru.mu.Lock() - defer lru.mu.Unlock() - return int64(lru.list.Len()) -} +// Wait is a no-op in the LRU cache +func (lru *LRUCache) Wait() {} -// Size returns the sum of the objects' Size() method. 
-func (lru *LRUCache) Size() int64 { - lru.mu.Lock() - defer lru.mu.Unlock() +// UsedCapacity returns the size of the cache (in bytes) +func (lru *LRUCache) UsedCapacity() int64 { return lru.size } -// Capacity returns the cache maximum capacity. -func (lru *LRUCache) Capacity() int64 { +// MaxCapacity returns the cache maximum capacity. +func (lru *LRUCache) MaxCapacity() int64 { lru.mu.Lock() defer lru.mu.Unlock() return lru.capacity } -// Evictions returns the eviction count. +// Evictions returns the number of evictions func (lru *LRUCache) Evictions() int64 { lru.mu.Lock() defer lru.mu.Unlock() return lru.evictions } -// Oldest returns the insertion time of the oldest element in the cache, -// or a IsZero() time if cache is empty. -func (lru *LRUCache) Oldest() (oldest time.Time) { - lru.mu.Lock() - defer lru.mu.Unlock() - if lastElem := lru.list.Back(); lastElem != nil { - oldest = lastElem.Value.(*entry).timeAccessed - } - return -} - -// Keys returns all the keys for the cache, ordered from most recently +// ForEach yields all the values for the cache, ordered from most recently // used to least recently used. 
-func (lru *LRUCache) Keys() []string { +func (lru *LRUCache) ForEach(callback func(value interface{}) bool) { lru.mu.Lock() defer lru.mu.Unlock() - keys := make([]string, 0, lru.list.Len()) for e := lru.list.Front(); e != nil; e = e.Next() { - keys = append(keys, e.Value.(*entry).key) + v := e.Value.(*entry) + if !callback(v.value) { + break + } } - return keys } // Items returns all the values for the cache, ordered from most recently @@ -248,8 +198,8 @@ func (lru *LRUCache) Items() []Item { return items } -func (lru *LRUCache) updateInplace(element *list.Element, value Value) { - valueSize := int64(value.Size()) +func (lru *LRUCache) updateInplace(element *list.Element, value interface{}) { + valueSize := lru.cost(value) sizeDiff := valueSize - element.Value.(*entry).size element.Value.(*entry).value = value element.Value.(*entry).size = valueSize @@ -263,8 +213,8 @@ func (lru *LRUCache) moveToFront(element *list.Element) { element.Value.(*entry).timeAccessed = time.Now() } -func (lru *LRUCache) addNew(key string, value Value) { - newEntry := &entry{key, value, int64(value.Size()), time.Now()} +func (lru *LRUCache) addNew(key string, value interface{}) { + newEntry := &entry{key, value, lru.cost(value), time.Now()} element := lru.list.PushFront(newEntry) lru.table[key] = element lru.size += newEntry.size diff --git a/go/cache/lru_cache_test.go b/go/cache/lru_cache_test.go index 9a7f09232e6..152ac17ab6f 100644 --- a/go/cache/lru_cache_test.go +++ b/go/cache/lru_cache_test.go @@ -17,22 +17,20 @@ limitations under the License. 
package cache import ( - "encoding/json" "testing" - "time" ) type CacheValue struct { - size int + size int64 } -func (cv *CacheValue) Size() int { - return cv.size +func cacheValueSize(val interface{}) int64 { + return val.(*CacheValue).size } func TestInitialState(t *testing.T) { - cache := NewLRUCache(5) - l, sz, c, e, _ := cache.Stats() + cache := NewLRUCache(5, cacheValueSize) + l, sz, c, e := cache.Len(), cache.UsedCapacity(), cache.MaxCapacity(), cache.Evictions() if l != 0 { t.Errorf("length = %v, want 0", l) } @@ -48,7 +46,7 @@ func TestInitialState(t *testing.T) { } func TestSetInsertsValue(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) data := &CacheValue{0} key := "key" cache.Set(key, data) @@ -58,37 +56,14 @@ func TestSetInsertsValue(t *testing.T) { t.Errorf("Cache has incorrect value: %v != %v", data, v) } - k := cache.Keys() - if len(k) != 1 || k[0] != key { - t.Errorf("Cache.Keys() returned incorrect values: %v", k) - } values := cache.Items() if len(values) != 1 || values[0].Key != key { t.Errorf("Cache.Values() returned incorrect values: %v", values) } } -func TestSetIfAbsent(t *testing.T) { - cache := NewLRUCache(100) - data := &CacheValue{0} - key := "key" - cache.SetIfAbsent(key, data) - - v, ok := cache.Get(key) - if !ok || v.(*CacheValue) != data { - t.Errorf("Cache has incorrect value: %v != %v", data, v) - } - - cache.SetIfAbsent(key, &CacheValue{1}) - - v, ok = cache.Get(key) - if !ok || v.(*CacheValue) != data { - t.Errorf("Cache has incorrect value: %v != %v", data, v) - } -} - func TestGetValueWithMultipleTypes(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) data := &CacheValue{0} key := "key" cache.Set(key, data) @@ -105,23 +80,23 @@ func TestGetValueWithMultipleTypes(t *testing.T) { } func TestSetUpdatesSize(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) emptyValue := &CacheValue{0} key := "key1" cache.Set(key, 
emptyValue) - if _, sz, _, _, _ := cache.Stats(); sz != 0 { - t.Errorf("cache.Size() = %v, expected 0", sz) + if sz := cache.UsedCapacity(); sz != 0 { + t.Errorf("cache.UsedCapacity() = %v, expected 0", sz) } someValue := &CacheValue{20} key = "key2" cache.Set(key, someValue) - if _, sz, _, _, _ := cache.Stats(); sz != 20 { - t.Errorf("cache.Size() = %v, expected 20", sz) + if sz := cache.UsedCapacity(); sz != 20 { + t.Errorf("cache.UsedCapacity() = %v, expected 20", sz) } } func TestSetWithOldKeyUpdatesValue(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) emptyValue := &CacheValue{0} key := "key1" cache.Set(key, emptyValue) @@ -135,67 +110,42 @@ func TestSetWithOldKeyUpdatesValue(t *testing.T) { } func TestSetWithOldKeyUpdatesSize(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) emptyValue := &CacheValue{0} key := "key1" cache.Set(key, emptyValue) - if _, sz, _, _, _ := cache.Stats(); sz != 0 { - t.Errorf("cache.Size() = %v, expected %v", sz, 0) + if sz := cache.UsedCapacity(); sz != 0 { + t.Errorf("cache.UsedCapacity() = %v, expected %v", sz, 0) } someValue := &CacheValue{20} cache.Set(key, someValue) expected := int64(someValue.size) - if _, sz, _, _, _ := cache.Stats(); sz != expected { - t.Errorf("cache.Size() = %v, expected %v", sz, expected) + if sz := cache.UsedCapacity(); sz != expected { + t.Errorf("cache.UsedCapacity() = %v, expected %v", sz, expected) } } func TestGetNonExistent(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) if _, ok := cache.Get("notthere"); ok { t.Error("Cache returned a notthere value after no inserts.") } } -func TestPeek(t *testing.T) { - cache := NewLRUCache(2) - val1 := &CacheValue{1} - cache.Set("key1", val1) - val2 := &CacheValue{1} - cache.Set("key2", val2) - // Make key1 the most recent. - cache.Get("key1") - // Peek key2. 
- if v, ok := cache.Peek("key2"); ok && v.(*CacheValue) != val2 { - t.Errorf("key2 received: %v, want %v", v, val2) - } - // Push key2 out - cache.Set("key3", &CacheValue{1}) - if v, ok := cache.Peek("key2"); ok { - t.Errorf("key2 received: %v, want absent", v) - } -} - func TestDelete(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) value := &CacheValue{1} key := "key" - if cache.Delete(key) { - t.Error("Item unexpectedly already in cache.") - } - + cache.Delete(key) cache.Set(key, value) + cache.Delete(key) - if !cache.Delete(key) { - t.Error("Expected item to be in cache.") - } - - if _, sz, _, _, _ := cache.Stats(); sz != 0 { - t.Errorf("cache.Size() = %v, expected 0", sz) + if sz := cache.UsedCapacity(); sz != 0 { + t.Errorf("cache.UsedCapacity() = %v, expected 0", sz) } if _, ok := cache.Get(key); ok { @@ -204,21 +154,21 @@ func TestDelete(t *testing.T) { } func TestClear(t *testing.T) { - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) value := &CacheValue{1} key := "key" cache.Set(key, value) cache.Clear() - if _, sz, _, _, _ := cache.Stats(); sz != 0 { - t.Errorf("cache.Size() = %v, expected 0 after Clear()", sz) + if sz := cache.UsedCapacity(); sz != 0 { + t.Errorf("cache.UsedCapacity() = %v, expected 0 after Clear()", sz) } } func TestCapacityIsObeyed(t *testing.T) { size := int64(3) - cache := NewLRUCache(100) + cache := NewLRUCache(100, cacheValueSize) cache.SetCapacity(size) value := &CacheValue{1} @@ -226,50 +176,34 @@ func TestCapacityIsObeyed(t *testing.T) { cache.Set("key1", value) cache.Set("key2", value) cache.Set("key3", value) - if _, sz, _, _, _ := cache.Stats(); sz != size { - t.Errorf("cache.Size() = %v, expected %v", sz, size) + if sz := cache.UsedCapacity(); sz != size { + t.Errorf("cache.UsedCapacity() = %v, expected %v", sz, size) } // Insert one more; something should be evicted to make room. 
cache.Set("key4", value) - _, sz, _, evictions, _ := cache.Stats() + sz, evictions := cache.UsedCapacity(), cache.Evictions() if sz != size { - t.Errorf("post-evict cache.Size() = %v, expected %v", sz, size) + t.Errorf("post-evict cache.UsedCapacity() = %v, expected %v", sz, size) } if evictions != 1 { - t.Errorf("post-evict cache.evictions = %v, expected 1", evictions) - } - - // Check json stats - data := cache.StatsJSON() - m := make(map[string]interface{}) - if err := json.Unmarshal([]byte(data), &m); err != nil { - t.Errorf("cache.StatsJSON() returned bad json data: %v %v", data, err) - } - if m["Size"].(float64) != float64(size) { - t.Errorf("cache.StatsJSON() returned bad size: %v", m) + t.Errorf("post-evict cache.Evictions() = %v, expected 1", evictions) } // Check various other stats - if l := cache.Length(); l != size { - t.Errorf("cache.StatsJSON() returned bad length: %v", l) + if l := cache.Len(); int64(l) != size { + t.Errorf("cache.Len() returned bad length: %v", l) } - if s := cache.Size(); s != size { - t.Errorf("cache.StatsJSON() returned bad size: %v", s) + if s := cache.UsedCapacity(); s != size { + t.Errorf("cache.UsedCapacity() returned bad size: %v", s) } - if c := cache.Capacity(); c != size { - t.Errorf("cache.StatsJSON() returned bad length: %v", c) - } - - // checks StatsJSON on nil - cache = nil - if s := cache.StatsJSON(); s != "{}" { - t.Errorf("cache.StatsJSON() on nil object returned %v", s) + if c := cache.MaxCapacity(); c != size { + t.Errorf("cache.UsedCapacity() returned bad length: %v", c) } } func TestLRUIsEvicted(t *testing.T) { size := int64(3) - cache := NewLRUCache(size) + cache := NewLRUCache(size, cacheValueSize) cache.Set("key1", &CacheValue{1}) cache.Set("key2", &CacheValue{1}) @@ -278,9 +212,7 @@ func TestLRUIsEvicted(t *testing.T) { // Look up the elements. This will rearrange the LRU ordering. 
cache.Get("key3") - beforeKey2 := time.Now() cache.Get("key2") - afterKey2 := time.Now() cache.Get("key1") // lru: [key1, key2, key3] @@ -292,11 +224,6 @@ func TestLRUIsEvicted(t *testing.T) { t.Error("Least recently used element was not evicted.") } - // Check oldest - if o := cache.Oldest(); o.Before(beforeKey2) || o.After(afterKey2) { - t.Errorf("cache.Oldest returned an unexpected value: got %v, expected a value between %v and %v", o, beforeKey2, afterKey2) - } - if e, want := cache.Evictions(), int64(1); e != want { t.Errorf("evictions: %d, want: %d", e, want) } diff --git a/go/cache/null.go b/go/cache/null.go new file mode 100644 index 00000000000..5ef0f13a8c7 --- /dev/null +++ b/go/cache/null.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +// nullCache is a no-op cache that does not store items +type nullCache struct{} + +// Get never returns anything on the nullCache +func (n *nullCache) Get(_ string) (interface{}, bool) { + return nil, false +} + +// Set is a no-op in the nullCache +func (n *nullCache) Set(_ string, _ interface{}) bool { + return false +} + +// ForEach iterates the nullCache, which is always empty +func (n *nullCache) ForEach(_ func(interface{}) bool) {} + +// Delete is a no-op in the nullCache +func (n *nullCache) Delete(_ string) {} + +// Clear is a no-op in the nullCache +func (n *nullCache) Clear() {} + +// Wait is a no-op in the nullcache +func (n *nullCache) Wait() {} + +func (n *nullCache) Len() int { + return 0 +} + +// Capacity returns the capacity of the nullCache, which is always 0 +func (n *nullCache) UsedCapacity() int64 { + return 0 +} + +// Capacity returns the capacity of the nullCache, which is always 0 +func (n *nullCache) MaxCapacity() int64 { + return 0 +} + +// SetCapacity sets the capacity of the null cache, which is a no-op +func (n *nullCache) SetCapacity(_ int64) {} + +func (n *nullCache) Evictions() int64 { + return 0 +} diff --git a/go/cache/perf_test.go b/go/cache/perf_test.go index b5c9a1a8b38..95546f66c06 100644 --- a/go/cache/perf_test.go +++ b/go/cache/perf_test.go @@ -20,15 +20,11 @@ import ( "testing" ) -type MyValue []byte - -func (mv MyValue) Size() int { - return cap(mv) -} - func BenchmarkGet(b *testing.B) { - cache := NewLRUCache(64 * 1024 * 1024) - value := make(MyValue, 1000) + cache := NewLRUCache(64*1024*1024, func(val interface{}) int64 { + return int64(cap(val.([]byte))) + }) + value := make([]byte, 1000) cache.Set("stuff", value) for i := 0; i < b.N; i++ { val, ok := cache.Get("stuff") diff --git a/go/cache/ristretto.go b/go/cache/ristretto.go new file mode 100644 index 00000000000..29eb52fe692 --- /dev/null +++ b/go/cache/ristretto.go @@ -0,0 +1,28 @@ +package cache + +import ( + 
"vitess.io/vitess/go/cache/ristretto" +) + +var _ Cache = &ristretto.Cache{} + +// NewRistrettoCache returns a Cache implementation based on Ristretto +func NewRistrettoCache(maxEntries, maxCost int64, cost func(interface{}) int64) *ristretto.Cache { + // The TinyLFU paper recommends to allocate 10x times the max entries amount as counters + // for the admission policy; since our caches are small and we're very interested on admission + // accuracy, we're a bit more greedy than 10x + const CounterRatio = 12 + + config := ristretto.Config{ + NumCounters: maxEntries * CounterRatio, + MaxCost: maxCost, + BufferItems: 64, + Metrics: true, + Cost: cost, + } + cache, err := ristretto.NewCache(&config) + if err != nil { + panic(err) + } + return cache +} diff --git a/go/cache/ristretto/bloom/bbloom.go b/go/cache/ristretto/bloom/bbloom.go new file mode 100644 index 00000000000..ce5daa6864d --- /dev/null +++ b/go/cache/ristretto/bloom/bbloom.go @@ -0,0 +1,151 @@ +// The MIT License (MIT) +// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt + +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package bloom + +import ( + "math" + "unsafe" +) + +// helper +var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} + +func getSize(ui64 uint64) (size uint64, exponent uint64) { + if ui64 < uint64(512) { + ui64 = uint64(512) + } + size = uint64(1) + for size < ui64 { + size <<= 1 + exponent++ + } + return size, exponent +} + +// NewBloomFilterWithErrorRate returns a new bloomfilter with optimal size for the given +// error rate +func NewBloomFilterWithErrorRate(numEntries uint64, wrongs float64) *Bloom { + size := -1 * float64(numEntries) * math.Log(wrongs) / math.Pow(0.69314718056, 2) + locs := math.Ceil(0.69314718056 * size / float64(numEntries)) + return NewBloomFilter(uint64(size), uint64(locs)) +} + +// NewBloomFilter returns a new bloomfilter. +func NewBloomFilter(entries, locs uint64) (bloomfilter *Bloom) { + size, exponent := getSize(entries) + bloomfilter = &Bloom{ + sizeExp: exponent, + size: size - 1, + setLocs: locs, + shift: 64 - exponent, + } + bloomfilter.Size(size) + return bloomfilter +} + +// Bloom filter +type Bloom struct { + bitset []uint64 + ElemNum uint64 + sizeExp uint64 + size uint64 + setLocs uint64 + shift uint64 +} + +// <--- http://www.cse.yorku.ca/~oz/hash.html +// modified Berkeley DB Hash (32bit) +// hash is casted to l, h = 16bit fragments +// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { +// hash := uint64(len(*b)) +// for _, c := range *b { +// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash +// } +// h = hash >> bl.shift +// l = hash << bl.shift >> bl.shift +// return l, h +// } + +// Add adds hash of a key to the bloomfilter. 
+func (bl *Bloom) Add(hash uint64) { + h := hash >> bl.shift + l := hash << bl.shift >> bl.shift + for i := uint64(0); i < bl.setLocs; i++ { + bl.Set((h + i*l) & bl.size) + bl.ElemNum++ + } +} + +// Has checks if bit(s) for entry hash is/are set, +// returns true if the hash was added to the Bloom Filter. +func (bl Bloom) Has(hash uint64) bool { + h := hash >> bl.shift + l := hash << bl.shift >> bl.shift + for i := uint64(0); i < bl.setLocs; i++ { + if !bl.IsSet((h + i*l) & bl.size) { + return false + } + } + return true +} + +// AddIfNotHas only Adds hash, if it's not present in the bloomfilter. +// Returns true if hash was added. +// Returns false if hash was already registered in the bloomfilter. +func (bl *Bloom) AddIfNotHas(hash uint64) bool { + if bl.Has(hash) { + return false + } + bl.Add(hash) + return true +} + +// TotalSize returns the total size of the bloom filter. +func (bl *Bloom) TotalSize() int { + // The bl struct has 5 members and each one is 8 byte. The bitset is a + // uint64 byte slice. + return len(bl.bitset)*8 + 5*8 +} + +// Size makes Bloom filter with as bitset of size sz. +func (bl *Bloom) Size(sz uint64) { + bl.bitset = make([]uint64, sz>>6) +} + +// Clear resets the Bloom filter. +func (bl *Bloom) Clear() { + for i := range bl.bitset { + bl.bitset[i] = 0 + } +} + +// Set sets the bit[idx] of bitset. +func (bl *Bloom) Set(idx uint64) { + ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) + *(*uint8)(ptr) |= mask[idx%8] +} + +// IsSet checks if bit[idx] of bitset is set, returns true/false. 
+func (bl *Bloom) IsSet(idx uint64) bool { + ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) + r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1 + return r == 1 +} diff --git a/go/cache/ristretto/bloom/bbloom_test.go b/go/cache/ristretto/bloom/bbloom_test.go new file mode 100644 index 00000000000..960fb034e63 --- /dev/null +++ b/go/cache/ristretto/bloom/bbloom_test.go @@ -0,0 +1,83 @@ +package bloom + +import ( + "crypto/rand" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/hack" +) + +var ( + wordlist1 [][]byte + n = uint64(1 << 16) + bf *Bloom +) + +func TestMain(m *testing.M) { + wordlist1 = make([][]byte, n) + for i := range wordlist1 { + b := make([]byte, 32) + rand.Read(b) + wordlist1[i] = b + } + fmt.Println("\n###############\nbbloom_test.go") + fmt.Print("Benchmarks relate to 2**16 OP. --> output/65536 op/ns\n###############\n\n") + + os.Exit(m.Run()) +} + +func TestM_NumberOfWrongs(t *testing.T) { + bf = NewBloomFilter(n*10, 7) + + cnt := 0 + for i := range wordlist1 { + hash := hack.RuntimeMemhash(wordlist1[i], 0) + if !bf.AddIfNotHas(hash) { + cnt++ + } + } + fmt.Printf("Bloomfilter New(7* 2**16, 7) (-> size=%v bit): \n Check for 'false positives': %v wrong positive 'Has' results on 2**16 entries => %v %%\n", len(bf.bitset)<<6, cnt, float64(cnt)/float64(n)) + +} + +func BenchmarkM_New(b *testing.B) { + for r := 0; r < b.N; r++ { + _ = NewBloomFilter(n*10, 7) + } +} + +func BenchmarkM_Clear(b *testing.B) { + bf = NewBloomFilter(n*10, 7) + for i := range wordlist1 { + hash := hack.RuntimeMemhash(wordlist1[i], 0) + bf.Add(hash) + } + b.ResetTimer() + for r := 0; r < b.N; r++ { + bf.Clear() + } +} + +func BenchmarkM_Add(b *testing.B) { + bf = NewBloomFilter(n*10, 7) + b.ResetTimer() + for r := 0; r < b.N; r++ { + for i := range wordlist1 { + hash := hack.RuntimeMemhash(wordlist1[i], 0) + bf.Add(hash) + } + } + +} + +func BenchmarkM_Has(b *testing.B) { + b.ResetTimer() + for r := 0; r < b.N; r++ { + for i := range 
wordlist1 { + hash := hack.RuntimeMemhash(wordlist1[i], 0) + bf.Has(hash) + } + } +} diff --git a/go/cache/ristretto/cache.go b/go/cache/ristretto/cache.go new file mode 100644 index 00000000000..bb39a3ff359 --- /dev/null +++ b/go/cache/ristretto/cache.go @@ -0,0 +1,681 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package ristretto is a fast, fixed size, in-memory cache with a dual focus on +// throughput and hit ratio performance. You can easily add Ristretto to an +// existing system and keep the most valuable data where you need it. +package ristretto + +import ( + "bytes" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "vitess.io/vitess/go/hack" +) + +var ( + // TODO: find the optimal value for this or make it configurable + setBufSize = 32 * 1024 +) + +func defaultStringHash(key string) (uint64, uint64) { + const Seed1 = uint64(0x1122334455667788) + const Seed2 = uint64(0x8877665544332211) + return hack.RuntimeStrhash(key, Seed1), hack.RuntimeStrhash(key, Seed2) +} + +type itemCallback func(*Item) + +// CacheItemSize is the overhead in bytes for every stored cache item +const CacheItemSize = int64(unsafe.Sizeof(storeItem{})) + +// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission +// policy and a Sampled LFU eviction policy. 
You can use the same Cache instance +// from as many goroutines as you want. +type Cache struct { + // store is the central concurrent hashmap where key-value items are stored. + store store + // policy determines what gets let in to the cache and what gets kicked out. + policy policy + // getBuf is a custom ring buffer implementation that gets pushed to when + // keys are read. + getBuf *ringBuffer + // setBuf is a buffer allowing us to batch/drop Sets during times of high + // contention. + setBuf chan *Item + // onEvict is called for item evictions. + onEvict itemCallback + // onReject is called when an item is rejected via admission policy. + onReject itemCallback + // onExit is called whenever a value goes out of scope from the cache. + onExit func(interface{}) + // KeyToHash function is used to customize the key hashing algorithm. + // Each key will be hashed using the provided function. If keyToHash value + // is not set, the default keyToHash function is used. + keyToHash func(string) (uint64, uint64) + // stop is used to stop the processItems goroutine. + stop chan struct{} + // indicates whether cache is closed. + isClosed bool + // cost calculates cost from a value. + cost func(value interface{}) int64 + // ignoreInternalCost dictates whether to ignore the cost of internally storing + // the item in the cost calculation. + ignoreInternalCost bool + // Metrics contains a running log of important statistics like hits, misses, + // and dropped items. + Metrics *Metrics +} + +// Config is passed to NewCache for creating new Cache instances. +type Config struct { + // NumCounters determines the number of counters (keys) to keep that hold + // access frequency information. It's generally a good idea to have more + // counters than the max cache capacity, as this will improve eviction + // accuracy and subsequent hit ratios. + // + // For example, if you expect your cache to hold 1,000,000 items when full, + // NumCounters should be 10,000,000 (10x). 
Each counter takes up 4 bits, so + // keeping 10,000,000 counters would require 5MB of memory. + NumCounters int64 + // MaxCost can be considered as the cache capacity, in whatever units you + // choose to use. + // + // For example, if you want the cache to have a max capacity of 100MB, you + // would set MaxCost to 100,000,000 and pass an item's number of bytes as + // the `cost` parameter for calls to Set. If new items are accepted, the + // eviction process will take care of making room for the new item and not + // overflowing the MaxCost value. + MaxCost int64 + // BufferItems determines the size of Get buffers. + // + // Unless you have a rare use case, using `64` as the BufferItems value + // results in good performance. + BufferItems int64 + // Metrics determines whether cache statistics are kept during the cache's + // lifetime. There *is* some overhead to keeping statistics, so you should + // only set this flag to true when testing or throughput performance isn't a + // major factor. + Metrics bool + // OnEvict is called for every eviction and passes the hashed key, value, + // and cost to the function. + OnEvict func(item *Item) + // OnReject is called for every rejection done via the policy. + OnReject func(item *Item) + // OnExit is called whenever a value is removed from cache. This can be + // used to do manual memory deallocation. Would also be called on eviction + // and rejection of the value. + OnExit func(val interface{}) + // KeyToHash function is used to customize the key hashing algorithm. + // Each key will be hashed using the provided function. If keyToHash value + // is not set, the default keyToHash function is used. + KeyToHash func(string) (uint64, uint64) + // Cost evaluates a value and outputs a corresponding cost. This function + // is ran after Set is called for a new item or an item update with a cost + // param of 0. 
+ Cost func(value interface{}) int64 + // IgnoreInternalCost set to true indicates to the cache that the cost of + // internally storing the value should be ignored. This is useful when the + // cost passed to set is not using bytes as units. Keep in mind that setting + // this to true will increase the memory usage. + IgnoreInternalCost bool +} + +type itemFlag byte + +const ( + itemNew itemFlag = iota + itemDelete + itemUpdate +) + +// Item is passed to setBuf so items can eventually be added to the cache. +type Item struct { + flag itemFlag + Key uint64 + Conflict uint64 + Value interface{} + Cost int64 + wg *sync.WaitGroup +} + +// NewCache returns a new Cache instance and any configuration errors, if any. +func NewCache(config *Config) (*Cache, error) { + switch { + case config.NumCounters == 0: + return nil, errors.New("NumCounters can't be zero") + case config.MaxCost == 0: + return nil, errors.New("Capacity can't be zero") + case config.BufferItems == 0: + return nil, errors.New("BufferItems can't be zero") + } + policy := newPolicy(config.NumCounters, config.MaxCost) + cache := &Cache{ + store: newStore(), + policy: policy, + getBuf: newRingBuffer(policy, config.BufferItems), + setBuf: make(chan *Item, setBufSize), + keyToHash: config.KeyToHash, + stop: make(chan struct{}), + cost: config.Cost, + ignoreInternalCost: config.IgnoreInternalCost, + } + cache.onExit = func(val interface{}) { + if config.OnExit != nil && val != nil { + config.OnExit(val) + } + } + cache.onEvict = func(item *Item) { + if config.OnEvict != nil { + config.OnEvict(item) + } + cache.onExit(item.Value) + } + cache.onReject = func(item *Item) { + if config.OnReject != nil { + config.OnReject(item) + } + cache.onExit(item.Value) + } + if cache.keyToHash == nil { + cache.keyToHash = defaultStringHash + } + if config.Metrics { + cache.collectMetrics() + } + // NOTE: benchmarks seem to show that performance decreases the more + // goroutines we have running cache.processItems(), so 1 
should + // usually be sufficient + go cache.processItems() + return cache, nil +} + +// Wait blocks until all the current cache operations have been processed in the background +func (c *Cache) Wait() { + if c == nil || c.isClosed { + return + } + wg := &sync.WaitGroup{} + wg.Add(1) + c.setBuf <- &Item{wg: wg} + wg.Wait() +} + +// Get returns the value (if any) and a boolean representing whether the +// value was found or not. The value can be nil and the boolean can be true at +// the same time. +func (c *Cache) Get(key string) (interface{}, bool) { + if c == nil || c.isClosed { + return nil, false + } + keyHash, conflictHash := c.keyToHash(key) + c.getBuf.Push(keyHash) + value, ok := c.store.Get(keyHash, conflictHash) + if ok { + c.Metrics.add(hit, keyHash, 1) + } else { + c.Metrics.add(miss, keyHash, 1) + } + return value, ok +} + +// Set attempts to add the key-value item to the cache. If it returns false, +// then the Set was dropped and the key-value item isn't added to the cache. If +// it returns true, there's still a chance it could be dropped by the policy if +// its determined that the key-value item isn't worth keeping, but otherwise the +// item will be added and other items will be evicted in order to make room. +// +// The cost of the entry will be evaluated lazily by the cache's Cost function. +func (c *Cache) Set(key string, value interface{}) bool { + return c.SetWithCost(key, value, 0) +} + +// SetWithCost works like Set but adds a key-value pair to the cache with a specific +// cost. The built-in Cost function will not be called to evaluate the object's cost +// and instead the given value will be used. +func (c *Cache) SetWithCost(key string, value interface{}, cost int64) bool { + if c == nil || c.isClosed { + return false + } + + keyHash, conflictHash := c.keyToHash(key) + i := &Item{ + flag: itemNew, + Key: keyHash, + Conflict: conflictHash, + Value: value, + Cost: cost, + } + // cost is eventually updated. 
The expiration must also be immediately updated + // to prevent items from being prematurely removed from the map. + if prev, ok := c.store.Update(i); ok { + c.onExit(prev) + i.flag = itemUpdate + } + // Attempt to send item to policy. + select { + case c.setBuf <- i: + return true + default: + if i.flag == itemUpdate { + // Return true if this was an update operation since we've already + // updated the store. For all the other operations (set/delete), we + // return false which means the item was not inserted. + return true + } + c.Metrics.add(dropSets, keyHash, 1) + return false + } +} + +// Delete deletes the key-value item from the cache if it exists. +func (c *Cache) Delete(key string) { + if c == nil || c.isClosed { + return + } + keyHash, conflictHash := c.keyToHash(key) + // Delete immediately. + _, prev := c.store.Del(keyHash, conflictHash) + c.onExit(prev) + // If we've set an item, it would be applied slightly later. + // So we must push the same item to `setBuf` with the deletion flag. + // This ensures that if a set is followed by a delete, it will be + // applied in the correct order. + c.setBuf <- &Item{ + flag: itemDelete, + Key: keyHash, + Conflict: conflictHash, + } +} + +// Close stops all goroutines and closes all channels. +func (c *Cache) Close() { + if c == nil || c.isClosed { + return + } + c.Clear() + + // Block until processItems goroutine is returned. + c.stop <- struct{}{} + close(c.stop) + close(c.setBuf) + c.policy.Close() + c.isClosed = true +} + +// Clear empties the hashmap and zeroes all policy counters. Note that this is +// not an atomic operation (but that shouldn't be a problem as it's assumed that +// Set/Get calls won't be occurring until after this). +func (c *Cache) Clear() { + if c == nil || c.isClosed { + return + } + // Block until processItems goroutine is returned. + c.stop <- struct{}{} + + // Clear out the setBuf channel. 
+loop: + for { + select { + case i := <-c.setBuf: + if i.wg != nil { + i.wg.Done() + continue + } + if i.flag != itemUpdate { + // In itemUpdate, the value is already set in the store. So, no need to call + // onEvict here. + c.onEvict(i) + } + default: + break loop + } + } + + // Clear value hashmap and policy data. + c.policy.Clear() + c.store.Clear(c.onEvict) + // Only reset metrics if they're enabled. + if c.Metrics != nil { + c.Metrics.Clear() + } + // Restart processItems goroutine. + go c.processItems() +} + +// Len returns the size of the cache (in entries) +func (c *Cache) Len() int { + if c == nil { + return 0 + } + return c.store.Len() +} + +// UsedCapacity returns the size of the cache (in bytes) +func (c *Cache) UsedCapacity() int64 { + if c == nil { + return 0 + } + return c.policy.Used() +} + +// MaxCapacity returns the max cost of the cache (in bytes) +func (c *Cache) MaxCapacity() int64 { + if c == nil { + return 0 + } + return c.policy.MaxCost() +} + +// SetCapacity updates the maxCost of an existing cache. +func (c *Cache) SetCapacity(maxCost int64) { + if c == nil { + return + } + c.policy.UpdateMaxCost(maxCost) +} + +// Evictions returns the number of evictions +func (c *Cache) Evictions() int64 { + // TODO + if c == nil || c.Metrics == nil { + return 0 + } + return int64(c.Metrics.KeysEvicted()) +} + +// ForEach yields all the values currently stored in the cache to the given callback. +// The callback may return `false` to stop the iteration early. +func (c *Cache) ForEach(forEach func(interface{}) bool) { + if c == nil { + return + } + c.store.ForEach(forEach) +} + +// processItems is ran by goroutines processing the Set buffer. +func (c *Cache) processItems() { + startTs := make(map[uint64]time.Time) + numToKeep := 100000 // TODO: Make this configurable via options. 
+ + trackAdmission := func(key uint64) { + if c.Metrics == nil { + return + } + startTs[key] = time.Now() + if len(startTs) > numToKeep { + for k := range startTs { + if len(startTs) <= numToKeep { + break + } + delete(startTs, k) + } + } + } + onEvict := func(i *Item) { + delete(startTs, i.Key) + if c.onEvict != nil { + c.onEvict(i) + } + } + + for { + select { + case i := <-c.setBuf: + if i.wg != nil { + i.wg.Done() + continue + } + // Calculate item cost value if new or update. + if i.Cost == 0 && c.cost != nil && i.flag != itemDelete { + i.Cost = c.cost(i.Value) + } + if !c.ignoreInternalCost { + // Add the cost of internally storing the object. + i.Cost += CacheItemSize + } + + switch i.flag { + case itemNew: + victims, added := c.policy.Add(i.Key, i.Cost) + if added { + c.store.Set(i) + c.Metrics.add(keyAdd, i.Key, 1) + trackAdmission(i.Key) + } else { + c.onReject(i) + } + for _, victim := range victims { + victim.Conflict, victim.Value = c.store.Del(victim.Key, 0) + onEvict(victim) + } + + case itemUpdate: + c.policy.Update(i.Key, i.Cost) + + case itemDelete: + c.policy.Del(i.Key) // Deals with metrics updates. + _, val := c.store.Del(i.Key, i.Conflict) + c.onExit(val) + } + case <-c.stop: + return + } + } +} + +// collectMetrics just creates a new *Metrics instance and adds the pointers +// to the cache and policy instances. +func (c *Cache) collectMetrics() { + c.Metrics = newMetrics() + c.policy.CollectMetrics(c.Metrics) +} + +type metricType int + +const ( + // The following 2 keep track of hits and misses. + hit = iota + miss + // The following 3 keep track of number of keys added, updated and evicted. + keyAdd + keyUpdate + keyEvict + // The following 2 keep track of cost of keys added and evicted. + costAdd + costEvict + // The following keep track of how many sets were dropped or rejected later. + dropSets + rejectSets + // The following 2 keep track of how many gets were kept and dropped on the + // floor. 
+ dropGets + keepGets + // This should be the final enum. Other enums should be set before this. + doNotUse +) + +func stringFor(t metricType) string { + switch t { + case hit: + return "hit" + case miss: + return "miss" + case keyAdd: + return "keys-added" + case keyUpdate: + return "keys-updated" + case keyEvict: + return "keys-evicted" + case costAdd: + return "cost-added" + case costEvict: + return "cost-evicted" + case dropSets: + return "sets-dropped" + case rejectSets: + return "sets-rejected" // by policy. + case dropGets: + return "gets-dropped" + case keepGets: + return "gets-kept" + default: + return "unidentified" + } +} + +// Metrics is a snapshot of performance statistics for the lifetime of a cache instance. +type Metrics struct { + all [doNotUse][]*uint64 +} + +func newMetrics() *Metrics { + s := &Metrics{} + for i := 0; i < doNotUse; i++ { + s.all[i] = make([]*uint64, 256) + slice := s.all[i] + for j := range slice { + slice[j] = new(uint64) + } + } + return s +} + +func (p *Metrics) add(t metricType, hash, delta uint64) { + if p == nil { + return + } + valp := p.all[t] + // Avoid false sharing by padding at least 64 bytes of space between two + // atomic counters which would be incremented. + idx := (hash % 25) * 10 + atomic.AddUint64(valp[idx], delta) +} + +func (p *Metrics) get(t metricType) uint64 { + if p == nil { + return 0 + } + valp := p.all[t] + var total uint64 + for i := range valp { + total += atomic.LoadUint64(valp[i]) + } + return total +} + +// Hits is the number of Get calls where a value was found for the corresponding key. +func (p *Metrics) Hits() uint64 { + return p.get(hit) +} + +// Misses is the number of Get calls where a value was not found for the corresponding key. +func (p *Metrics) Misses() uint64 { + return p.get(miss) +} + +// KeysAdded is the total number of Set calls where a new key-value item was added. 
+func (p *Metrics) KeysAdded() uint64 { + return p.get(keyAdd) +} + +// KeysUpdated is the total number of Set calls where the value was updated. +func (p *Metrics) KeysUpdated() uint64 { + return p.get(keyUpdate) +} + +// KeysEvicted is the total number of keys evicted. +func (p *Metrics) KeysEvicted() uint64 { + return p.get(keyEvict) +} + +// CostAdded is the sum of costs that have been added (successful Set calls). +func (p *Metrics) CostAdded() uint64 { + return p.get(costAdd) +} + +// CostEvicted is the sum of all costs that have been evicted. +func (p *Metrics) CostEvicted() uint64 { + return p.get(costEvict) +} + +// SetsDropped is the number of Set calls that don't make it into internal +// buffers (due to contention or some other reason). +func (p *Metrics) SetsDropped() uint64 { + return p.get(dropSets) +} + +// SetsRejected is the number of Set calls rejected by the policy (TinyLFU). +func (p *Metrics) SetsRejected() uint64 { + return p.get(rejectSets) +} + +// GetsDropped is the number of Get counter increments that are dropped +// internally. +func (p *Metrics) GetsDropped() uint64 { + return p.get(dropGets) +} + +// GetsKept is the number of Get counter increments that are kept. +func (p *Metrics) GetsKept() uint64 { + return p.get(keepGets) +} + +// Ratio is the number of Hits over all accesses (Hits + Misses). This is the +// percentage of successful Get calls. +func (p *Metrics) Ratio() float64 { + if p == nil { + return 0.0 + } + hits, misses := p.get(hit), p.get(miss) + if hits == 0 && misses == 0 { + return 0.0 + } + return float64(hits) / float64(hits+misses) +} + +// Clear resets all the metrics. +func (p *Metrics) Clear() { + if p == nil { + return + } + for i := 0; i < doNotUse; i++ { + for j := range p.all[i] { + atomic.StoreUint64(p.all[i][j], 0) + } + } +} + +// String returns a string representation of the metrics. 
+func (p *Metrics) String() string { + if p == nil { + return "" + } + var buf bytes.Buffer + for i := 0; i < doNotUse; i++ { + t := metricType(i) + fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t)) + } + fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss)) + fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio()) + return buf.String() +} diff --git a/go/cache/ristretto/cache_test.go b/go/cache/ristretto/cache_test.go new file mode 100644 index 00000000000..a070c6f785a --- /dev/null +++ b/go/cache/ristretto/cache_test.go @@ -0,0 +1,688 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ristretto + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +var wait = time.Millisecond * 10 + +func TestCacheKeyToHash(t *testing.T) { + keyToHashCount := 0 + c, err := NewCache(&Config{ + NumCounters: 10, + MaxCost: 1000, + BufferItems: 64, + IgnoreInternalCost: true, + KeyToHash: func(key string) (uint64, uint64) { + keyToHashCount++ + return defaultStringHash(key) + }, + }) + require.NoError(t, err) + if c.SetWithCost("1", 1, 1) { + time.Sleep(wait) + val, ok := c.Get("1") + require.True(t, ok) + require.NotNil(t, val) + c.Delete("1") + } + require.Equal(t, 3, keyToHashCount) +} + +func TestCacheMaxCost(t *testing.T) { + charset := "abcdefghijklmnopqrstuvwxyz0123456789" + key := func() string { + k := make([]byte, 2) + for i := range k { + k[i] = charset[rand.Intn(len(charset))] + } + return string(k) + } + c, err := NewCache(&Config{ + NumCounters: 12960, // 36^2 * 10 + MaxCost: 1e6, // 1mb + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + stop := make(chan struct{}, 8) + for i := 0; i < 8; i++ { + go func() { + for { + select { + case <-stop: + return + default: + time.Sleep(time.Millisecond) + + k := key() + if _, ok := c.Get(k); !ok { + val := "" + if rand.Intn(100) < 10 { + val = "test" + } else { + val = strings.Repeat("a", 1000) + } + c.SetWithCost(key(), val, int64(2+len(val))) + } + } + } + }() + } + for i := 0; i < 20; i++ { + time.Sleep(time.Second) + cacheCost := c.Metrics.CostAdded() - c.Metrics.CostEvicted() + t.Logf("total cache cost: %d\n", cacheCost) + require.True(t, float64(cacheCost) <= float64(1e6*1.05)) + } + for i := 0; i < 8; i++ { + stop <- struct{}{} + } +} + +func TestUpdateMaxCost(t *testing.T) { + c, err := NewCache(&Config{ + NumCounters: 10, + MaxCost: 10, + BufferItems: 64, + }) + require.NoError(t, err) + require.Equal(t, int64(10), c.MaxCapacity()) + require.True(t, c.SetWithCost("1", 1, 1)) + 
time.Sleep(wait) + _, ok := c.Get("1") + // Set is rejected because the cost of the entry is too high + // when accounting for the internal cost of storing the entry. + require.False(t, ok) + + // Update the max cost of the cache and retry. + c.SetCapacity(1000) + require.Equal(t, int64(1000), c.MaxCapacity()) + require.True(t, c.SetWithCost("1", 1, 1)) + time.Sleep(wait) + val, ok := c.Get("1") + require.True(t, ok) + require.NotNil(t, val) + c.Delete("1") +} + +func TestNewCache(t *testing.T) { + _, err := NewCache(&Config{ + NumCounters: 0, + }) + require.Error(t, err) + + _, err = NewCache(&Config{ + NumCounters: 100, + MaxCost: 0, + }) + require.Error(t, err) + + _, err = NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 0, + }) + require.Error(t, err) + + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + require.NotNil(t, c) +} + +func TestNilCache(t *testing.T) { + var c *Cache + val, ok := c.Get("1") + require.False(t, ok) + require.Nil(t, val) + + require.False(t, c.SetWithCost("1", 1, 1)) + c.Delete("1") + c.Clear() + c.Close() +} + +func TestMultipleClose(t *testing.T) { + var c *Cache + c.Close() + + var err error + c, err = NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + c.Close() + c.Close() +} + +func TestSetAfterClose(t *testing.T) { + c, err := newTestCache() + require.NoError(t, err) + require.NotNil(t, c) + + c.Close() + require.False(t, c.SetWithCost("1", 1, 1)) +} + +func TestClearAfterClose(t *testing.T) { + c, err := newTestCache() + require.NoError(t, err) + require.NotNil(t, c) + + c.Close() + c.Clear() +} + +func TestGetAfterClose(t *testing.T) { + c, err := newTestCache() + require.NoError(t, err) + require.NotNil(t, c) + + require.True(t, c.SetWithCost("1", 1, 1)) + c.Close() + + _, ok := c.Get("2") + require.False(t, ok) +} + +func TestDelAfterClose(t *testing.T) { 
+ c, err := newTestCache() + require.NoError(t, err) + require.NotNil(t, c) + + require.True(t, c.SetWithCost("1", 1, 1)) + c.Close() + + c.Delete("1") +} + +func TestCacheProcessItems(t *testing.T) { + m := &sync.Mutex{} + evicted := make(map[uint64]struct{}) + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + IgnoreInternalCost: true, + Cost: func(value interface{}) int64 { + return int64(value.(int)) + }, + OnEvict: func(item *Item) { + m.Lock() + defer m.Unlock() + evicted[item.Key] = struct{}{} + }, + }) + require.NoError(t, err) + + var key uint64 + var conflict uint64 + + key, conflict = defaultStringHash("1") + c.setBuf <- &Item{ + flag: itemNew, + Key: key, + Conflict: conflict, + Value: 1, + Cost: 0, + } + time.Sleep(wait) + require.True(t, c.policy.Has(key)) + require.Equal(t, int64(1), c.policy.Cost(key)) + + key, conflict = defaultStringHash("1") + c.setBuf <- &Item{ + flag: itemUpdate, + Key: key, + Conflict: conflict, + Value: 2, + Cost: 0, + } + time.Sleep(wait) + require.Equal(t, int64(2), c.policy.Cost(key)) + + key, conflict = defaultStringHash("1") + c.setBuf <- &Item{ + flag: itemDelete, + Key: key, + Conflict: conflict, + } + time.Sleep(wait) + key, conflict = defaultStringHash("1") + val, ok := c.store.Get(key, conflict) + require.False(t, ok) + require.Nil(t, val) + require.False(t, c.policy.Has(1)) + + key, conflict = defaultStringHash("2") + c.setBuf <- &Item{ + flag: itemNew, + Key: key, + Conflict: conflict, + Value: 2, + Cost: 3, + } + key, conflict = defaultStringHash("3") + c.setBuf <- &Item{ + flag: itemNew, + Key: key, + Conflict: conflict, + Value: 3, + Cost: 3, + } + key, conflict = defaultStringHash("4") + c.setBuf <- &Item{ + flag: itemNew, + Key: key, + Conflict: conflict, + Value: 3, + Cost: 3, + } + key, conflict = defaultStringHash("5") + c.setBuf <- &Item{ + flag: itemNew, + Key: key, + Conflict: conflict, + Value: 3, + Cost: 5, + } + time.Sleep(wait) + m.Lock() + require.NotEqual(t, 0, 
len(evicted)) + m.Unlock() + + defer func() { + require.NotNil(t, recover()) + }() + c.Close() + c.setBuf <- &Item{flag: itemNew} +} + +func TestCacheGet(t *testing.T) { + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + IgnoreInternalCost: true, + Metrics: true, + }) + require.NoError(t, err) + + key, conflict := defaultStringHash("1") + i := Item{ + Key: key, + Conflict: conflict, + Value: 1, + } + c.store.Set(&i) + val, ok := c.Get("1") + require.True(t, ok) + require.NotNil(t, val) + + val, ok = c.Get("2") + require.False(t, ok) + require.Nil(t, val) + + // 0.5 and not 1.0 because we tried Getting each item twice + require.Equal(t, 0.5, c.Metrics.Ratio()) + + c = nil + val, ok = c.Get("0") + require.False(t, ok) + require.Nil(t, val) +} + +// retrySet calls SetWithCost until the item is accepted by the cache. +func retrySet(t *testing.T, c *Cache, key string, value int, cost int64) { + for { + if set := c.SetWithCost(key, value, cost); !set { + time.Sleep(wait) + continue + } + + time.Sleep(wait) + val, ok := c.Get(key) + require.True(t, ok) + require.NotNil(t, val) + require.Equal(t, value, val.(int)) + return + } +} + +func TestCacheSet(t *testing.T) { + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + IgnoreInternalCost: true, + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + + retrySet(t, c, "1", 1, 1) + + c.SetWithCost("1", 2, 2) + val, ok := c.store.Get(defaultStringHash("1")) + require.True(t, ok) + require.Equal(t, 2, val.(int)) + + c.stop <- struct{}{} + for i := 0; i < setBufSize; i++ { + key, conflict := defaultStringHash("1") + c.setBuf <- &Item{ + flag: itemUpdate, + Key: key, + Conflict: conflict, + Value: 1, + Cost: 1, + } + } + require.False(t, c.SetWithCost("2", 2, 1)) + require.Equal(t, uint64(1), c.Metrics.SetsDropped()) + close(c.setBuf) + close(c.stop) + + c = nil + require.False(t, c.SetWithCost("1", 1, 1)) +} + +func TestCacheInternalCost(t *testing.T) { + c, err := 
NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + + // Get should return false because the cache's cost is too small to store the item + // when accounting for the internal cost. + c.SetWithCost("1", 1, 1) + time.Sleep(wait) + _, ok := c.Get("1") + require.False(t, ok) +} + +func TestCacheDel(t *testing.T) { + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + }) + require.NoError(t, err) + + c.SetWithCost("1", 1, 1) + c.Delete("1") + // The deletes and sets are pushed through the setbuf. It might be possible + // that the delete is not processed before the following get is called. So + // wait for a millisecond for things to be processed. + time.Sleep(time.Millisecond) + val, ok := c.Get("1") + require.False(t, ok) + require.Nil(t, val) + + c = nil + defer func() { + require.Nil(t, recover()) + }() + c.Delete("1") +} + +func TestCacheClear(t *testing.T) { + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + IgnoreInternalCost: true, + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + + for i := 0; i < 10; i++ { + c.SetWithCost(strconv.Itoa(i), i, 1) + } + time.Sleep(wait) + require.Equal(t, uint64(10), c.Metrics.KeysAdded()) + + c.Clear() + require.Equal(t, uint64(0), c.Metrics.KeysAdded()) + + for i := 0; i < 10; i++ { + val, ok := c.Get(strconv.Itoa(i)) + require.False(t, ok) + require.Nil(t, val) + } +} + +func TestCacheMetrics(t *testing.T) { + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + IgnoreInternalCost: true, + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + + for i := 0; i < 10; i++ { + c.SetWithCost(strconv.Itoa(i), i, 1) + } + time.Sleep(wait) + m := c.Metrics + require.Equal(t, uint64(10), m.KeysAdded()) +} + +func TestMetrics(t *testing.T) { + newMetrics() +} + +func TestNilMetrics(t *testing.T) { + var m *Metrics + for _, f := range []func() uint64{ + m.Hits, + m.Misses, + 
m.KeysAdded, + m.KeysEvicted, + m.CostEvicted, + m.SetsDropped, + m.SetsRejected, + m.GetsDropped, + m.GetsKept, + } { + require.Equal(t, uint64(0), f()) + } +} + +func TestMetricsAddGet(t *testing.T) { + m := newMetrics() + m.add(hit, 1, 1) + m.add(hit, 2, 2) + m.add(hit, 3, 3) + require.Equal(t, uint64(6), m.Hits()) + + m = nil + m.add(hit, 1, 1) + require.Equal(t, uint64(0), m.Hits()) +} + +func TestMetricsRatio(t *testing.T) { + m := newMetrics() + require.Equal(t, float64(0), m.Ratio()) + + m.add(hit, 1, 1) + m.add(hit, 2, 2) + m.add(miss, 1, 1) + m.add(miss, 2, 2) + require.Equal(t, 0.5, m.Ratio()) + + m = nil + require.Equal(t, float64(0), m.Ratio()) +} + +func TestMetricsString(t *testing.T) { + m := newMetrics() + m.add(hit, 1, 1) + m.add(miss, 1, 1) + m.add(keyAdd, 1, 1) + m.add(keyUpdate, 1, 1) + m.add(keyEvict, 1, 1) + m.add(costAdd, 1, 1) + m.add(costEvict, 1, 1) + m.add(dropSets, 1, 1) + m.add(rejectSets, 1, 1) + m.add(dropGets, 1, 1) + m.add(keepGets, 1, 1) + require.Equal(t, uint64(1), m.Hits()) + require.Equal(t, uint64(1), m.Misses()) + require.Equal(t, 0.5, m.Ratio()) + require.Equal(t, uint64(1), m.KeysAdded()) + require.Equal(t, uint64(1), m.KeysUpdated()) + require.Equal(t, uint64(1), m.KeysEvicted()) + require.Equal(t, uint64(1), m.CostAdded()) + require.Equal(t, uint64(1), m.CostEvicted()) + require.Equal(t, uint64(1), m.SetsDropped()) + require.Equal(t, uint64(1), m.SetsRejected()) + require.Equal(t, uint64(1), m.GetsDropped()) + require.Equal(t, uint64(1), m.GetsKept()) + + require.NotEqual(t, 0, len(m.String())) + + m = nil + require.Equal(t, 0, len(m.String())) + + require.Equal(t, "unidentified", stringFor(doNotUse)) +} + +func TestCacheMetricsClear(t *testing.T) { + c, err := NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + Metrics: true, + }) + require.NoError(t, err) + + c.SetWithCost("1", 1, 1) + stop := make(chan struct{}) + go func() { + for { + select { + case <-stop: + return + default: + c.Get("1") + } 
+		}
+	}()
+	time.Sleep(wait)
+	c.Clear()
+	stop <- struct{}{}
+	c.Metrics = nil
+	c.Metrics.Clear()
+}
+
+// Regression test for bug https://github.com/dgraph-io/ristretto/issues/167
+func TestDropUpdates(t *testing.T) {
+	originalSetBugSize := setBufSize
+	defer func() { setBufSize = originalSetBugSize }()
+
+	test := func() {
+		// droppedMap stores the items dropped from the cache.
+		droppedMap := make(map[int]struct{})
+		lastEvictedSet := int64(-1)
+
+		var err error
+		handler := func(_ interface{}, value interface{}) {
+			v := value.(string)
+			lastEvictedSet, err = strconv.ParseInt(string(v), 10, 32)
+			require.NoError(t, err)
+
+			_, ok := droppedMap[int(lastEvictedSet)]
+			if ok {
+				panic(fmt.Sprintf("val = %+v was dropped but it got evicted. Dropped items: %+v\n",
+					lastEvictedSet, droppedMap))
+			}
+		}
+
+		// This is important. The race condition shows up only when the setBuf
+		// is full and that's why we reduce the buf size here. The test will
+		// try to fill up the setbuf to its capacity and then perform an
+		// update on a key.
+		setBufSize = 10
+
+		c, err := NewCache(&Config{
+			NumCounters: 100,
+			MaxCost:     10,
+			BufferItems: 64,
+			Metrics:     true,
+			OnEvict: func(item *Item) {
+				if item.Value != nil {
+					handler(nil, item.Value)
+				}
+			},
+		})
+		require.NoError(t, err)
+
+		for i := 0; i < 5*setBufSize; i++ {
+			v := fmt.Sprintf("%0100d", i)
+			// We're updating the same key.
+			if !c.SetWithCost("0", v, 1) {
+				// The race condition doesn't show up without this sleep.
+				time.Sleep(time.Microsecond)
+				droppedMap[i] = struct{}{}
+			}
+		}
+		// Wait for all the items to be processed.
+		c.Wait()
+		// This will cause eviction from the cache.
+		require.True(t, c.SetWithCost("1", nil, 10))
+		c.Close()
+	}
+
+	// Run the test 100 times since it's not reliable.
+ for i := 0; i < 100; i++ { + test() + } +} + +func newTestCache() (*Cache, error) { + return NewCache(&Config{ + NumCounters: 100, + MaxCost: 10, + BufferItems: 64, + Metrics: true, + }) +} diff --git a/go/cache/ristretto/policy.go b/go/cache/ristretto/policy.go new file mode 100644 index 00000000000..38ffbf6d3d6 --- /dev/null +++ b/go/cache/ristretto/policy.go @@ -0,0 +1,422 @@ +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ristretto + +import ( + "math" + "sync" + "sync/atomic" + + "vitess.io/vitess/go/cache/ristretto/bloom" +) + +const ( + // lfuSample is the number of items to sample when looking at eviction + // candidates. 5 seems to be the most optimal number [citation needed]. + lfuSample = 5 +) + +// policy is the interface encapsulating eviction/admission behavior. +// +// TODO: remove this interface and just rename defaultPolicy to policy, as we +// are probably only going to use/implement/maintain one policy. +type policy interface { + ringConsumer + // Add attempts to Add the key-cost pair to the Policy. It returns a slice + // of evicted keys and a bool denoting whether or not the key-cost pair + // was added. If it returns true, the key should be stored in cache. + Add(uint64, int64) ([]*Item, bool) + // Has returns true if the key exists in the Policy. + Has(uint64) bool + // Del deletes the key from the Policy. 
+	Del(uint64)
+	// Used returns the amount of used capacity.
+	Used() int64
+	// Close stops all goroutines and closes all channels.
+	Close()
+	// Update updates the cost value for the key.
+	Update(uint64, int64)
+	// Cost returns the cost value of a key or -1 if missing.
+	Cost(uint64) int64
+	// Optionally, set stats object to track how policy is performing.
+	CollectMetrics(*Metrics)
+	// Clear zeroes out all counters and clears hashmaps.
+	Clear()
+	// MaxCost returns the current max cost of the cache policy.
+	MaxCost() int64
+	// UpdateMaxCost updates the max cost of the cache policy.
+	UpdateMaxCost(int64)
+}
+
+func newPolicy(numCounters, maxCost int64) policy {
+	return newDefaultPolicy(numCounters, maxCost)
+}
+
+type defaultPolicy struct {
+	sync.Mutex
+	admit       *tinyLFU
+	evict       *sampledLFU
+	itemsCh     chan []uint64
+	stop        chan struct{}
+	isClosed    bool
+	metrics     *Metrics
+	numCounters int64
+	maxCost     int64
+}
+
+func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy {
+	p := &defaultPolicy{
+		admit:       newTinyLFU(numCounters),
+		evict:       newSampledLFU(maxCost),
+		itemsCh:     make(chan []uint64, 3),
+		stop:        make(chan struct{}),
+		numCounters: numCounters,
+		maxCost:     maxCost,
+	}
+	go p.processItems()
+	return p
+}
+
+func (p *defaultPolicy) CollectMetrics(metrics *Metrics) {
+	p.metrics = metrics
+	p.evict.metrics = metrics
+}
+
+type policyPair struct {
+	key  uint64
+	cost int64
+}
+
+func (p *defaultPolicy) processItems() {
+	for {
+		select {
+		case items := <-p.itemsCh:
+			p.Lock()
+			p.admit.Push(items)
+			p.Unlock()
+		case <-p.stop:
+			return
+		}
+	}
+}
+
+func (p *defaultPolicy) Push(keys []uint64) bool {
+	if p.isClosed {
+		return false
+	}
+
+	if len(keys) == 0 {
+		return true
+	}
+
+	select {
+	case p.itemsCh <- keys:
+		p.metrics.add(keepGets, keys[0], uint64(len(keys)))
+		return true
+	default:
+		p.metrics.add(dropGets, keys[0], uint64(len(keys)))
+		return false
+	}
+}
+
+// Add decides whether the item with the given key and cost should be accepted
by +// the policy. It returns the list of victims that have been evicted and a boolean +// indicating whether the incoming item should be accepted. +func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { + p.Lock() + defer p.Unlock() + + // Cannot add an item bigger than entire cache. + if cost > p.evict.getMaxCost() { + return nil, false + } + + // No need to go any further if the item is already in the cache. + if has := p.evict.updateIfHas(key, cost); has { + // An update does not count as an addition, so return false. + return nil, false + } + + // If the execution reaches this point, the key doesn't exist in the cache. + // Calculate the remaining room in the cache (usually bytes). + room := p.evict.roomLeft(cost) + if room >= 0 { + // There's enough room in the cache to store the new item without + // overflowing. Do that now and stop here. + p.evict.add(key, cost) + p.metrics.add(costAdd, key, uint64(cost)) + return nil, true + } + + // incHits is the hit count for the incoming item. + incHits := p.admit.Estimate(key) + // sample is the eviction candidate pool to be filled via random sampling. + // TODO: perhaps we should use a min heap here. Right now our time + // complexity is N for finding the min. Min heap should bring it down to + // O(lg N). + sample := make([]*policyPair, 0, lfuSample) + // As items are evicted they will be appended to victims. + victims := make([]*Item, 0) + + // Delete victims until there's enough space or a minKey is found that has + // more hits than incoming item. + for ; room < 0; room = p.evict.roomLeft(cost) { + // Fill up empty slots in sample. + sample = p.evict.fillSample(sample) + + // Find minimally used item in sample. + minKey, minHits, minID, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0) + for i, pair := range sample { + // Look up hit count for sample key. 
+ if hits := p.admit.Estimate(pair.key); hits < minHits { + minKey, minHits, minID, minCost = pair.key, hits, i, pair.cost + } + } + + // If the incoming item isn't worth keeping in the policy, reject. + if incHits < minHits { + p.metrics.add(rejectSets, key, 1) + return victims, false + } + + // Delete the victim from metadata. + p.evict.del(minKey) + + // Delete the victim from sample. + sample[minID] = sample[len(sample)-1] + sample = sample[:len(sample)-1] + // Store victim in evicted victims slice. + victims = append(victims, &Item{ + Key: minKey, + Conflict: 0, + Cost: minCost, + }) + } + + p.evict.add(key, cost) + p.metrics.add(costAdd, key, uint64(cost)) + return victims, true +} + +func (p *defaultPolicy) Has(key uint64) bool { + p.Lock() + _, exists := p.evict.keyCosts[key] + p.Unlock() + return exists +} + +func (p *defaultPolicy) Del(key uint64) { + p.Lock() + p.evict.del(key) + p.Unlock() +} + +func (p *defaultPolicy) Used() int64 { + p.Lock() + used := p.evict.used + p.Unlock() + return used +} + +func (p *defaultPolicy) Update(key uint64, cost int64) { + p.Lock() + p.evict.updateIfHas(key, cost) + p.Unlock() +} + +func (p *defaultPolicy) Cost(key uint64) int64 { + p.Lock() + if cost, found := p.evict.keyCosts[key]; found { + p.Unlock() + return cost + } + p.Unlock() + return -1 +} + +func (p *defaultPolicy) Clear() { + p.Lock() + p.admit = newTinyLFU(p.numCounters) + p.evict = newSampledLFU(p.maxCost) + p.Unlock() +} + +func (p *defaultPolicy) Close() { + if p.isClosed { + return + } + + // Block until the p.processItems goroutine returns. + p.stop <- struct{}{} + close(p.stop) + close(p.itemsCh) + p.isClosed = true +} + +func (p *defaultPolicy) MaxCost() int64 { + if p == nil || p.evict == nil { + return 0 + } + return p.evict.getMaxCost() +} + +func (p *defaultPolicy) UpdateMaxCost(maxCost int64) { + if p == nil || p.evict == nil { + return + } + p.evict.updateMaxCost(maxCost) +} + +// sampledLFU is an eviction helper storing key-cost pairs. 
+type sampledLFU struct { + keyCosts map[uint64]int64 + maxCost int64 + used int64 + metrics *Metrics +} + +func newSampledLFU(maxCost int64) *sampledLFU { + return &sampledLFU{ + keyCosts: make(map[uint64]int64), + maxCost: maxCost, + } +} + +func (p *sampledLFU) getMaxCost() int64 { + return atomic.LoadInt64(&p.maxCost) +} + +func (p *sampledLFU) updateMaxCost(maxCost int64) { + atomic.StoreInt64(&p.maxCost, maxCost) +} + +func (p *sampledLFU) roomLeft(cost int64) int64 { + return p.getMaxCost() - (p.used + cost) +} + +func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair { + if len(in) >= lfuSample { + return in + } + for key, cost := range p.keyCosts { + in = append(in, &policyPair{key, cost}) + if len(in) >= lfuSample { + return in + } + } + return in +} + +func (p *sampledLFU) del(key uint64) { + cost, ok := p.keyCosts[key] + if !ok { + return + } + p.used -= cost + delete(p.keyCosts, key) + p.metrics.add(costEvict, key, uint64(cost)) + p.metrics.add(keyEvict, key, 1) +} + +func (p *sampledLFU) add(key uint64, cost int64) { + p.keyCosts[key] = cost + p.used += cost +} + +func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool { + if prev, found := p.keyCosts[key]; found { + // Update the cost of an existing key, but don't worry about evicting. + // Evictions will be handled the next time a new item is added. + p.metrics.add(keyUpdate, key, 1) + if prev > cost { + diff := prev - cost + p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1)) + } else if cost > prev { + diff := cost - prev + p.metrics.add(costAdd, key, uint64(diff)) + } + p.used += cost - prev + p.keyCosts[key] = cost + return true + } + return false +} + +func (p *sampledLFU) clear() { + p.used = 0 + p.keyCosts = make(map[uint64]int64) +} + +// tinyLFU is an admission helper that keeps track of access frequency using +// tiny (4-bit) counters in the form of a count-min sketch. +// tinyLFU is NOT thread safe. 
+type tinyLFU struct { + freq *cmSketch + door *bloom.Bloom + incrs int64 + resetAt int64 +} + +func newTinyLFU(numCounters int64) *tinyLFU { + return &tinyLFU{ + freq: newCmSketch(numCounters), + door: bloom.NewBloomFilterWithErrorRate(uint64(numCounters), 0.01), + resetAt: numCounters, + } +} + +func (p *tinyLFU) Push(keys []uint64) { + for _, key := range keys { + p.Increment(key) + } +} + +func (p *tinyLFU) Estimate(key uint64) int64 { + hits := p.freq.Estimate(key) + if p.door.Has(key) { + hits++ + } + return hits +} + +func (p *tinyLFU) Increment(key uint64) { + // Flip doorkeeper bit if not already done. + if added := p.door.AddIfNotHas(key); !added { + // Increment count-min counter if doorkeeper bit is already set. + p.freq.Increment(key) + } + p.incrs++ + if p.incrs >= p.resetAt { + p.reset() + } +} + +func (p *tinyLFU) reset() { + // Zero out incrs. + p.incrs = 0 + // clears doorkeeper bits + p.door.Clear() + // halves count-min counters + p.freq.Reset() +} + +func (p *tinyLFU) clear() { + p.incrs = 0 + p.freq.Clear() + p.door.Clear() +} diff --git a/go/cache/ristretto/policy_test.go b/go/cache/ristretto/policy_test.go new file mode 100644 index 00000000000..c864b6c74d0 --- /dev/null +++ b/go/cache/ristretto/policy_test.go @@ -0,0 +1,276 @@ +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ristretto + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPolicy(t *testing.T) { + defer func() { + require.Nil(t, recover()) + }() + newPolicy(100, 10) +} + +func TestPolicyMetrics(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.CollectMetrics(newMetrics()) + require.NotNil(t, p.metrics) + require.NotNil(t, p.evict.metrics) +} + +func TestPolicyProcessItems(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.itemsCh <- []uint64{1, 2, 2} + time.Sleep(wait) + p.Lock() + require.Equal(t, int64(2), p.admit.Estimate(2)) + require.Equal(t, int64(1), p.admit.Estimate(1)) + p.Unlock() + + p.stop <- struct{}{} + p.itemsCh <- []uint64{3, 3, 3} + time.Sleep(wait) + p.Lock() + require.Equal(t, int64(0), p.admit.Estimate(3)) + p.Unlock() +} + +func TestPolicyPush(t *testing.T) { + p := newDefaultPolicy(100, 10) + require.True(t, p.Push([]uint64{})) + + keepCount := 0 + for i := 0; i < 10; i++ { + if p.Push([]uint64{1, 2, 3, 4, 5}) { + keepCount++ + } + } + require.NotEqual(t, 0, keepCount) +} + +func TestPolicyAdd(t *testing.T) { + p := newDefaultPolicy(1000, 100) + if victims, added := p.Add(1, 101); victims != nil || added { + t.Fatal("can't add an item bigger than entire cache") + } + p.Lock() + p.evict.add(1, 1) + p.admit.Increment(1) + p.admit.Increment(2) + p.admit.Increment(3) + p.Unlock() + + victims, added := p.Add(1, 1) + require.Nil(t, victims) + require.False(t, added) + + victims, added = p.Add(2, 20) + require.Nil(t, victims) + require.True(t, added) + + victims, added = p.Add(3, 90) + require.NotNil(t, victims) + require.True(t, added) + + victims, added = p.Add(4, 20) + require.NotNil(t, victims) + require.False(t, added) +} + +func TestPolicyHas(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Add(1, 1) + require.True(t, p.Has(1)) + require.False(t, p.Has(2)) +} + +func TestPolicyDel(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Add(1, 1) + p.Del(1) + p.Del(2) + require.False(t, 
p.Has(1)) + require.False(t, p.Has(2)) +} + +func TestPolicyCap(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Add(1, 1) + require.Equal(t, int64(9), p.MaxCost()-p.Used()) +} + +func TestPolicyUpdate(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Add(1, 1) + p.Update(1, 2) + p.Lock() + require.Equal(t, int64(2), p.evict.keyCosts[1]) + p.Unlock() +} + +func TestPolicyCost(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Add(1, 2) + require.Equal(t, int64(2), p.Cost(1)) + require.Equal(t, int64(-1), p.Cost(2)) +} + +func TestPolicyClear(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Add(1, 1) + p.Add(2, 2) + p.Add(3, 3) + p.Clear() + require.Equal(t, int64(10), p.MaxCost()-p.Used()) + require.False(t, p.Has(1)) + require.False(t, p.Has(2)) + require.False(t, p.Has(3)) +} + +func TestPolicyClose(t *testing.T) { + defer func() { + require.NotNil(t, recover()) + }() + + p := newDefaultPolicy(100, 10) + p.Add(1, 1) + p.Close() + p.itemsCh <- []uint64{1} +} + +func TestPushAfterClose(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Close() + require.False(t, p.Push([]uint64{1, 2})) +} + +func TestAddAfterClose(t *testing.T) { + p := newDefaultPolicy(100, 10) + p.Close() + p.Add(1, 1) +} + +func TestSampledLFUAdd(t *testing.T) { + e := newSampledLFU(4) + e.add(1, 1) + e.add(2, 2) + e.add(3, 1) + require.Equal(t, int64(4), e.used) + require.Equal(t, int64(2), e.keyCosts[2]) +} + +func TestSampledLFUDel(t *testing.T) { + e := newSampledLFU(4) + e.add(1, 1) + e.add(2, 2) + e.del(2) + require.Equal(t, int64(1), e.used) + _, ok := e.keyCosts[2] + require.False(t, ok) + e.del(4) +} + +func TestSampledLFUUpdate(t *testing.T) { + e := newSampledLFU(4) + e.add(1, 1) + require.True(t, e.updateIfHas(1, 2)) + require.Equal(t, int64(2), e.used) + require.False(t, e.updateIfHas(2, 2)) +} + +func TestSampledLFUClear(t *testing.T) { + e := newSampledLFU(4) + e.add(1, 1) + e.add(2, 2) + e.add(3, 1) + e.clear() + require.Equal(t, 0, len(e.keyCosts)) + 
require.Equal(t, int64(0), e.used) +} + +func TestSampledLFURoom(t *testing.T) { + e := newSampledLFU(16) + e.add(1, 1) + e.add(2, 2) + e.add(3, 3) + require.Equal(t, int64(6), e.roomLeft(4)) +} + +func TestSampledLFUSample(t *testing.T) { + e := newSampledLFU(16) + e.add(4, 4) + e.add(5, 5) + sample := e.fillSample([]*policyPair{ + {1, 1}, + {2, 2}, + {3, 3}, + }) + k := sample[len(sample)-1].key + require.Equal(t, 5, len(sample)) + require.NotEqual(t, 1, k) + require.NotEqual(t, 2, k) + require.NotEqual(t, 3, k) + require.Equal(t, len(sample), len(e.fillSample(sample))) + e.del(5) + sample = e.fillSample(sample[:len(sample)-2]) + require.Equal(t, 4, len(sample)) +} + +func TestTinyLFUIncrement(t *testing.T) { + a := newTinyLFU(4) + a.Increment(1) + a.Increment(1) + a.Increment(1) + require.True(t, a.door.Has(1)) + require.Equal(t, int64(2), a.freq.Estimate(1)) + + a.Increment(1) + require.False(t, a.door.Has(1)) + require.Equal(t, int64(1), a.freq.Estimate(1)) +} + +func TestTinyLFUEstimate(t *testing.T) { + a := newTinyLFU(8) + a.Increment(1) + a.Increment(1) + a.Increment(1) + require.Equal(t, int64(3), a.Estimate(1)) + require.Equal(t, int64(0), a.Estimate(2)) +} + +func TestTinyLFUPush(t *testing.T) { + a := newTinyLFU(16) + a.Push([]uint64{1, 2, 2, 3, 3, 3}) + require.Equal(t, int64(1), a.Estimate(1)) + require.Equal(t, int64(2), a.Estimate(2)) + require.Equal(t, int64(3), a.Estimate(3)) + require.Equal(t, int64(6), a.incrs) +} + +func TestTinyLFUClear(t *testing.T) { + a := newTinyLFU(16) + a.Push([]uint64{1, 3, 3, 3}) + a.clear() + require.Equal(t, int64(0), a.incrs) + require.Equal(t, int64(0), a.Estimate(3)) +} diff --git a/go/cache/ristretto/ring.go b/go/cache/ristretto/ring.go new file mode 100644 index 00000000000..afc2c1559f8 --- /dev/null +++ b/go/cache/ristretto/ring.go @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ristretto + +import ( + "sync" +) + +// ringConsumer is the user-defined object responsible for receiving and +// processing items in batches when buffers are drained. +type ringConsumer interface { + Push([]uint64) bool +} + +// ringStripe is a singular ring buffer that is not concurrent safe. +type ringStripe struct { + cons ringConsumer + data []uint64 + capa int +} + +func newRingStripe(cons ringConsumer, capa int64) *ringStripe { + return &ringStripe{ + cons: cons, + data: make([]uint64, 0, capa), + capa: int(capa), + } +} + +// Push appends an item in the ring buffer and drains (copies items and +// sends to Consumer) if full. +func (s *ringStripe) Push(item uint64) { + s.data = append(s.data, item) + // Decide if the ring buffer should be drained. + if len(s.data) >= s.capa { + // Send elements to consumer and create a new ring stripe. + if s.cons.Push(s.data) { + s.data = make([]uint64, 0, s.capa) + } else { + s.data = s.data[:0] + } + } +} + +// ringBuffer stores multiple buffers (stripes) and distributes Pushed items +// between them to lower contention. +// +// This implements the "batching" process described in the BP-Wrapper paper +// (section III part A). +type ringBuffer struct { + pool *sync.Pool +} + +// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will +// be called when individual stripes are full and need to drain their elements. 
+func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer { + // LOSSY buffers use a very simple sync.Pool for concurrently reusing + // stripes. We do lose some stripes due to GC (unheld items in sync.Pool + // are cleared), but the performance gains generally outweigh the small + // percentage of elements lost. The performance primarily comes from + // low-level runtime functions used in the standard library that aren't + // available to us (such as runtime_procPin()). + return &ringBuffer{ + pool: &sync.Pool{ + New: func() interface{} { return newRingStripe(cons, capa) }, + }, + } +} + +// Push adds an element to one of the internal stripes and possibly drains if +// the stripe becomes full. +func (b *ringBuffer) Push(item uint64) { + // Reuse or create a new stripe. + stripe := b.pool.Get().(*ringStripe) + stripe.Push(item) + b.pool.Put(stripe) +} diff --git a/go/cache/ristretto/ring_test.go b/go/cache/ristretto/ring_test.go new file mode 100644 index 00000000000..0dbe962ccc6 --- /dev/null +++ b/go/cache/ristretto/ring_test.go @@ -0,0 +1,87 @@ +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ristretto + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +type testConsumer struct { + push func([]uint64) + save bool +} + +func (c *testConsumer) Push(items []uint64) bool { + if c.save { + c.push(items) + return true + } + return false +} + +func TestRingDrain(t *testing.T) { + drains := 0 + r := newRingBuffer(&testConsumer{ + push: func(items []uint64) { + drains++ + }, + save: true, + }, 1) + for i := 0; i < 100; i++ { + r.Push(uint64(i)) + } + require.Equal(t, 100, drains, "buffers shouldn't be dropped with BufferItems == 1") +} + +func TestRingReset(t *testing.T) { + drains := 0 + r := newRingBuffer(&testConsumer{ + push: func(items []uint64) { + drains++ + }, + save: false, + }, 4) + for i := 0; i < 100; i++ { + r.Push(uint64(i)) + } + require.Equal(t, 0, drains, "testConsumer shouldn't be draining") +} + +func TestRingConsumer(t *testing.T) { + mu := &sync.Mutex{} + drainItems := make(map[uint64]struct{}) + r := newRingBuffer(&testConsumer{ + push: func(items []uint64) { + mu.Lock() + defer mu.Unlock() + for i := range items { + drainItems[items[i]] = struct{}{} + } + }, + save: true, + }, 4) + for i := 0; i < 100; i++ { + r.Push(uint64(i)) + } + l := len(drainItems) + require.NotEqual(t, 0, l) + require.True(t, l <= 100) +} diff --git a/go/cache/ristretto/sketch.go b/go/cache/ristretto/sketch.go new file mode 100644 index 00000000000..ce0504a2a83 --- /dev/null +++ b/go/cache/ristretto/sketch.go @@ -0,0 +1,156 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package ristretto includes multiple probabilistic data structures needed for
+// admission/eviction metadata. Most are Counting Bloom Filter variations, but
+// a caching-specific feature that is also required is a "freshness" mechanism,
+// which basically serves as a "lifetime" process. This freshness mechanism
+// was described in the original TinyLFU paper [1], but other mechanisms may
+// be better suited for certain data distributions.
+//
+// [1]: https://arxiv.org/abs/1512.00727
+package ristretto
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+)
+
+// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily
+// based on Damian Gryski's CM4 [1].
+//
+// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go
+type cmSketch struct {
+	rows [cmDepth]cmRow
+	seed [cmDepth]uint64
+	mask uint64
+}
+
+const (
+	// cmDepth is the number of counter copies to store (think of it as rows).
+	cmDepth = 4
+)
+
+func newCmSketch(numCounters int64) *cmSketch {
+	if numCounters == 0 {
+		panic("cmSketch: bad numCounters")
+	}
+	// Get the next power of 2 for better cache performance.
+	numCounters = next2Power(numCounters)
+	sketch := &cmSketch{mask: uint64(numCounters - 1)}
+	// Initialize rows of counters and seeds.
+	source := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for i := 0; i < cmDepth; i++ {
+		sketch.seed[i] = source.Uint64()
+		sketch.rows[i] = newCmRow(numCounters)
+	}
+	return sketch
+}
+
+// Increment increments the count(ers) for the specified key.
+func (s *cmSketch) Increment(hashed uint64) { + for i := range s.rows { + s.rows[i].increment((hashed ^ s.seed[i]) & s.mask) + } +} + +// Estimate returns the value of the specified key. +func (s *cmSketch) Estimate(hashed uint64) int64 { + min := byte(255) + for i := range s.rows { + val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask) + if val < min { + min = val + } + } + return int64(min) +} + +// Reset halves all counter values. +func (s *cmSketch) Reset() { + for _, r := range s.rows { + r.reset() + } +} + +// Clear zeroes all counters. +func (s *cmSketch) Clear() { + for _, r := range s.rows { + r.clear() + } +} + +// cmRow is a row of bytes, with each byte holding two counters. +type cmRow []byte + +func newCmRow(numCounters int64) cmRow { + return make(cmRow, numCounters/2) +} + +func (r cmRow) get(n uint64) byte { + return byte(r[n/2]>>((n&1)*4)) & 0x0f +} + +func (r cmRow) increment(n uint64) { + // Index of the counter. + i := n / 2 + // Shift distance (even 0, odd 4). + s := (n & 1) * 4 + // Counter value. + v := (r[i] >> s) & 0x0f + // Only increment if not max value (overflow wrap is bad for LFU). + if v < 15 { + r[i] += 1 << s + } +} + +func (r cmRow) reset() { + // Halve each counter. + for i := range r { + r[i] = (r[i] >> 1) & 0x77 + } +} + +func (r cmRow) clear() { + // Zero each counter. + for i := range r { + r[i] = 0 + } +} + +func (r cmRow) string() string { + s := "" + for i := uint64(0); i < uint64(len(r)*2); i++ { + s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f) + } + s = s[:len(s)-1] + return s +} + +// next2Power rounds x up to the next power of 2, if it's not already one. 
+func next2Power(x int64) int64 { + x-- + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + x |= x >> 32 + x++ + return x +} diff --git a/go/cache/ristretto/sketch_test.go b/go/cache/ristretto/sketch_test.go new file mode 100644 index 00000000000..f0d523df559 --- /dev/null +++ b/go/cache/ristretto/sketch_test.go @@ -0,0 +1,102 @@ +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ristretto + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSketch(t *testing.T) { + defer func() { + require.NotNil(t, recover()) + }() + + s := newCmSketch(5) + require.Equal(t, uint64(7), s.mask) + newCmSketch(0) +} + +func TestSketchIncrement(t *testing.T) { + s := newCmSketch(16) + s.Increment(1) + s.Increment(5) + s.Increment(9) + for i := 0; i < cmDepth; i++ { + if s.rows[i].string() != s.rows[0].string() { + break + } + require.False(t, i == cmDepth-1, "identical rows, bad seeding") + } +} + +func TestSketchEstimate(t *testing.T) { + s := newCmSketch(16) + s.Increment(1) + s.Increment(1) + require.Equal(t, int64(2), s.Estimate(1)) + require.Equal(t, int64(0), s.Estimate(0)) +} + +func TestSketchReset(t *testing.T) { + s := newCmSketch(16) + s.Increment(1) + s.Increment(1) + s.Increment(1) + s.Increment(1) + s.Reset() + require.Equal(t, int64(2), s.Estimate(1)) +} + +func TestSketchClear(t *testing.T) { + s := newCmSketch(16) + for i := 0; i < 16; i++ { + s.Increment(uint64(i)) + } + s.Clear() + for i := 0; i < 16; i++ { + require.Equal(t, int64(0), s.Estimate(uint64(i))) + } +} + +func TestNext2Power(t *testing.T) { + sz := 12 << 30 + szf := float64(sz) * 0.01 + val := int64(szf) + t.Logf("szf = %.2f val = %d\n", szf, val) + pow := next2Power(val) + t.Logf("pow = %d. mult 4 = %d\n", pow, pow*4) +} + +func BenchmarkSketchIncrement(b *testing.B) { + s := newCmSketch(16) + b.SetBytes(1) + for n := 0; n < b.N; n++ { + s.Increment(1) + } +} + +func BenchmarkSketchEstimate(b *testing.B) { + s := newCmSketch(16) + s.Increment(1) + b.SetBytes(1) + for n := 0; n < b.N; n++ { + s.Estimate(1) + } +} diff --git a/go/cache/ristretto/store.go b/go/cache/ristretto/store.go new file mode 100644 index 00000000000..44e5ad8b147 --- /dev/null +++ b/go/cache/ristretto/store.go @@ -0,0 +1,240 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ristretto + +import ( + "sync" +) + +// TODO: Do we need this to be a separate struct from Item? +type storeItem struct { + key uint64 + conflict uint64 + value interface{} +} + +// store is the interface fulfilled by all hash map implementations in this +// file. Some hash map implementations are better suited for certain data +// distributions than others, so this allows us to abstract that out for use +// in Ristretto. +// +// Every store is safe for concurrent usage. +type store interface { + // Get returns the value associated with the key parameter. + Get(uint64, uint64) (interface{}, bool) + // Set adds the key-value pair to the Map or updates the value if it's + // already present. The key-value pair is passed as a pointer to an + // item object. + Set(*Item) + // Del deletes the key-value pair from the Map. + Del(uint64, uint64) (uint64, interface{}) + // Update attempts to update the key with a new value and returns true if + // successful. + Update(*Item) (interface{}, bool) + // Clear clears all contents of the store. + Clear(onEvict itemCallback) + // ForEach yields all the values in the store + ForEach(forEach func(interface{}) bool) + // Len returns the number of entries in the store + Len() int +} + +// newStore returns the default store implementation. 
+func newStore() store { + return newShardedMap() +} + +const numShards uint64 = 256 + +type shardedMap struct { + shards []*lockedMap +} + +func newShardedMap() *shardedMap { + sm := &shardedMap{ + shards: make([]*lockedMap, int(numShards)), + } + for i := range sm.shards { + sm.shards[i] = newLockedMap() + } + return sm +} + +func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) { + return sm.shards[key%numShards].get(key, conflict) +} + +func (sm *shardedMap) Set(i *Item) { + if i == nil { + // If item is nil make this Set a no-op. + return + } + + sm.shards[i.Key%numShards].Set(i) +} + +func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) { + return sm.shards[key%numShards].Del(key, conflict) +} + +func (sm *shardedMap) Update(newItem *Item) (interface{}, bool) { + return sm.shards[newItem.Key%numShards].Update(newItem) +} + +func (sm *shardedMap) ForEach(forEach func(interface{}) bool) { + for _, shard := range sm.shards { + if !shard.foreach(forEach) { + break + } + } +} + +func (sm *shardedMap) Len() int { + l := 0 + for _, shard := range sm.shards { + l += shard.Len() + } + return l +} + +func (sm *shardedMap) Clear(onEvict itemCallback) { + for i := uint64(0); i < numShards; i++ { + sm.shards[i].Clear(onEvict) + } +} + +type lockedMap struct { + sync.RWMutex + data map[uint64]storeItem +} + +func newLockedMap() *lockedMap { + return &lockedMap{ + data: make(map[uint64]storeItem), + } +} + +func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) { + m.RLock() + item, ok := m.data[key] + m.RUnlock() + if !ok { + return nil, false + } + if conflict != 0 && (conflict != item.conflict) { + return nil, false + } + return item.value, true +} + +func (m *lockedMap) Set(i *Item) { + if i == nil { + // If the item is nil make this Set a no-op. + return + } + + m.Lock() + defer m.Unlock() + item, ok := m.data[i.Key] + + if ok { + // The item existed already. 
We need to check the conflict key and reject the + // update if they do not match. Only after that the expiration map is updated. + if i.Conflict != 0 && (i.Conflict != item.conflict) { + return + } + } + + m.data[i.Key] = storeItem{ + key: i.Key, + conflict: i.Conflict, + value: i.Value, + } +} + +func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) { + m.Lock() + item, ok := m.data[key] + if !ok { + m.Unlock() + return 0, nil + } + if conflict != 0 && (conflict != item.conflict) { + m.Unlock() + return 0, nil + } + + delete(m.data, key) + m.Unlock() + return item.conflict, item.value +} + +func (m *lockedMap) Update(newItem *Item) (interface{}, bool) { + m.Lock() + item, ok := m.data[newItem.Key] + if !ok { + m.Unlock() + return nil, false + } + if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) { + m.Unlock() + return nil, false + } + + m.data[newItem.Key] = storeItem{ + key: newItem.Key, + conflict: newItem.Conflict, + value: newItem.Value, + } + + m.Unlock() + return item.value, true +} + +func (m *lockedMap) Len() int { + m.RLock() + l := len(m.data) + m.RUnlock() + return l +} + +func (m *lockedMap) Clear(onEvict itemCallback) { + m.Lock() + i := &Item{} + if onEvict != nil { + for _, si := range m.data { + i.Key = si.key + i.Conflict = si.conflict + i.Value = si.value + onEvict(i) + } + } + m.data = make(map[uint64]storeItem) + m.Unlock() +} + +func (m *lockedMap) foreach(forEach func(interface{}) bool) bool { + m.RLock() + defer m.RUnlock() + for _, si := range m.data { + if !forEach(si.value) { + return false + } + } + return true +} diff --git a/go/cache/ristretto/store_test.go b/go/cache/ristretto/store_test.go new file mode 100644 index 00000000000..54634736a72 --- /dev/null +++ b/go/cache/ristretto/store_test.go @@ -0,0 +1,224 @@ +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * Copyright 2021 The Vitess Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ristretto + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStoreSetGet(t *testing.T) { + s := newStore() + key, conflict := defaultStringHash("1") + i := Item{ + Key: key, + Conflict: conflict, + Value: 2, + } + s.Set(&i) + val, ok := s.Get(key, conflict) + require.True(t, ok) + require.Equal(t, 2, val.(int)) + + i.Value = 3 + s.Set(&i) + val, ok = s.Get(key, conflict) + require.True(t, ok) + require.Equal(t, 3, val.(int)) + + key, conflict = defaultStringHash("2") + i = Item{ + Key: key, + Conflict: conflict, + Value: 2, + } + s.Set(&i) + val, ok = s.Get(key, conflict) + require.True(t, ok) + require.Equal(t, 2, val.(int)) +} + +func TestStoreDel(t *testing.T) { + s := newStore() + key, conflict := defaultStringHash("1") + i := Item{ + Key: key, + Conflict: conflict, + Value: 1, + } + s.Set(&i) + s.Del(key, conflict) + val, ok := s.Get(key, conflict) + require.False(t, ok) + require.Nil(t, val) + + s.Del(2, 0) +} + +func TestStoreClear(t *testing.T) { + s := newStore() + for i := 0; i < 1000; i++ { + key, conflict := defaultStringHash(strconv.Itoa(i)) + it := Item{ + Key: key, + Conflict: conflict, + Value: i, + } + s.Set(&it) + } + s.Clear(nil) + for i := 0; i < 1000; i++ { + key, conflict := defaultStringHash(strconv.Itoa(i)) + val, ok := s.Get(key, conflict) + require.False(t, ok) + require.Nil(t, val) + } +} + +func TestStoreUpdate(t 
*testing.T) { + s := newStore() + key, conflict := defaultStringHash("1") + i := Item{ + Key: key, + Conflict: conflict, + Value: 1, + } + s.Set(&i) + i.Value = 2 + _, ok := s.Update(&i) + require.True(t, ok) + + val, ok := s.Get(key, conflict) + require.True(t, ok) + require.NotNil(t, val) + + val, ok = s.Get(key, conflict) + require.True(t, ok) + require.Equal(t, 2, val.(int)) + + i.Value = 3 + _, ok = s.Update(&i) + require.True(t, ok) + + val, ok = s.Get(key, conflict) + require.True(t, ok) + require.Equal(t, 3, val.(int)) + + key, conflict = defaultStringHash("2") + i = Item{ + Key: key, + Conflict: conflict, + Value: 2, + } + _, ok = s.Update(&i) + require.False(t, ok) + val, ok = s.Get(key, conflict) + require.False(t, ok) + require.Nil(t, val) +} + +func TestStoreCollision(t *testing.T) { + s := newShardedMap() + s.shards[1].Lock() + s.shards[1].data[1] = storeItem{ + key: 1, + conflict: 0, + value: 1, + } + s.shards[1].Unlock() + val, ok := s.Get(1, 1) + require.False(t, ok) + require.Nil(t, val) + + i := Item{ + Key: 1, + Conflict: 1, + Value: 2, + } + s.Set(&i) + val, ok = s.Get(1, 0) + require.True(t, ok) + require.NotEqual(t, 2, val.(int)) + + _, ok = s.Update(&i) + require.False(t, ok) + val, ok = s.Get(1, 0) + require.True(t, ok) + require.NotEqual(t, 2, val.(int)) + + s.Del(1, 1) + val, ok = s.Get(1, 0) + require.True(t, ok) + require.NotNil(t, val) +} + +func BenchmarkStoreGet(b *testing.B) { + s := newStore() + key, conflict := defaultStringHash("1") + i := Item{ + Key: key, + Conflict: conflict, + Value: 1, + } + s.Set(&i) + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + s.Get(key, conflict) + } + }) +} + +func BenchmarkStoreSet(b *testing.B) { + s := newStore() + key, conflict := defaultStringHash("1") + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + i := Item{ + Key: key, + Conflict: conflict, + Value: 1, + } + s.Set(&i) + } + }) +} + +func BenchmarkStoreUpdate(b *testing.B) { + s := 
newStore() + key, conflict := defaultStringHash("1") + i := Item{ + Key: key, + Conflict: conflict, + Value: 1, + } + s.Set(&i) + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + s.Update(&Item{ + Key: key, + Conflict: conflict, + Value: 2, + }) + } + }) +} diff --git a/go/cmd/vtctldclient/cli/awk.go b/go/cmd/vtctldclient/cli/awk.go new file mode 100644 index 00000000000..2789fec2e6d --- /dev/null +++ b/go/cmd/vtctldclient/cli/awk.go @@ -0,0 +1,73 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "fmt" + "sort" + "strings" + "time" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// MarshalMapAWK returns a string representation of a string->string map in an +// AWK-friendly format. +func MarshalMapAWK(m map[string]string) string { + pairs := make([]string, len(m)) + i := 0 + + for k, v := range m { + pairs[i] = fmt.Sprintf("%v: %q", k, v) + + i++ + } + + sort.Strings(pairs) + + return "[" + strings.Join(pairs, " ") + "]" +} + +// MarshalTabletAWK marshals a tablet into an AWK-friendly line. 
+func MarshalTabletAWK(t *topodatapb.Tablet) string {
+	ti := topo.TabletInfo{
+		Tablet: t,
+	}
+
+	keyspace := t.Keyspace
+	if keyspace == "" {
+		keyspace = "<null>"
+	}
+
+	shard := t.Shard
+	if shard == "" {
+		shard = "<null>"
+	}
+
+	mtst := "<null>"
+	// special case for old primary that hasn't been updated in the topo
+	// yet.
+	if t.MasterTermStartTime != nil && t.MasterTermStartTime.Seconds > 0 {
+		mtst = logutil.ProtoToTime(t.MasterTermStartTime).Format(time.RFC3339)
+	}
+
+	return fmt.Sprintf("%v %v %v %v %v %v %v %v", topoproto.TabletAliasString(t.Alias), keyspace, shard, topoproto.TabletTypeLString(t.Type), ti.Addr(), ti.MysqlAddr(), MarshalMapAWK(t.Tags), mtst)
+}
diff --git a/go/cmd/vtctldclient/cli/cobra.go b/go/cmd/vtctldclient/cli/cobra.go
new file mode 100644
index 00000000000..d3f43bddbfb
--- /dev/null
+++ b/go/cmd/vtctldclient/cli/cobra.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import "github.com/spf13/cobra"
+
+// FinishedParsing transitions a cobra.Command from treating RunE errors as
+// usage errors to treating them just as normal runtime errors that should be
+// propagated up to the root command's Execute method without also printing the
+// subcommand's usage text on stderr. A subcommand should call this function
+// from its RunE function when it has finished processing its flags and is
+// moving into the pure "business logic" of its entrypoint.
+// +// Package vitess.io/vitess/go/cmd/vtctldclient/internal/command has more +// details on why this exists. +func FinishedParsing(cmd *cobra.Command) { + cmd.SilenceUsage = true +} diff --git a/go/cmd/vtctldclient/cli/json.go b/go/cmd/vtctldclient/cli/json.go new file mode 100644 index 00000000000..903ca905b3e --- /dev/null +++ b/go/cmd/vtctldclient/cli/json.go @@ -0,0 +1,61 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" +) + +// MarshalJSON marshals obj to a JSON string. It uses the jsonpb marshaler for +// proto.Message types, with some sensible defaults, and falls back to the +// standard Go marshaler otherwise. In both cases, the marshaled JSON is +// indented with two spaces for readability. +// +// Unfortunately jsonpb only works for types that implement proto.Message, +// either by being a proto message type or by anonymously embedding one, so for +// other types that may have nested struct fields, we still use the standard Go +// marshaler, which will result in different formattings. 
+func MarshalJSON(obj interface{}) ([]byte, error) { + switch obj := obj.(type) { + case proto.Message: + b := bytes.NewBuffer(nil) + m := jsonpb.Marshaler{ + EnumsAsInts: false, + EmitDefaults: true, + Indent: " ", + OrigName: true, + } + + if err := m.Marshal(b, obj); err != nil { + return nil, fmt.Errorf("jsonpb.Marshal = %v", err) + } + + return b.Bytes(), nil + default: + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return nil, fmt.Errorf("json.Marshal = %v", err) + } + + return data, nil + } +} diff --git a/go/cmd/vtctldclient/cli/pflag.go b/go/cmd/vtctldclient/cli/pflag.go new file mode 100644 index 00000000000..8a364be8d86 --- /dev/null +++ b/go/cmd/vtctldclient/cli/pflag.go @@ -0,0 +1,93 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "github.com/spf13/pflag" + + "vitess.io/vitess/go/flagutil" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/topo/topoproto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// StringMapValue augments flagutil.StringMapValue so it can be used as a +// pflag.Value. +type StringMapValue struct { + flagutil.StringMapValue +} + +// Type is part of the pflag.Value interface. +func (v *StringMapValue) Type() string { + return "cli.StringMapValue" +} + +// KeyspaceIDTypeFlag adds the pflag.Value interface to a +// topodatapb.KeyspaceIdType. 
+type KeyspaceIDTypeFlag topodatapb.KeyspaceIdType + +var _ pflag.Value = (*KeyspaceIDTypeFlag)(nil) + +// Set is part of the pflag.Value interface. +func (v *KeyspaceIDTypeFlag) Set(arg string) error { + t, err := key.ParseKeyspaceIDType(arg) + if err != nil { + return err + } + + *v = KeyspaceIDTypeFlag(t) + + return nil +} + +// String is part of the pflag.Value interface. +func (v *KeyspaceIDTypeFlag) String() string { + return key.KeyspaceIDTypeString(topodatapb.KeyspaceIdType(*v)) +} + +// Type is part of the pflag.Value interface. +func (v *KeyspaceIDTypeFlag) Type() string { + return "cli.KeyspaceIdTypeFlag" +} + +// KeyspaceTypeFlag adds the pflag.Value interface to a topodatapb.KeyspaceType. +type KeyspaceTypeFlag topodatapb.KeyspaceType + +var _ pflag.Value = (*KeyspaceTypeFlag)(nil) + +// Set is part of the pflag.Value interface. +func (v *KeyspaceTypeFlag) Set(arg string) error { + kt, err := topoproto.ParseKeyspaceType(arg) + if err != nil { + return err + } + + *v = KeyspaceTypeFlag(kt) + + return nil +} + +// String is part of the pflag.Value interface. +func (v *KeyspaceTypeFlag) String() string { + return topoproto.KeyspaceTypeString(topodatapb.KeyspaceType(*v)) +} + +// Type is part of the pflag.Value interface. +func (v *KeyspaceTypeFlag) Type() string { + return "cli.KeyspaceTypeFlag" +} diff --git a/go/cmd/vtctldclient/cli/shards.go b/go/cmd/vtctldclient/cli/shards.go new file mode 100644 index 00000000000..f45b8324c00 --- /dev/null +++ b/go/cmd/vtctldclient/cli/shards.go @@ -0,0 +1,123 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "sort" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/topo/topoproto" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// ParseKeyspaceShards takes a list of positional arguments and converts them to +// vtctldatapb.Shard objects. +func ParseKeyspaceShards(args []string) ([]*vtctldatapb.Shard, error) { + shards := make([]*vtctldatapb.Shard, 0, len(args)) + + for _, arg := range args { + keyspace, shard, err := topoproto.ParseKeyspaceShard(arg) + if err != nil { + return nil, err + } + + shards = append(shards, &vtctldatapb.Shard{ + Keyspace: keyspace, + Name: shard, + }) + } + + return shards, nil +} + +// ReplicatingTablet is a struct to group a Tablet together with its replication +// Status. +type ReplicatingTablet struct { + *replicationdatapb.Status + *topodatapb.Tablet +} + +type rTablets []*ReplicatingTablet + +func (rts rTablets) Len() int { return len(rts) } +func (rts rTablets) Swap(i, j int) { rts[i], rts[j] = rts[j], rts[i] } +func (rts rTablets) Less(i, j int) bool { + l, r := rts[i], rts[j] + + // l or r ReplicationStatus would be nil if we failed to get + // the position (put them at the beginning of the list) + if l.Status == nil { + return r.Status != nil + } + + if r.Status == nil { + return false + } + + // the type proto has MASTER first, so sort by that. Will show + // the MASTER first, then each replica type sorted by + // replication position. 
+ if l.Tablet.Type < r.Tablet.Type { + return true + } + + if l.Tablet.Type > r.Tablet.Type { + return false + } + + // then compare replication positions + lpos, err := mysql.DecodePosition(l.Status.Position) + if err != nil { + return true + } + + rpos, err := mysql.DecodePosition(r.Status.Position) + if err != nil { + return false + } + + return !lpos.AtLeast(rpos) +} + +// SortedReplicatingTablets returns a sorted list of replicating tablets (which +// is a struct grouping a Tablet together with its replication Status). +// +// The sorting order is: +// 1. Tablets that do not have a replication Status. +// 2. Any tablets of type MASTER. +// 3. Remaining tablets sorted by comparing replication positions. +func SortedReplicatingTablets(tabletMap map[string]*topodatapb.Tablet, replicationStatuses map[string]*replicationdatapb.Status) []*ReplicatingTablet { + rtablets := make([]*ReplicatingTablet, 0, len(tabletMap)) + + for alias, tablet := range tabletMap { + if status, ok := replicationStatuses[alias]; ok { + rtablets = append(rtablets, &ReplicatingTablet{ + Status: status, + Tablet: tablet, + }) + } else { + rtablets = append(rtablets, &ReplicatingTablet{Tablet: tablet}) + } + } + + sort.Sort(rTablets(rtablets)) + + return rtablets +} diff --git a/go/cmd/vtctldclient/cli/tablets.go b/go/cmd/vtctldclient/cli/tablets.go new file mode 100644 index 00000000000..a2962c42b45 --- /dev/null +++ b/go/cmd/vtctldclient/cli/tablets.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "vitess.io/vitess/go/vt/topo/topoproto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// TabletAliasesFromPosArgs takes a list of positional (non-flag) arguments and +// converts them to tablet aliases. +func TabletAliasesFromPosArgs(args []string) ([]*topodatapb.TabletAlias, error) { + aliases := make([]*topodatapb.TabletAlias, 0, len(args)) + + for _, arg := range args { + alias, err := topoproto.ParseTabletAlias(arg) + if err != nil { + return nil, err + } + + aliases = append(aliases, alias) + } + + return aliases, nil +} diff --git a/go/cmd/vtctldclient/commands.go b/go/cmd/vtctldclient/commands.go deleted file mode 100644 index 951b04f8679..00000000000 --- a/go/cmd/vtctldclient/commands.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "encoding/json" - "fmt" - - "github.com/spf13/cobra" - - vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" -) - -var ( - findAllShardsInKeyspaceCmd = &cobra.Command{ - Use: "FindAllShardsInKeyspace keyspace", - Aliases: []string{"findallshardsinkeyspace"}, - Args: cobra.ExactArgs(1), - RunE: commandFindAllShardsInKeyspace, - } - getKeyspaceCmd = &cobra.Command{ - Use: "GetKeyspace keyspace", - Aliases: []string{"getkeyspace"}, - Args: cobra.ExactArgs(1), - RunE: commandGetKeyspace, - } - getKeyspacesCmd = &cobra.Command{ - Use: "GetKeyspaces", - Aliases: []string{"getkeyspaces"}, - Args: cobra.NoArgs, - RunE: commandGetKeyspaces, - } -) - -func commandFindAllShardsInKeyspace(cmd *cobra.Command, args []string) error { - ks := cmd.Flags().Arg(0) - resp, err := client.FindAllShardsInKeyspace(commandCtx, &vtctldatapb.FindAllShardsInKeyspaceRequest{ - Keyspace: ks, - }) - - if err != nil { - return err - } - - data, err := json.Marshal(&resp) - if err != nil { - return err - } - - fmt.Printf("%s\n", data) - return nil -} - -func commandGetKeyspace(cmd *cobra.Command, args []string) error { - ks := cmd.Flags().Arg(0) - resp, err := client.GetKeyspace(commandCtx, &vtctldatapb.GetKeyspaceRequest{ - Keyspace: ks, - }) - - if err != nil { - return err - } - - fmt.Printf("%+v\n", resp.Keyspace) - - return nil -} - -func commandGetKeyspaces(cmd *cobra.Command, args []string) error { - resp, err := client.GetKeyspaces(commandCtx, &vtctldatapb.GetKeyspacesRequest{}) - if err != nil { - return err - } - - fmt.Printf("%+v\n", resp.Keyspaces) - - return nil -} - -func init() { - rootCmd.AddCommand(findAllShardsInKeyspaceCmd) - rootCmd.AddCommand(getKeyspaceCmd) - rootCmd.AddCommand(getKeyspacesCmd) -} diff --git a/go/cmd/vtctldclient/internal/command/backups.go b/go/cmd/vtctldclient/internal/command/backups.go new file mode 100644 index 00000000000..21d5673ef34 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/backups.go @@ -0,0 
+1,62 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// GetBackups makes a GetBackups gRPC call to a vtctld. +var GetBackups = &cobra.Command{ + Use: "GetBackups keyspace shard", + Args: cobra.ExactArgs(2), + RunE: commandGetBackups, +} + +func commandGetBackups(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + keyspace := cmd.Flags().Arg(0) + shard := cmd.Flags().Arg(1) + + resp, err := client.GetBackups(commandCtx, &vtctldatapb.GetBackupsRequest{ + Keyspace: keyspace, + Shard: shard, + }) + if err != nil { + return err + } + + names := make([]string, len(resp.Backups)) + for i, b := range resp.Backups { + names[i] = b.Name + } + + fmt.Printf("%s\n", strings.Join(names, "\n")) + + return nil +} + +func init() { + Root.AddCommand(GetBackups) +} diff --git a/go/cmd/vtctldclient/internal/command/cells.go b/go/cmd/vtctldclient/internal/command/cells.go new file mode 100644 index 00000000000..e04984f761b --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/cells.go @@ -0,0 +1,106 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // GetCellInfoNames makes a GetCellInfoNames gRPC call to a vtctld. + GetCellInfoNames = &cobra.Command{ + Use: "GetCellInfoNames", + Args: cobra.NoArgs, + RunE: commandGetCellInfoNames, + } + // GetCellInfo makes a GetCellInfo gRPC call to a vtctld. + GetCellInfo = &cobra.Command{ + Use: "GetCellInfo cell", + Args: cobra.ExactArgs(1), + RunE: commandGetCellInfo, + } + // GetCellsAliases makes a GetCellsAliases gRPC call to a vtctld. 
+ GetCellsAliases = &cobra.Command{ + Use: "GetCellsAliases", + Args: cobra.NoArgs, + RunE: commandGetCellsAliases, + } +) + +func commandGetCellInfoNames(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + resp, err := client.GetCellInfoNames(commandCtx, &vtctldatapb.GetCellInfoNamesRequest{}) + if err != nil { + return err + } + + fmt.Printf("%s\n", strings.Join(resp.Names, "\n")) + + return nil +} + +func commandGetCellInfo(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + cell := cmd.Flags().Arg(0) + + resp, err := client.GetCellInfo(commandCtx, &vtctldatapb.GetCellInfoRequest{Cell: cell}) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.CellInfo) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func commandGetCellsAliases(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + resp, err := client.GetCellsAliases(commandCtx, &vtctldatapb.GetCellsAliasesRequest{}) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.Aliases) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + Root.AddCommand(GetCellInfoNames) + Root.AddCommand(GetCellInfo) + Root.AddCommand(GetCellsAliases) +} diff --git a/go/cmd/vtctldclient/internal/command/doc.go b/go/cmd/vtctldclient/internal/command/doc.go new file mode 100644 index 00000000000..b83db0ef0a4 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/doc.go @@ -0,0 +1,144 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package command contains the commands used by vtctldclient. It is intended only +for use in vtctldclient's main package and entrypoint. The rest of this +documentation is intended for maintainers. + +Commands are grouped into files by the types of resources they interact with ( +e.g. GetTablet, CreateTablet, DeleteTablet, GetTablets) or by what they do (e.g. +PlannedReparentShard, EmergencyReparentShard, InitShardPrimary). Please add the +command to the appropriate existing file, alphabetically, or create a new +grouping if one does not exist. + +The root command lives in root.go, and commands must attach themselves to this +during an init function in order to be reachable from the CLI. root.go also +contains the global variables available to any subcommand that are managed by +the root command's pre- and post-run functions. Commands must not attempt to +manage these, as that may conflict with Root's post-run cleanup actions. All +commands should, at a minimum, use the commandCtx rather than creating their own +context.Background to start, as it contains root tracing spans that would be +lost. + +Commands should not keep their logic in an anonymous function on the +cobra.Command struct, but instead in a separate function that is assigned to +RunE. Commands should strive to keep declaration, function definition, and flag +initialization located as closely together as possible, to make the code easier +to follow and understand (the global variables declared near Root are the +exception here, not the rule). Commands should also prevent individual flag +names from polluting the package namespace. + +A good pattern we have found is to do the following: + package command + + // (imports ...) 
+ + var ( + CreateTablet = &cobra.Command{ + Use: "CreateTablet [options] --keyspace= --shard= ", + Args: cobra.ExactArgs(2), + RunE: commandCreateTablet, + } + GetTablet = &cobra.Command{ + Use: "GetTablet ", + Args: cobra.ExactArgs(1), + RunE: commandGetTablet, + } + ) + + var createTabletOptions = struct { + Opt1 string + Opt2 bool + Keyspace string + Shard string + }{} + + func commandCreateTablet(cmd *cobra.Command, args []string) error { + aliasStr := cmd.Flags().Args(0) + tabletTypeStr := cmd.Flags().Args(1) + + // do stuff with: + // - client + // - commandCtx + // - createTabletOptions + // - aliasStr + // - tabletTypeStr + + return nil + } + + // GetTablet takes no flags, so it needs no anonymous struct to store them + func commandGetTablet(cmd *cobra.Command, args []string) error { + aliasStr := cmd.Flags().Arg(0) + + // do stuff with: + // - client + // - commandCtx + // - aliasStr + + return nil + } + + // finally, hook up all the commands in this file to Root, and add any flags + // to each of those commands + + func init() { + CreateTablet.Flags().StringVar(&createTabletOptions.Opt1, "opt1", "default", "help") + CreateTablet.Flags().BoolVar(&createTabletOptions.Opt2, "opt2", false, "help") + CreateTablet.Flags().StringVarP(&createTabletOptions.Keyspace, "keyspace", "k", "keyspace of tablet") + CreateTablet.MarkFlagRequired("keyspace") + CreateTablet.Flags().StringVarP(&createTabletOptions.Shard, "shard", "s", "shard range of tablet") + CreateTablet.MarkFlagRequired("shard") + Root.AddCommand(CreateTablet) + + Root.AddCommand(GetTablet) + } + +A note on RunE and SilenceUsage: + +We prefer using RunE over Run for the entrypoint to our subcommands, because it +allows us return errors back up to the vtctldclient main function and do error +handling, logging, and exit-code management once, in one place, rather than on a +per-command basis. 
However, cobra treats errors returned from a command's RunE +as usage errors, and therefore will print the command's full usage text to +stderr when RunE returns non-nil, in addition to propagating that error back up +to the result of the root command's Execute() method. This is decidedly not what +we want. There is no plan to address this in cobra v1. [1] + +The suggested workaround for this issue is to set SilenceUsage: true, either on +the root command or on every subcommand individually. This also does not work +for vtctldclient, because not every flag can be parsed during pflag.Parse time, +and for certain flags (mutually exclusive options, optional flags that require +other flags to be set with them, etc) we do additional parsing and validation of +flags in an individual subcommand. We want errors during this phase to be +treated as usage errors, so setting SilenceUsage=true before this point would +not cause usage text to be printed for us. + +So, for us, we want to individually set cmd.SilenceUsage = true at *particular +points* in each command, dependending on whether that command needs to do +an additional parse & validation pass. In most cases, the command does not need +to post-validate its options, and can set cmd.SilencUsage = true as their first +line. We feel, though, that a line that reads "SilenceUsage = true" to be +potentially confusing in how it reads. A maintainer without sufficient context +may read this and say "Silence usage? We don't want that" and remove the lines, +so we provide a wrapper function that communicates intent, cli.FinishedParsing, +that each subcommand should call when they have transitioned from the parsing & +validation phase of their entrypoint to the actual logic. 
+ +[1]: https://github.com/spf13/cobra/issues/340 +*/ +package command diff --git a/go/cmd/vtctldclient/internal/command/keyspaces.go b/go/cmd/vtctldclient/internal/command/keyspaces.go new file mode 100644 index 00000000000..c9e779358a1 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/keyspaces.go @@ -0,0 +1,290 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "errors" + "fmt" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +var ( + // CreateKeyspace makes a CreateKeyspace gRPC call to a vtctld. + CreateKeyspace = &cobra.Command{ + Use: "CreateKeyspace KEYSPACE_NAME [--force] [--sharding-column-name NAME --sharding-column-type TYPE] [--base-keyspace KEYSPACE --snapshot-timestamp TIME] [--served-from DB_TYPE:KEYSPACE ...]", + Args: cobra.ExactArgs(1), + RunE: commandCreateKeyspace, + } + // DeleteKeyspace makes a DeleteKeyspace gRPC call to a vtctld. + DeleteKeyspace = &cobra.Command{ + Use: "DeleteKeyspace KEYSPACE_NAME", + Args: cobra.ExactArgs(1), + RunE: commandDeleteKeyspace, + } + // FindAllShardsInKeyspace makes a FindAllShardsInKeyspace gRPC call to a vtctld. 
+ FindAllShardsInKeyspace = &cobra.Command{ + Use: "FindAllShardsInKeyspace keyspace", + Aliases: []string{"findallshardsinkeyspace"}, + Args: cobra.ExactArgs(1), + RunE: commandFindAllShardsInKeyspace, + } + // GetKeyspace makes a GetKeyspace gRPC call to a vtctld. + GetKeyspace = &cobra.Command{ + Use: "GetKeyspace keyspace", + Aliases: []string{"getkeyspace"}, + Args: cobra.ExactArgs(1), + RunE: commandGetKeyspace, + } + // GetKeyspaces makes a GetKeyspaces gRPC call to a vtctld. + GetKeyspaces = &cobra.Command{ + Use: "GetKeyspaces", + Aliases: []string{"getkeyspaces"}, + Args: cobra.NoArgs, + RunE: commandGetKeyspaces, + } + // RemoveKeyspaceCell makes a RemoveKeyspaceCell gRPC call to a vtctld. + RemoveKeyspaceCell = &cobra.Command{ + Use: "RemoveKeyspaceCell ", + Args: cobra.ExactArgs(2), + RunE: commandRemoveKeyspaceCell, + } +) + +var createKeyspaceOptions = struct { + Force bool + AllowEmptyVSchema bool + + ShardingColumnName string + ShardingColumnType cli.KeyspaceIDTypeFlag + + ServedFromsMap cli.StringMapValue + + KeyspaceType cli.KeyspaceTypeFlag + BaseKeyspace string + SnapshotTimestamp string +}{ + KeyspaceType: cli.KeyspaceTypeFlag(topodatapb.KeyspaceType_NORMAL), +} + +func commandCreateKeyspace(cmd *cobra.Command, args []string) error { + name := cmd.Flags().Arg(0) + + switch topodatapb.KeyspaceType(createKeyspaceOptions.KeyspaceType) { + case topodatapb.KeyspaceType_NORMAL, topodatapb.KeyspaceType_SNAPSHOT: + default: + return fmt.Errorf("invalid keyspace type passed to --type: %v", createKeyspaceOptions.KeyspaceType) + } + + var snapshotTime *vttime.Time + if topodatapb.KeyspaceType(createKeyspaceOptions.KeyspaceType) == topodatapb.KeyspaceType_SNAPSHOT { + if createKeyspaceOptions.BaseKeyspace == "" { + return errors.New("--base-keyspace is required for a snapshot keyspace") + } + + if createKeyspaceOptions.SnapshotTimestamp == "" { + return errors.New("--snapshot-timestamp is required for a snapshot keyspace") + } + + t, err := 
time.Parse(time.RFC3339, createKeyspaceOptions.SnapshotTimestamp) + if err != nil { + return fmt.Errorf("cannot parse --snapshot-timestamp as RFC3339: %w", err) + } + + if now := time.Now(); t.After(now) { + return fmt.Errorf("--snapshot-time cannot be in the future; snapshot = %v, now = %v", t, now) + } + + snapshotTime = logutil.TimeToProto(t) + } + + cli.FinishedParsing(cmd) + + req := &vtctldatapb.CreateKeyspaceRequest{ + Name: name, + Force: createKeyspaceOptions.Force, + AllowEmptyVSchema: createKeyspaceOptions.AllowEmptyVSchema, + ShardingColumnName: createKeyspaceOptions.ShardingColumnName, + ShardingColumnType: topodatapb.KeyspaceIdType(createKeyspaceOptions.ShardingColumnType), + Type: topodatapb.KeyspaceType(createKeyspaceOptions.KeyspaceType), + BaseKeyspace: createKeyspaceOptions.BaseKeyspace, + SnapshotTime: snapshotTime, + } + + for n, v := range createKeyspaceOptions.ServedFromsMap.StringMapValue { + tt, err := topo.ParseServingTabletType(n) + if err != nil { + return err + } + + req.ServedFroms = append(req.ServedFroms, &topodatapb.Keyspace_ServedFrom{ + TabletType: tt, + Keyspace: v, + }) + } + + resp, err := client.CreateKeyspace(commandCtx, req) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.Keyspace) + if err != nil { + return err + } + + fmt.Printf("Successfully created keyspace %s. 
Result:\n%s\n", name, data) + + return nil +} + +var deleteKeyspaceOptions = struct { + Recursive bool +}{} + +func commandDeleteKeyspace(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + ks := cmd.Flags().Arg(0) + _, err := client.DeleteKeyspace(commandCtx, &vtctldatapb.DeleteKeyspaceRequest{ + Keyspace: ks, + Recursive: deleteKeyspaceOptions.Recursive, + }) + + if err != nil { + return fmt.Errorf("DeleteKeyspace(%v) error: %w; please check the topo", ks, err) + } + + fmt.Printf("Successfully deleted keyspace %v.\n", ks) + + return nil +} + +func commandFindAllShardsInKeyspace(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + ks := cmd.Flags().Arg(0) + resp, err := client.FindAllShardsInKeyspace(commandCtx, &vtctldatapb.FindAllShardsInKeyspaceRequest{ + Keyspace: ks, + }) + + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandGetKeyspace(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + ks := cmd.Flags().Arg(0) + resp, err := client.GetKeyspace(commandCtx, &vtctldatapb.GetKeyspaceRequest{ + Keyspace: ks, + }) + + if err != nil { + return err + } + + fmt.Printf("%+v\n", resp.Keyspace) + + return nil +} + +func commandGetKeyspaces(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + resp, err := client.GetKeyspaces(commandCtx, &vtctldatapb.GetKeyspacesRequest{}) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.Keyspaces) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +var removeKeyspaceCellOptions = struct { + Force bool + Recursive bool +}{} + +func commandRemoveKeyspaceCell(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + keyspace := cmd.Flags().Arg(0) + cell := cmd.Flags().Arg(1) + + _, err := client.RemoveKeyspaceCell(commandCtx, &vtctldatapb.RemoveKeyspaceCellRequest{ + 
Keyspace: keyspace, + Cell: cell, + Force: removeKeyspaceCellOptions.Force, + Recursive: removeKeyspaceCellOptions.Recursive, + }) + + if err != nil { + return err + } + + fmt.Printf("Successfully removed keyspace %s from cell %s\n", keyspace, cell) + + return nil +} + +func init() { + CreateKeyspace.Flags().BoolVarP(&createKeyspaceOptions.Force, "force", "f", false, "Proceeds even if the keyspace already exists. Does not overwrite the existing keyspace record") + CreateKeyspace.Flags().BoolVarP(&createKeyspaceOptions.AllowEmptyVSchema, "allow-empty-vschema", "e", false, "Allows a new keyspace to have no vschema") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.ShardingColumnName, "sharding-column-name", "", "The column name to use for sharding operations") + CreateKeyspace.Flags().Var(&createKeyspaceOptions.ShardingColumnType, "sharding-column-type", "The type of the column to use for sharding operations") + CreateKeyspace.Flags().Var(&createKeyspaceOptions.ServedFromsMap, "served-from", "TODO") + CreateKeyspace.Flags().Var(&createKeyspaceOptions.KeyspaceType, "type", "The type of the keyspace") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.") + Root.AddCommand(CreateKeyspace) + + DeleteKeyspace.Flags().BoolVarP(&deleteKeyspaceOptions.Recursive, "recursive", "r", false, "Recursively delete all shards in the keyspace, and all tablets in those shards.") + Root.AddCommand(DeleteKeyspace) + + Root.AddCommand(FindAllShardsInKeyspace) + Root.AddCommand(GetKeyspace) + Root.AddCommand(GetKeyspaces) + + RemoveKeyspaceCell.Flags().BoolVarP(&removeKeyspaceCellOptions.Force, "force", "f", false, "Proceed even if the cell's topology server cannot be reached. 
The assumption is that you turned down the entire cell, and just need to update the global topo data.") + RemoveKeyspaceCell.Flags().BoolVarP(&removeKeyspaceCellOptions.Recursive, "recursive", "r", false, "Also delete all tablets in that cell beloning to the specified keyspace.") + Root.AddCommand(RemoveKeyspaceCell) +} diff --git a/go/cmd/vtctldclient/internal/command/reparents.go b/go/cmd/vtctldclient/internal/command/reparents.go new file mode 100644 index 00000000000..dbd621e021a --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/reparents.go @@ -0,0 +1,278 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // EmergencyReparentShard makes an EmergencyReparent gRPC call to a vtctld. + EmergencyReparentShard = &cobra.Command{ + Use: "EmergencyReparentShard ", + Args: cobra.ExactArgs(1), + Long: "Reparents the shard to the new primary. Assumes the old primary is dead and not responding", + RunE: commandEmergencyReparentShard, + } + // InitShardPrimary makes an InitShardPrimary gRPC call to a vtctld. 
+ InitShardPrimary = &cobra.Command{ + Use: "InitShardPrimary ", + Args: cobra.ExactArgs(2), + RunE: commandInitShardPrimary, + } + // PlannedReparentShard makes a PlannedReparentShard gRPC call to a vtctld. + PlannedReparentShard = &cobra.Command{ + Use: "PlannedReparentShard ", + Args: cobra.ExactArgs(1), + Long: "string", + RunE: commandPlannedReparentShard, + } + // ReparentTablet makes a ReparentTablet gRPC call to a vtctld. + ReparentTablet = &cobra.Command{ + Use: "ReparentTablet ", + Long: "Reparent a tablet to the current primary in the shard. This only works if the current replica position " + + "matches the last known reparent action.", + Args: cobra.ExactArgs(1), + RunE: commandReparentTablet, + } + // TabletExternallyReparented makes a TabletExternallyReparented gRPC call + // to a vtctld. + TabletExternallyReparented = &cobra.Command{ + Use: "TabletExternallyReparented ", + Args: cobra.ExactArgs(1), + RunE: commandTabletExternallyReparented, + } +) + +var emergencyReparentShardOptions = struct { + Force bool + WaitReplicasTimeout time.Duration + NewPrimaryAliasStr string + IgnoreReplicaAliasStrList []string +}{} + +func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + var ( + newPrimaryAlias *topodatapb.TabletAlias + ignoreReplicaAliases = make([]*topodatapb.TabletAlias, len(emergencyReparentShardOptions.IgnoreReplicaAliasStrList)) + ) + + if emergencyReparentShardOptions.NewPrimaryAliasStr != "" { + newPrimaryAlias, err = topoproto.ParseTabletAlias(emergencyReparentShardOptions.NewPrimaryAliasStr) + if err != nil { + return err + } + } + + for i, aliasStr := range emergencyReparentShardOptions.IgnoreReplicaAliasStrList { + alias, err := topoproto.ParseTabletAlias(aliasStr) + if err != nil { + return err + } + + ignoreReplicaAliases[i] = alias + } + + cli.FinishedParsing(cmd) + + resp, err := 
client.EmergencyReparentShard(commandCtx, &vtctldatapb.EmergencyReparentShardRequest{ + Keyspace: keyspace, + Shard: shard, + NewPrimary: newPrimaryAlias, + IgnoreReplicas: ignoreReplicaAliases, + WaitReplicasTimeout: protoutil.DurationToProto(emergencyReparentShardOptions.WaitReplicasTimeout), + }) + if err != nil { + return err + } + + for _, event := range resp.Events { + fmt.Println(logutil.EventString(event)) + } + + return nil +} + +var initShardPrimaryOptions = struct { + WaitReplicasTimeout time.Duration + Force bool +}{} + +func commandInitShardPrimary(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + tabletAlias, err := topoproto.ParseTabletAlias(cmd.Flags().Arg(1)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.InitShardPrimary(commandCtx, &vtctldatapb.InitShardPrimaryRequest{ + Keyspace: keyspace, + Shard: shard, + PrimaryElectTabletAlias: tabletAlias, + WaitReplicasTimeout: protoutil.DurationToProto(initShardPrimaryOptions.WaitReplicasTimeout), + Force: initShardPrimaryOptions.Force, + }) + if err != nil { + return err + } + + for _, event := range resp.Events { + log.Infof("%v", event) + } + + return err +} + +var plannedReparentShardOptions = struct { + NewPrimaryAliasStr string + AvoidPrimaryAliasStr string + WaitReplicasTimeout time.Duration +}{} + +func commandPlannedReparentShard(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + var ( + newPrimaryAlias *topodatapb.TabletAlias + avoidPrimaryAlias *topodatapb.TabletAlias + ) + + if plannedReparentShardOptions.NewPrimaryAliasStr != "" { + newPrimaryAlias, err = topoproto.ParseTabletAlias(plannedReparentShardOptions.NewPrimaryAliasStr) + if err != nil { + return err + } + } + + if plannedReparentShardOptions.AvoidPrimaryAliasStr != "" { + 
avoidPrimaryAlias, err = topoproto.ParseTabletAlias(plannedReparentShardOptions.AvoidPrimaryAliasStr) + if err != nil { + return err + } + } + + cli.FinishedParsing(cmd) + + resp, err := client.PlannedReparentShard(commandCtx, &vtctldatapb.PlannedReparentShardRequest{ + Keyspace: keyspace, + Shard: shard, + NewPrimary: newPrimaryAlias, + AvoidPrimary: avoidPrimaryAlias, + WaitReplicasTimeout: protoutil.DurationToProto(plannedReparentShardOptions.WaitReplicasTimeout), + }) + if err != nil { + return err + } + + for _, event := range resp.Events { + fmt.Println(logutil.EventString(event)) + } + + return nil +} + +func commandReparentTablet(cmd *cobra.Command, args []string) error { + alias, err := topoproto.ParseTabletAlias(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + resp, err := client.ReparentTablet(commandCtx, &vtctldatapb.ReparentTabletRequest{ + Tablet: alias, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func commandTabletExternallyReparented(cmd *cobra.Command, args []string) error { + alias, err := topoproto.ParseTabletAlias(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + resp, err := client.TabletExternallyReparented(commandCtx, &vtctldatapb.TabletExternallyReparentedRequest{ + Tablet: alias, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + EmergencyReparentShard.Flags().DurationVar(&emergencyReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", *topo.RemoteOperationTimeout, "Time to wait for replicas to catch up in reparenting.") + EmergencyReparentShard.Flags().StringVar(&emergencyReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary. 
If not specified, the vtctld will select the best candidate to promote.") + EmergencyReparentShard.Flags().StringSliceVarP(&emergencyReparentShardOptions.IgnoreReplicaAliasStrList, "ignore-replicas", "i", nil, "Comma-separated, repeated list of replica tablet aliases to ignore during the emergency reparent.") + Root.AddCommand(EmergencyReparentShard) + + InitShardPrimary.Flags().DurationVar(&initShardPrimaryOptions.WaitReplicasTimeout, "wait-replicas-timeout", 30*time.Second, "time to wait for replicas to catch up in reparenting") + InitShardPrimary.Flags().BoolVar(&initShardPrimaryOptions.Force, "force", false, "will force the reparent even if the provided tablet is not a master or the shard master") + Root.AddCommand(InitShardPrimary) + + PlannedReparentShard.Flags().DurationVar(&plannedReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", *topo.RemoteOperationTimeout, "Time to wait for replicas to catch up on replication both before and after reparenting.") + PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary.") + PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.AvoidPrimaryAliasStr, "avoid-primary", "", "Alias of a tablet that should not be the primary; i.e. \"reparent to any other tablet if this one is the primary\".") + Root.AddCommand(PlannedReparentShard) + + Root.AddCommand(ReparentTablet) + Root.AddCommand(TabletExternallyReparented) +} diff --git a/go/cmd/vtctldclient/internal/command/root.go b/go/cmd/vtctldclient/internal/command/root.go new file mode 100644 index 00000000000..7243c8836e5 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/root.go @@ -0,0 +1,81 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "context" + "errors" + "io" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" +) + +var ( + client vtctldclient.VtctldClient + traceCloser io.Closer + commandCtx context.Context + commandCancel func() + + server string + actionTimeout time.Duration + + // Root is the main entrypoint to the vtctldclient CLI. + Root = &cobra.Command{ + // We use PersistentPreRun to set up the tracer, grpc client, and + // command context for every command. + PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { + traceCloser = trace.StartTracing("vtctldclient") + if server == "" { + err = errors.New("please specify -server to specify the vtctld server to connect to") + log.Error(err) + return err + } + + client, err = vtctldclient.New("grpc", server) + + commandCtx, commandCancel = context.WithTimeout(context.Background(), actionTimeout) + return err + }, + // Similarly, PersistentPostRun cleans up the resources spawned by + // PersistentPreRun. + PersistentPostRunE: func(cmd *cobra.Command, args []string) error { + commandCancel() + err := client.Close() + trace.LogErrorsWhenClosing(traceCloser) + return err + }, + TraverseChildren: true, + // By default, cobra will print any error returned by a child command to + // stderr, and then return that error back up the call chain. 
Since we + // use vitess's log package to log any error we get back from + // Root.Execute() (in ../../main.go) this actually results in duplicate + // stderr lines. So, somewhat counterintuitively, we actually "silence" + // all errors in cobra (just from being output, they still get + // propagated). + SilenceErrors: true, + } +) + +func init() { + Root.PersistentFlags().StringVar(&server, "server", "", "server to use for connection") + Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout for the total command") +} diff --git a/go/cmd/vtctldclient/internal/command/schema.go b/go/cmd/vtctldclient/internal/command/schema.go new file mode 100644 index 00000000000..f1e438df308 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/schema.go @@ -0,0 +1,101 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "errors" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/topo/topoproto" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// GetSchema makes a GetSchema gRPC call to a vtctld. +var GetSchema = &cobra.Command{ + Use: "GetSchema [--tables TABLES ...] [--exclude-tables EXCLUDE_TABLES ...] 
[{--table-names-only | --table-sizes-only}] [--include-views] alias", + Args: cobra.ExactArgs(1), + RunE: commandGetSchema, +} + +var getSchemaOptions = struct { + Tables []string + ExcludeTables []string + IncludeViews bool + TableNamesOnly bool + TableSizesOnly bool +}{} + +func commandGetSchema(cmd *cobra.Command, args []string) error { + if getSchemaOptions.TableNamesOnly && getSchemaOptions.TableSizesOnly { + return errors.New("can only pass one of --table-names-only and --table-sizes-only") + } + + alias, err := topoproto.ParseTabletAlias(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.GetSchema(commandCtx, &vtctldatapb.GetSchemaRequest{ + TabletAlias: alias, + Tables: getSchemaOptions.Tables, + ExcludeTables: getSchemaOptions.ExcludeTables, + IncludeViews: getSchemaOptions.IncludeViews, + TableNamesOnly: getSchemaOptions.TableNamesOnly, + TableSizesOnly: getSchemaOptions.TableSizesOnly, + }) + if err != nil { + return err + } + + if getSchemaOptions.TableNamesOnly { + names := make([]string, len(resp.Schema.TableDefinitions)) + + for i, td := range resp.Schema.TableDefinitions { + names[i] = td.Name + } + + fmt.Printf("%s\n", strings.Join(names, "\n")) + + return nil + } + + data, err := cli.MarshalJSON(resp.Schema) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + GetSchema.Flags().StringSliceVar(&getSchemaOptions.Tables, "tables", nil, "TODO") + GetSchema.Flags().StringSliceVar(&getSchemaOptions.ExcludeTables, "exclude-tables", nil, "TODO") + GetSchema.Flags().BoolVar(&getSchemaOptions.IncludeViews, "include-views", false, "TODO") + GetSchema.Flags().BoolVarP(&getSchemaOptions.TableNamesOnly, "table-names-only", "n", false, "TODO") + GetSchema.Flags().BoolVarP(&getSchemaOptions.TableSizesOnly, "table-sizes-only", "s", false, "TODO") + + Root.AddCommand(GetSchema) +} diff --git a/go/cmd/vtctldclient/internal/command/serving_graph.go 
b/go/cmd/vtctldclient/internal/command/serving_graph.go new file mode 100644 index 00000000000..18a5d04ec37 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/serving_graph.go @@ -0,0 +1,93 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // GetSrvKeyspaces makes a GetSrvKeyspaces gRPC call to a vtctld. + GetSrvKeyspaces = &cobra.Command{ + Use: "GetSrvKeyspaces [ ...]", + Args: cobra.MinimumNArgs(1), + RunE: commandGetSrvKeyspaces, + } + // GetSrvVSchema makes a GetSrvVSchema gRPC call to a vtctld. 
+ GetSrvVSchema = &cobra.Command{ + Use: "GetSrvVSchema cell", + Args: cobra.ExactArgs(1), + RunE: commandGetSrvVSchema, + } +) + +func commandGetSrvKeyspaces(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + keyspace := cmd.Flags().Arg(0) + cells := cmd.Flags().Args()[1:] + + resp, err := client.GetSrvKeyspaces(commandCtx, &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: keyspace, + Cells: cells, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.SrvKeyspaces) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func commandGetSrvVSchema(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + cell := cmd.Flags().Arg(0) + + resp, err := client.GetSrvVSchema(commandCtx, &vtctldatapb.GetSrvVSchemaRequest{ + Cell: cell, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.SrvVSchema) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + Root.AddCommand(GetSrvKeyspaces) + Root.AddCommand(GetSrvVSchema) +} diff --git a/go/cmd/vtctldclient/internal/command/shards.go b/go/cmd/vtctldclient/internal/command/shards.go new file mode 100644 index 00000000000..b43eff0769e --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/shards.go @@ -0,0 +1,234 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/topo/topoproto" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // CreateShard makes a CreateShard gRPC request to a vtctld. + CreateShard = &cobra.Command{ + Use: "CreateShard ", + Args: cobra.ExactArgs(1), + RunE: commandCreateShard, + } + // DeleteShards makes a DeleteShards gRPC request to a vtctld. + DeleteShards = &cobra.Command{ + Use: "DeleteShards [ ...]", + Args: cobra.MinimumNArgs(1), + RunE: commandDeleteShards, + } + // GetShard makes a GetShard gRPC request to a vtctld. + GetShard = &cobra.Command{ + Use: "GetShard ", + Args: cobra.ExactArgs(1), + RunE: commandGetShard, + } + // RemoveShardCell makes a RemoveShardCell gRPC request to a vtctld. + RemoveShardCell = &cobra.Command{ + Use: "RemoveShardCell ", + Args: cobra.ExactArgs(2), + RunE: commandRemoveShardCell, + } + // ShardReplicationPositions makes a ShardReplicationPositions gRPC request + // to a vtctld. + ShardReplicationPositions = &cobra.Command{ + Use: "ShardReplicationPositions ", + Long: `Shows the replication status of each tablet in the shard graph. +Output is sorted by tablet type, then replication position. 
+Use ctrl-C to interrupt the command and see partial results if needed.`, + Args: cobra.ExactArgs(1), + RunE: commandShardReplicationPositions, + } +) + +var createShardOptions = struct { + Force bool + IncludeParent bool +}{} + +func commandCreateShard(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.CreateShard(commandCtx, &vtctldatapb.CreateShardRequest{ + Keyspace: keyspace, + ShardName: shard, + Force: createShardOptions.Force, + IncludeParent: createShardOptions.IncludeParent, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +var deleteShardsOptions = struct { + Recursive bool + EvenIfServing bool +}{} + +func commandDeleteShards(cmd *cobra.Command, args []string) error { + shards, err := cli.ParseKeyspaceShards(cmd.Flags().Args()) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + _, err = client.DeleteShards(commandCtx, &vtctldatapb.DeleteShardsRequest{ + Shards: shards, + EvenIfServing: deleteShardsOptions.EvenIfServing, + Recursive: deleteShardsOptions.Recursive, + }) + + if err != nil { + return fmt.Errorf("%w: while deleting %d shards; please inspect the topo", err, len(shards)) + } + + fmt.Printf("Successfully deleted %d shards\n", len(shards)) + + return nil +} + +func commandGetShard(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.GetShard(commandCtx, &vtctldatapb.GetShardRequest{ + Keyspace: keyspace, + ShardName: shard, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.Shard) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +var 
removeShardCellOptions = struct { + Force bool + Recursive bool +}{} + +func commandRemoveShardCell(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + cell := cmd.Flags().Arg(1) + + _, err = client.RemoveShardCell(commandCtx, &vtctldatapb.RemoveShardCellRequest{ + Keyspace: keyspace, + ShardName: shard, + Cell: cell, + Force: removeShardCellOptions.Force, + Recursive: removeShardCellOptions.Recursive, + }) + + if err != nil { + return err + } + + fmt.Printf("Successfully removed cell %v from shard %s/%s\n", cell, keyspace, shard) + + return nil +} + +func commandShardReplicationPositions(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.ShardReplicationPositions(commandCtx, &vtctldatapb.ShardReplicationPositionsRequest{ + Keyspace: keyspace, + Shard: shard, + }) + if err != nil { + return err + } + + for _, rt := range cli.SortedReplicatingTablets(resp.TabletMap, resp.ReplicationStatuses) { + var line string + + switch rt.Status { + case nil: + line = cli.MarshalTabletAWK(rt.Tablet) + " " + default: + line = cli.MarshalTabletAWK(rt.Tablet) + fmt.Sprintf(" %v %v", rt.Status.Position, rt.Status.SecondsBehindMaster) + } + + fmt.Println(line) + } + + return nil +} + +func init() { + CreateShard.Flags().BoolVarP(&createShardOptions.Force, "force", "f", false, "") + CreateShard.Flags().BoolVarP(&createShardOptions.IncludeParent, "include-parent", "p", false, "") + Root.AddCommand(CreateShard) + + DeleteShards.Flags().BoolVarP(&deleteShardsOptions.Recursive, "recursive", "r", false, "Also delete all tablets belonging to the shard. 
This is required to delete a non-empty shard.") + Root.AddCommand(DeleteShards) + + Root.AddCommand(GetShard) + + RemoveShardCell.Flags().BoolVarP(&removeShardCellOptions.Force, "force", "f", false, "Proceed even if the cell's topology server cannot be reached. The assumption is that you turned down the entire cell, and just need to update the global topo data.") + RemoveShardCell.Flags().BoolVarP(&removeShardCellOptions.Recursive, "recursive", "r", false, "Also delete all tablets in that cell belonging to the specified shard.") + Root.AddCommand(RemoveShardCell) + + Root.AddCommand(ShardReplicationPositions) +} diff --git a/go/cmd/vtctldclient/internal/command/tablets.go b/go/cmd/vtctldclient/internal/command/tablets.go new file mode 100644 index 00000000000..65aef8298fd --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/tablets.go @@ -0,0 +1,237 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/topo/topoproto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // ChangeTabletType makes a ChangeTabletType gRPC call to a vtctld.
+ ChangeTabletType = &cobra.Command{ + Use: "ChangeTabletType [--dry-run] TABLET_ALIAS TABLET_TYPE", + Args: cobra.ExactArgs(2), + RunE: commandChangeTabletType, + } + // DeleteTablets makes a DeleteTablets gRPC call to a vtctld. + DeleteTablets = &cobra.Command{ + Use: "DeleteTablets TABLET_ALIAS [ TABLET_ALIAS ... ]", + Args: cobra.MinimumNArgs(1), + RunE: commandDeleteTablets, + } + // GetTablet makes a GetTablet gRPC call to a vtctld. + GetTablet = &cobra.Command{ + Use: "GetTablet alias", + Args: cobra.ExactArgs(1), + RunE: commandGetTablet, + } + // GetTablets makes a GetTablets gRPC call to a vtctld. + GetTablets = &cobra.Command{ + Use: "GetTablets [--strict] [{--cell $c1 [--cell $c2 ...], --keyspace $ks [--shard $shard], --tablet-alias $alias}]", + Args: cobra.NoArgs, + RunE: commandGetTablets, + } +) + +var changeTabletTypeOptions = struct { + DryRun bool +}{} + +func commandChangeTabletType(cmd *cobra.Command, args []string) error { + aliasStr := cmd.Flags().Arg(0) + typeStr := cmd.Flags().Arg(1) + + alias, err := topoproto.ParseTabletAlias(aliasStr) + if err != nil { + return err + } + + newType, err := topoproto.ParseTabletType(typeStr) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.ChangeTabletType(commandCtx, &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: alias, + DbType: newType, + DryRun: changeTabletTypeOptions.DryRun, + }) + if err != nil { + return err + } + + if resp.WasDryRun { + fmt.Println("--- DRY RUN ---") + } + + fmt.Printf("- %v\n", cli.MarshalTabletAWK(resp.BeforeTablet)) + fmt.Printf("+ %v\n", cli.MarshalTabletAWK(resp.AfterTablet)) + + return nil +} + +var deleteTabletsOptions = struct { + AllowPrimary bool +}{} + +func commandDeleteTablets(cmd *cobra.Command, args []string) error { + aliases, err := cli.TabletAliasesFromPosArgs(cmd.Flags().Args()) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + _, err = client.DeleteTablets(commandCtx, 
&vtctldatapb.DeleteTabletsRequest{ + TabletAliases: aliases, + AllowPrimary: deleteTabletsOptions.AllowPrimary, + }) + + if err != nil { + return fmt.Errorf("%w: while deleting %d tablets; please inspect the topo", err, len(aliases)) + } + + fmt.Printf("Successfully deleted %d tablets\n", len(aliases)) + + return nil +} + +func commandGetTablet(cmd *cobra.Command, args []string) error { + aliasStr := cmd.Flags().Arg(0) + alias, err := topoproto.ParseTabletAlias(aliasStr) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.GetTablet(commandCtx, &vtctldatapb.GetTabletRequest{TabletAlias: alias}) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.Tablet) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +var getTabletsOptions = struct { + Cells []string + Keyspace string + Shard string + + TabletAliasStrings []string + + Format string + Strict bool +}{} + +func commandGetTablets(cmd *cobra.Command, args []string) error { + format := strings.ToLower(getTabletsOptions.Format) + + switch format { + case "awk", "json": + default: + return fmt.Errorf("invalid output format, got %s", getTabletsOptions.Format) + } + + var aliases []*topodatapb.TabletAlias + + if len(getTabletsOptions.TabletAliasStrings) > 0 { + switch { + case getTabletsOptions.Keyspace != "": + return fmt.Errorf("--keyspace (= %s) cannot be passed when using --tablet-alias (= %v)", getTabletsOptions.Keyspace, getTabletsOptions.TabletAliasStrings) + case getTabletsOptions.Shard != "": + return fmt.Errorf("--shard (= %s) cannot be passed when using --tablet-alias (= %v)", getTabletsOptions.Shard, getTabletsOptions.TabletAliasStrings) + case len(getTabletsOptions.Cells) > 0: + return fmt.Errorf("--cell (= %v) cannot be passed when using --tablet-alias (= %v)", getTabletsOptions.Cells, getTabletsOptions.TabletAliasStrings) + } + + var err error + aliases, err = 
cli.TabletAliasesFromPosArgs(getTabletsOptions.TabletAliasStrings) + if err != nil { + return err + } + } + + if getTabletsOptions.Keyspace == "" && getTabletsOptions.Shard != "" { + return fmt.Errorf("--shard (= %s) cannot be passed without also passing --keyspace", getTabletsOptions.Shard) + } + + cli.FinishedParsing(cmd) + + resp, err := client.GetTablets(commandCtx, &vtctldatapb.GetTabletsRequest{ + TabletAliases: aliases, + Cells: getTabletsOptions.Cells, + Keyspace: getTabletsOptions.Keyspace, + Shard: getTabletsOptions.Shard, + Strict: getTabletsOptions.Strict, + }) + if err != nil { + return err + } + + switch format { + case "awk": + for _, t := range resp.Tablets { + fmt.Println(cli.MarshalTabletAWK(t)) + } + case "json": + data, err := cli.MarshalJSON(resp.Tablets) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + } + + return nil +} + +func init() { + ChangeTabletType.Flags().BoolVarP(&changeTabletTypeOptions.DryRun, "dry-run", "d", false, "Shows the proposed change without actually executing it") + Root.AddCommand(ChangeTabletType) + + DeleteTablets.Flags().BoolVarP(&deleteTabletsOptions.AllowPrimary, "allow-primary", "p", false, "Allow the primary tablet of a shard to be deleted. 
Use with caution.") + Root.AddCommand(DeleteTablets) + + Root.AddCommand(GetTablet) + + GetTablets.Flags().StringSliceVarP(&getTabletsOptions.TabletAliasStrings, "tablet-alias", "t", nil, "List of tablet aliases to filter by") + GetTablets.Flags().StringSliceVarP(&getTabletsOptions.Cells, "cell", "c", nil, "List of cells to filter tablets by") + GetTablets.Flags().StringVarP(&getTabletsOptions.Keyspace, "keyspace", "k", "", "Keyspace to filter tablets by") + GetTablets.Flags().StringVarP(&getTabletsOptions.Shard, "shard", "s", "", "Shard to filter tablets by") + GetTablets.Flags().StringVar(&getTabletsOptions.Format, "format", "awk", "Output format to use; valid choices are (json, awk)") + GetTablets.Flags().BoolVar(&getTabletsOptions.Strict, "strict", false, "Require all cells to return successful tablet data. Without --strict, tablet listings may be partial.") + Root.AddCommand(GetTablets) +} diff --git a/go/cmd/vtctldclient/internal/command/vschemas.go b/go/cmd/vtctldclient/internal/command/vschemas.go new file mode 100644 index 00000000000..ac4f4499090 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/vschemas.go @@ -0,0 +1,62 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // GetVSchema makes a GetVSchema gRPC call to a vtctld. 
+ GetVSchema = &cobra.Command{ + Use: "GetVSchema keyspace", + Args: cobra.ExactArgs(1), + RunE: commandGetVSchema, + } +) + +func commandGetVSchema(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + keyspace := cmd.Flags().Arg(0) + + resp, err := client.GetVSchema(commandCtx, &vtctldatapb.GetVSchemaRequest{ + Keyspace: keyspace, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.VSchema) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + Root.AddCommand(GetVSchema) +} diff --git a/go/cmd/vtctldclient/internal/command/workflows.go b/go/cmd/vtctldclient/internal/command/workflows.go new file mode 100644 index 00000000000..760139a9be9 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/workflows.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // GetWorkflows makes a GetWorkflows gRPC call to a vtctld. 
+ GetWorkflows = &cobra.Command{ + Use: "GetWorkflows ", + Args: cobra.ExactArgs(1), + RunE: commandGetWorkflows, + } +) + +var getWorkflowsOptions = struct { + ShowAll bool +}{} + +func commandGetWorkflows(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + ks := cmd.Flags().Arg(0) + + resp, err := client.GetWorkflows(commandCtx, &vtctldatapb.GetWorkflowsRequest{ + Keyspace: ks, + ActiveOnly: !getWorkflowsOptions.ShowAll, + }) + + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + GetWorkflows.Flags().BoolVarP(&getWorkflowsOptions.ShowAll, "show-all", "a", false, "Show all workflows instead of just active workflows") + Root.AddCommand(GetWorkflows) +} diff --git a/go/cmd/vtctldclient/main.go b/go/cmd/vtctldclient/main.go index f00eee7a410..9086b289b8c 100644 --- a/go/cmd/vtctldclient/main.go +++ b/go/cmd/vtctldclient/main.go @@ -17,70 +17,19 @@ limitations under the License. package main import ( - "context" - "errors" "flag" - "io" "os" - "time" - - "github.com/spf13/cobra" + "vitess.io/vitess/go/cmd/vtctldclient/internal/command" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtctl/vtctldclient" -) - -var ( - client vtctldclient.VtctldClient - traceCloser io.Closer - commandCtx context.Context - commandCancel func() - - server string - actionTimeout time.Duration - - // We use cobra to make subcommands easier to manage. And do a hack below - // in main to grab the rest of the flags globally scattered to make sure we - // pick up things like common servenv flags, tracing flags, etc. Refer to - // commands.go for all of the subcommands. - rootCmd = &cobra.Command{ - // We use PersistentPreRun to set up the tracer, grpc client, and - // command context for every command. 
- PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - traceCloser = trace.StartTracing("vtctldclient") - if server == "" { - err = errors.New("please specify -server to specify the vtctld server to connect to") - log.Error(err) - return err - } - - client, err = vtctldclient.New("grpc", server) - - commandCtx, commandCancel = context.WithTimeout(context.Background(), actionTimeout) - return err - }, - // Similarly, PersistentPostRun cleans up the resources spawned by - // PersistentPreRun. - PersistentPostRunE: func(cmd *cobra.Command, args []string) error { - commandCancel() - err := client.Close() - trace.LogErrorsWhenClosing(traceCloser) - return err - }, - TraverseChildren: true, - } ) func main() { defer exit.Recover() // Grab all those global flags across the codebase and shove 'em on in. - rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) - // Attach our local flags - rootCmd.PersistentFlags().StringVar(&server, "server", "", "server to use for connection") - rootCmd.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout for the total command") + command.Root.PersistentFlags().AddGoFlagSet(flag.CommandLine) // hack to get rid of an "ERROR: logging before flag.Parse" args := os.Args[:] @@ -89,7 +38,7 @@ func main() { os.Args = args // back to your regularly scheduled cobra programming - if err := rootCmd.Execute(); err != nil { + if err := command.Root.Execute(); err != nil { log.Error(err) exit.Return(1) } diff --git a/go/cmd/vtgateclienttest/services/echo.go b/go/cmd/vtgateclienttest/services/echo.go index c12807e09b1..63102ce692e 100644 --- a/go/cmd/vtgateclienttest/services/echo.go +++ b/go/cmd/vtgateclienttest/services/echo.go @@ -143,7 +143,7 @@ func (c *echoClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session return c.fallbackClient.ExecuteBatch(ctx, session, sqlList, bindVariablesList) } -func (c *echoClient) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid 
*binlogdatapb.VGtid, filter *binlogdatapb.Filter, callback func([]*binlogdatapb.VEvent) error) error { +func (c *echoClient) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags, callback func([]*binlogdatapb.VEvent) error) error { if strings.HasPrefix(vgtid.ShardGtids[0].Shard, EchoPrefix) { _ = callback([]*binlogdatapb.VEvent{ { @@ -170,5 +170,5 @@ func (c *echoClient) VStream(ctx context.Context, tabletType topodatapb.TabletTy return nil } - return c.fallbackClient.VStream(ctx, tabletType, vgtid, filter, callback) + return c.fallbackClient.VStream(ctx, tabletType, vgtid, filter, flags, callback) } diff --git a/go/cmd/vtgateclienttest/services/fallback.go b/go/cmd/vtgateclienttest/services/fallback.go index b94ca031106..5119fa36587 100644 --- a/go/cmd/vtgateclienttest/services/fallback.go +++ b/go/cmd/vtgateclienttest/services/fallback.go @@ -56,8 +56,8 @@ func (c fallbackClient) ResolveTransaction(ctx context.Context, dtid string) err return c.fallback.ResolveTransaction(ctx, dtid) } -func (c fallbackClient) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { - return c.fallback.VStream(ctx, tabletType, vgtid, filter, send) +func (c fallbackClient) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags, send func([]*binlogdatapb.VEvent) error) error { + return c.fallback.VStream(ctx, tabletType, vgtid, filter, flags, send) } func (c fallbackClient) HandlePanic(err *error) { diff --git a/go/cmd/vtgateclienttest/services/terminal.go b/go/cmd/vtgateclienttest/services/terminal.go index 3f4a4a7d30b..6a8e30fd9da 100644 --- a/go/cmd/vtgateclienttest/services/terminal.go +++ b/go/cmd/vtgateclienttest/services/terminal.go @@ -66,7 +66,7 @@ func (c *terminalClient) 
ResolveTransaction(ctx context.Context, dtid string) er return errTerminal } -func (c *terminalClient) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { +func (c *terminalClient) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags, send func([]*binlogdatapb.VEvent) error) error { return errTerminal } diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go index 19c7063f233..e64ced875d0 100644 --- a/go/cmd/vtorc/main.go +++ b/go/cmd/vtorc/main.go @@ -19,6 +19,9 @@ package main import ( "flag" + _ "github.com/go-sql-driver/mysql" + _ "github.com/mattn/go-sqlite3" + "vitess.io/vitess/go/vt/orchestrator/app" "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index ddeba7a8894..1d6838c3e9b 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -103,7 +103,7 @@ func main() { DBConfigs: config.DB.Clone(), QueryServiceControl: qsc, UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), - VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld), + VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), MetadataManager: &mysqlctl.MetadataManager{}, } if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil { diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go index 700af9f5600..2b645ec0812 100644 --- a/go/cmd/vttestserver/main.go +++ b/go/cmd/vttestserver/main.go @@ -23,8 +23,10 @@ import ( "flag" "fmt" "os" + "os/signal" "strconv" "strings" + "syscall" "github.com/golang/protobuf/proto" @@ -81,6 +83,16 @@ func init() { " Also, the output specifies the mysql unix 
socket"+ " instead of the vtgate port.") + flag.BoolVar(&config.PersistentMode, "persistent_mode", false, + "If this flag is set, the MySQL data directory is not cleaned up"+ + " when LocalCluster.TearDown() is called. This is useful for running"+ + " vttestserver as a database container in local developer environments. Note"+ + " that db migration files (-schema_dir option) and seeding of"+ + " random data (-initialize_with_random_data option) will only run during"+ + " cluster startup if the data directory does not already exist. vschema"+ + " migrations are run every time the cluster starts, since persistence"+ + " for the topology server has not been implemented yet") + flag.BoolVar(&doSeed, "initialize_with_random_data", false, "If this flag is each table-shard will be initialized"+ " with random data. See also the 'rng_seed' and 'min_shard_size'"+ @@ -229,7 +241,9 @@ func main() { log.Fatal(err) } - select {} + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + <-c } func runCluster() (vttest.LocalCluster, error) { diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/vttestserver_test.go index fd98ad7b702..1fe55919d7f 100644 --- a/go/cmd/vttestserver/vttestserver_test.go +++ b/go/cmd/vttestserver/vttestserver_test.go @@ -21,12 +21,14 @@ import ( "fmt" "io" "io/ioutil" + "math/rand" "os" "path" "strings" "testing" "time" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/tlstest" "github.com/stretchr/testify/assert" @@ -52,10 +54,12 @@ type columnVindex struct { } func TestRunsVschemaMigrations(t *testing.T) { + args := os.Args + conf := config + defer resetFlags(args, conf) + cluster, err := startCluster() defer cluster.TearDown() - args := os.Args - defer resetFlags(args) assert.NoError(t, err) assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) @@ -67,12 +71,69 @@ func TestRunsVschemaMigrations(t 
*testing.T) { assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table1", vindex: "my_vdx", vindexType: "hash", column: "id"}) } +func TestPersistentMode(t *testing.T) { + args := os.Args + conf := config + defer resetFlags(args, conf) + + dir, err := ioutil.TempDir("/tmp", "vttestserver_persistent_mode_") + assert.NoError(t, err) + defer os.RemoveAll(dir) + + cluster, err := startPersistentCluster(dir) + assert.NoError(t, err) + + // basic sanity checks similar to TestRunsVschemaMigrations + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) + + // insert some data to ensure persistence across teardowns + err = execOnCluster(cluster, "app_customer", func(conn *mysql.Conn) error { + _, err := conn.ExecuteFetch("insert into customers (id, name) values (1, 'gopherson')", 1, false) + return err + }) + assert.NoError(t, err) + + expectedRows := [][]sqltypes.Value{ + {sqltypes.NewInt64(1), sqltypes.NewVarChar("gopherson"), sqltypes.NULL}, + } + + // ensure data was actually inserted + var res *sqltypes.Result + err = execOnCluster(cluster, "app_customer", func(conn *mysql.Conn) (err error) { + res, err = conn.ExecuteFetch("SELECT * FROM customers", 1, false) + return err + }) + assert.NoError(t, err) + assert.Equal(t, expectedRows, res.Rows) + + // reboot the persistent cluster + cluster.TearDown() + cluster, err = startPersistentCluster(dir) + defer cluster.TearDown() + assert.NoError(t, err) + + // rerun our sanity checks to make sure vschema migrations are run during every startup + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: 
"customers", vindex: "hash", vindexType: "hash", column: "id"}) + + // ensure previous data was successfully persisted + err = execOnCluster(cluster, "app_customer", func(conn *mysql.Conn) (err error) { + res, err = conn.ExecuteFetch("SELECT * FROM customers", 1, false) + return err + }) + assert.NoError(t, err) + assert.Equal(t, expectedRows, res.Rows) +} + func TestCanVtGateExecute(t *testing.T) { + args := os.Args + conf := config + defer resetFlags(args, conf) + cluster, err := startCluster() assert.NoError(t, err) defer cluster.TearDown() - args := os.Args - defer resetFlags(args) client, err := vtctlclient.New(fmt.Sprintf("localhost:%v", cluster.GrpcPort())) assert.NoError(t, err) @@ -109,6 +170,10 @@ Out: } func TestMtlsAuth(t *testing.T) { + args := os.Args + conf := config + defer resetFlags(args, conf) + // Our test root. root, err := ioutil.TempDir("", "tlstest") if err != nil { @@ -141,8 +206,6 @@ func TestMtlsAuth(t *testing.T) { fmt.Sprintf("-grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp")) assert.NoError(t, err) defer cluster.TearDown() - args := os.Args - defer resetFlags(args) // startCluster will apply vschema migrations using vtctl grpc and the clientCert. assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) @@ -150,6 +213,10 @@ func TestMtlsAuth(t *testing.T) { } func TestMtlsAuthUnauthorizedFails(t *testing.T) { + args := os.Args + conf := config + defer resetFlags(args, conf) + // Our test root. 
root, err := ioutil.TempDir("", "tlstest") if err != nil { @@ -182,13 +249,21 @@ func TestMtlsAuthUnauthorizedFails(t *testing.T) { fmt.Sprintf("-vtctld_grpc_ca=%s", caCert), fmt.Sprintf("-grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp")) defer cluster.TearDown() - args := os.Args - defer resetFlags(args) assert.Error(t, err) assert.Contains(t, err.Error(), "code = Unauthenticated desc = client certificate not authorized") } +func startPersistentCluster(dir string, flags ...string) (vttest.LocalCluster, error) { + flags = append(flags, []string{ + "-persistent_mode", + // FIXME: if port is not provided, data_dir is not respected + fmt.Sprintf("-port=%d", randomPort()), + fmt.Sprintf("-data_dir=%s", dir), + }...) + return startCluster(flags...) +} + func startCluster(flags ...string) (vttest.LocalCluster, error) { schemaDirArg := "-schema_dir=data/schema" tabletHostname := "-tablet_hostname=localhost" @@ -201,6 +276,13 @@ func startCluster(flags ...string) (vttest.LocalCluster, error) { } func addColumnVindex(cluster vttest.LocalCluster, keyspace string, vschemaMigration string) error { + return execOnCluster(cluster, keyspace, func(conn *mysql.Conn) error { + _, err := conn.ExecuteFetch(vschemaMigration, 1, false) + return err + }) +} + +func execOnCluster(cluster vttest.LocalCluster, keyspace string, f func(*mysql.Conn) error) error { ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -213,8 +295,7 @@ func addColumnVindex(cluster vttest.LocalCluster, keyspace string, vschemaMigrat return err } defer conn.Close() - _, err = conn.ExecuteFetch(vschemaMigration, 1, false) - return err + return f(conn) } func assertColumnVindex(t *testing.T, cluster vttest.LocalCluster, expected columnVindex) { @@ -243,6 +324,12 @@ func assertEqual(t *testing.T, actual string, expected string, message string) { } } -func resetFlags(args []string) { +func resetFlags(args []string, conf vttest.Config) { os.Args = args + config = conf +} + +func 
randomPort() int { + v := rand.Int31n(20000) + return int(v + 10000) } diff --git a/go/flagutil/flagutil.go b/go/flagutil/flagutil.go index 0b9a449cccf..a03cb972d17 100644 --- a/go/flagutil/flagutil.go +++ b/go/flagutil/flagutil.go @@ -21,6 +21,7 @@ package flagutil import ( "errors" "flag" + "fmt" "sort" "strings" ) @@ -124,3 +125,58 @@ func (value StringMapValue) String() string { sort.Strings(parts) return strings.Join(parts, ",") } + +// DualFormatStringListVar creates a flag which supports both dashes and underscores +func DualFormatStringListVar(p *[]string, name string, value []string, usage string) { + dashes := strings.Replace(name, "_", "-", -1) + underscores := strings.Replace(name, "-", "_", -1) + + StringListVar(p, underscores, value, usage) + if dashes != underscores { + StringListVar(p, dashes, *p, fmt.Sprintf("Synonym to -%s", underscores)) + } +} + +// DualFormatStringVar creates a flag which supports both dashes and underscores +func DualFormatStringVar(p *string, name string, value string, usage string) { + dashes := strings.Replace(name, "_", "-", -1) + underscores := strings.Replace(name, "-", "_", -1) + + flag.StringVar(p, underscores, value, usage) + if dashes != underscores { + flag.StringVar(p, dashes, *p, fmt.Sprintf("Synonym to -%s", underscores)) + } +} + +// DualFormatInt64Var creates a flag which supports both dashes and underscores +func DualFormatInt64Var(p *int64, name string, value int64, usage string) { + dashes := strings.Replace(name, "_", "-", -1) + underscores := strings.Replace(name, "-", "_", -1) + + flag.Int64Var(p, underscores, value, usage) + if dashes != underscores { + flag.Int64Var(p, dashes, *p, fmt.Sprintf("Synonym to -%s", underscores)) + } +} + +// DualFormatIntVar creates a flag which supports both dashes and underscores +func DualFormatIntVar(p *int, name string, value int, usage string) { + dashes := strings.Replace(name, "_", "-", -1) + underscores := strings.Replace(name, "-", "_", -1) + + flag.IntVar(p, 
underscores, value, usage) + if dashes != underscores { + flag.IntVar(p, dashes, *p, fmt.Sprintf("Synonym to -%s", underscores)) + } +} + +// DualFormatBoolVar creates a flag which supports both dashes and underscores +func DualFormatBoolVar(p *bool, name string, value bool, usage string) { + dashes := strings.Replace(name, "_", "-", -1) + underscores := strings.Replace(name, "-", "_", -1) + + flag.BoolVar(p, underscores, value, usage) + if dashes != underscores { + flag.BoolVar(p, dashes, *p, fmt.Sprintf("Synonym to -%s", underscores)) + } +} diff --git a/go/hack/runtime.go b/go/hack/runtime.go new file mode 100644 index 00000000000..c7355769307 --- /dev/null +++ b/go/hack/runtime.go @@ -0,0 +1,45 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hack + +import ( + "reflect" + "unsafe" +) + +//go:noescape +//go:linkname memhash runtime.memhash +func memhash(p unsafe.Pointer, h, s uintptr) uintptr + +//go:noescape +//go:linkname strhash runtime.strhash +func strhash(p unsafe.Pointer, h uintptr) uintptr + +// RuntimeMemhash provides access to the Go runtime's default hash function for arbitrary bytes. +// This is an optimal hash function which takes an input seed and is potentially implemented in hardware +// for most architectures. This is the same hash function that the language's `map` uses. 
+func RuntimeMemhash(b []byte, seed uint64) uint64 { + pstring := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + return uint64(memhash(unsafe.Pointer(pstring.Data), uintptr(seed), uintptr(pstring.Len))) +} + +// RuntimeStrhash provides access to the Go runtime's default hash function for strings. +// This is an optimal hash function which takes an input seed and is potentially implemented in hardware +// for most architectures. This is the same hash function that the language's `map` uses. +func RuntimeStrhash(str string, seed uint64) uint64 { + return uint64(strhash(unsafe.Pointer(&str), uintptr(seed))) +} diff --git a/go/hack/runtime.s b/go/hack/runtime.s new file mode 100644 index 00000000000..ac00d502ab5 --- /dev/null +++ b/go/hack/runtime.s @@ -0,0 +1,3 @@ +// DO NOT REMOVE: this empty goassembly file forces the Go compiler to perform +// external linking on the sibling `runtime.go`, so that the symbols declared in that +// file become properly resolved diff --git a/go/mysql/auth_server.go b/go/mysql/auth_server.go index 5df328e1896..b568537e16e 100644 --- a/go/mysql/auth_server.go +++ b/go/mysql/auth_server.go @@ -19,7 +19,9 @@ package mysql import ( "bytes" "crypto/rand" + "crypto/rsa" "crypto/sha1" + "crypto/sha256" "encoding/hex" "net" "strings" @@ -117,8 +119,8 @@ func NewSalt() ([]byte, error) { return salt, nil } -// ScramblePassword computes the hash of the password using 4.1+ method. -func ScramblePassword(salt, password []byte) []byte { +// ScrambleMysqlNativePassword computes the hash of the password using 4.1+ method. 
+func ScrambleMysqlNativePassword(salt, password []byte) []byte { if len(password) == 0 { return nil } @@ -189,6 +191,58 @@ func isPassScrambleMysqlNativePassword(reply, salt []byte, mysqlNativePassword s return bytes.Equal(candidateHash2, hash) } +// ScrambleCachingSha2Password computes the hash of the password using SHA256 as required by +// caching_sha2_password plugin for "fast" authentication +func ScrambleCachingSha2Password(salt []byte, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA256(password) + crypt := sha256.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA256(SHA256(stage1Hash) + salt) + crypt.Reset() + crypt.Write(stage1) + innerHash := crypt.Sum(nil) + + crypt.Reset() + crypt.Write(innerHash) + crypt.Write(salt) + scramble := crypt.Sum(nil) + + // token = stage1Hash XOR scrambleHash + for i := range stage1 { + stage1[i] ^= scramble[i] + } + + return stage1 +} + +// EncryptPasswordWithPublicKey obfuscates the password and encrypts it with server's public key as required by +// caching_sha2_password plugin for "full" authentication +func EncryptPasswordWithPublicKey(salt []byte, password []byte, pub *rsa.PublicKey) ([]byte, error) { + if len(password) == 0 { + return nil, nil + } + + buffer := make([]byte, len(password)+1) + copy(buffer, password) + for i := range buffer { + buffer[i] ^= salt[i%len(salt)] + } + + sha1Hash := sha1.New() + enc, err := rsa.EncryptOAEP(sha1Hash, rand.Reader, pub, buffer, nil) + if err != nil { + return nil, err + } + + return enc, nil +} + // Constants for the dialog plugin. 
const ( mysqlDialogMessage = "Enter password: " diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go index 9dbfdfe0d72..ed9062d29bf 100644 --- a/go/mysql/auth_server_clientcert_test.go +++ b/go/mysql/auth_server_clientcert_test.go @@ -61,7 +61,8 @@ func TestValidCert(t *testing.T) { serverConfig, err := vttls.ServerConfig( path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), - path.Join(root, "ca-cert.pem")) + path.Join(root, "ca-cert.pem"), + "") if err != nil { t.Fatalf("TLSServerConfig failed: %v", err) } @@ -143,7 +144,8 @@ func TestNoCert(t *testing.T) { serverConfig, err := vttls.ServerConfig( path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), - path.Join(root, "ca-cert.pem")) + path.Join(root, "ca-cert.pem"), + "") if err != nil { t.Fatalf("TLSServerConfig failed: %v", err) } diff --git a/go/mysql/auth_server_static.go b/go/mysql/auth_server_static.go index acb90678f54..ef95febb292 100644 --- a/go/mysql/auth_server_static.go +++ b/go/mysql/auth_server_static.go @@ -244,7 +244,7 @@ func (a *AuthServerStatic) ValidateHash(salt []byte, user string, authResponse [ return &StaticUserData{entry.UserData, entry.Groups}, nil } } else { - computedAuthResponse := ScramblePassword(salt, []byte(entry.Password)) + computedAuthResponse := ScrambleMysqlNativePassword(salt, []byte(entry.Password)) // Validate the password. 
if matchSourceHost(remoteAddr, entry.SourceHost) && bytes.Equal(authResponse, computedAuthResponse) { return &StaticUserData{entry.UserData, entry.Groups}, nil diff --git a/go/mysql/auth_server_static_test.go b/go/mysql/auth_server_static_test.go index f195b4c72d1..d5e43f312fb 100644 --- a/go/mysql/auth_server_static_test.go +++ b/go/mysql/auth_server_static_test.go @@ -92,7 +92,7 @@ func TestValidateHashGetter(t *testing.T) { t.Fatalf("error generating salt: %v", err) } - scrambled := ScramblePassword(salt, []byte("password")) + scrambled := ScrambleMysqlNativePassword(salt, []byte("password")) getter, err := auth.ValidateHash(salt, "mysql_user", scrambled, addr) if err != nil { t.Fatalf("error validating password: %v", err) @@ -270,7 +270,7 @@ func TestStaticPasswords(t *testing.T) { t.Fatalf("error generating salt: %v", err) } - scrambled := ScramblePassword(salt, []byte(c.password)) + scrambled := ScrambleMysqlNativePassword(salt, []byte(c.password)) _, err = auth.ValidateHash(salt, c.user, scrambled, addr) if c.success { diff --git a/go/mysql/auth_server_vault.go b/go/mysql/auth_server_vault.go index 7f72cd72e19..25fe6611a40 100644 --- a/go/mysql/auth_server_vault.go +++ b/go/mysql/auth_server_vault.go @@ -251,7 +251,7 @@ func (a *AuthServerVault) ValidateHash(salt []byte, user string, authResponse [] return &StaticUserData{entry.UserData, entry.Groups}, nil } } else { - computedAuthResponse := ScramblePassword(salt, []byte(entry.Password)) + computedAuthResponse := ScrambleMysqlNativePassword(salt, []byte(entry.Password)) // Validate the password. 
if matchSourceHost(remoteAddr, entry.SourceHost) && bytes.Equal(authResponse, computedAuthResponse) { return &StaticUserData{entry.UserData, entry.Groups}, nil diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go index e965c7faf1b..84ab17d71ce 100644 --- a/go/mysql/binlog_event.go +++ b/go/mysql/binlog_event.go @@ -122,6 +122,9 @@ type BinlogEvent interface { // IsPseudo is for custom implementations of GTID. IsPseudo() bool + + // IsCompressed returns true if a compressed event is found (binlog_transaction_compression=ON) + IsCompressed() bool } // BinlogFormat contains relevant data from the FORMAT_DESCRIPTION_EVENT. diff --git a/go/mysql/binlog_event_common.go b/go/mysql/binlog_event_common.go index e9f022e0b28..8abfd9ac953 100644 --- a/go/mysql/binlog_event_common.go +++ b/go/mysql/binlog_event_common.go @@ -167,6 +167,11 @@ func (ev binlogEvent) IsPseudo() bool { return false } +// IsCompressed returns true if a compressed event is found (binlog_transaction_compression=ON) +func (ev binlogEvent) IsCompressed() bool { + return ev.Type() == eCompressedEvent +} + // Format implements BinlogEvent.Format(). // // Expected format (L = total length of event data): diff --git a/go/mysql/binlog_event_filepos.go b/go/mysql/binlog_event_filepos.go index dfec653081e..2f6bbb5bbfa 100644 --- a/go/mysql/binlog_event_filepos.go +++ b/go/mysql/binlog_event_filepos.go @@ -212,6 +212,10 @@ func (ev filePosFakeEvent) IsPseudo() bool { return false } +func (ev filePosFakeEvent) IsCompressed() bool { + return false +} + //---------------------------------------------------------------------------- // filePosGTIDEvent is a fake GTID event for filePos. diff --git a/go/mysql/client.go b/go/mysql/client.go index c9fd577b190..fdb75a67076 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -17,7 +17,10 @@ limitations under the License. 
package mysql import ( + "crypto/rsa" "crypto/tls" + "crypto/x509" + "encoding/pem" "fmt" "net" "strconv" @@ -235,6 +238,7 @@ func (c *Conn) clientHandshake(characterSet uint8, params *ConnParams) error { return err } c.fillFlavor(params) + c.salt = salt // Sanity check. if capabilities&CapabilityClientProtocol41 == 0 { @@ -291,7 +295,12 @@ func (c *Conn) clientHandshake(characterSet uint8, params *ConnParams) error { } // Password encryption. - scrambledPassword := ScramblePassword(salt, []byte(params.Pass)) + var scrambledPassword []byte + if c.authPluginName == CachingSha2Password { + scrambledPassword = ScrambleCachingSha2Password(salt, []byte(params.Pass)) + } else { + scrambledPassword = ScrambleMysqlNativePassword(salt, []byte(params.Pass)) + } // Client Session Tracking Capability. if params.Flags&CapabilityClientSessionTrack == CapabilityClientSessionTrack { @@ -310,54 +319,8 @@ func (c *Conn) clientHandshake(characterSet uint8, params *ConnParams) error { } // Read the server response. - response, err := c.readPacket() - if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) - } - switch response[0] { - case OKPacket: - // OK packet, we are authenticated. Save the user, keep going. - c.User = params.Uname - case AuthSwitchRequestPacket: - // Server is asking to use a different auth method. We - // only support cleartext plugin. - pluginName, salt, err := parseAuthSwitchRequest(response) - if err != nil { - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse auth switch request: %v", err) - } - - if pluginName == MysqlClearPassword { - // Write the cleartext password packet. - if err := c.writeClearTextPassword(params); err != nil { - return err - } - } else if pluginName == MysqlNativePassword { - // Write the mysql_native_password packet. 
- if err := c.writeMysqlNativePassword(params, salt); err != nil { - return err - } - } else { - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "server asked for unsupported auth method: %v", pluginName) - } - - // Wait for OK packet. - response, err = c.readPacket() - if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) - } - switch response[0] { - case OKPacket: - // OK packet, we are authenticated. Save the user, keep going. - c.User = params.Uname - case ErrPacket: - return ParseErrorPacket(response) - default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response) - } - case ErrPacket: - return ParseErrorPacket(response) - default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response) + if err := c.handleAuthResponse(params); err != nil { + return err } // If the server didn't support DbName in its handshake, set @@ -515,10 +478,7 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // 5.6.2 that don't have a null terminated string. authPluginName = string(data[pos : len(data)-1]) } - - if authPluginName != MysqlNativePassword { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: only support %v auth plugin name, but got %v", MysqlNativePassword, authPluginName) - } + c.authPluginName = authPluginName } return capabilities, authPluginData, nil @@ -603,7 +563,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ lenNullString(params.Uname) + // length of scrambled password is handled below. len(scrambledPassword) + - 21 + // "mysql_native_password" string. + len(c.authPluginName) + 1 // terminating zero. // Add the DB name if the server supports it. 
@@ -652,7 +612,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ } // Assume native client during response - pos = writeNullString(data, pos, MysqlNativePassword) + pos = writeNullString(data, pos, c.authPluginName) // Sanity-check the length. if pos != len(data) { @@ -665,6 +625,110 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ return nil } +// handleAuthResponse parses server's response after client sends the password for authentication +// and handles next steps for AuthSwitchRequestPacket and AuthMoreDataPacket. +func (c *Conn) handleAuthResponse(params *ConnParams) error { + response, err := c.readPacket() + if err != nil { + return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + } + + switch response[0] { + case OKPacket: + // OK packet, we are authenticated. Save the user, keep going. + c.User = params.Uname + case AuthSwitchRequestPacket: + // Server is asking to use a different auth method + if err = c.handleAuthSwitchPacket(params, response); err != nil { + return err + } + case AuthMoreDataPacket: + // Server is requesting more data - maybe un-scrambled password + if err := c.handleAuthMoreDataPacket(response[1], params); err != nil { + return err + } + case ErrPacket: + return ParseErrorPacket(response) + default: + return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response) + } + + return nil +} + +// handleAuthSwitchPacket scrambles password for the plugin requested by the server and retries authentication +func (c *Conn) handleAuthSwitchPacket(params *ConnParams, response []byte) error { + var err error + var salt []byte + c.authPluginName, salt, err = parseAuthSwitchRequest(response) + if err != nil { + return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse auth switch request: %v", err) + } + if salt != nil { + c.salt = salt + } + switch c.authPluginName { + case MysqlClearPassword: + if 
err := c.writeClearTextPassword(params); err != nil { + return err + } + case MysqlNativePassword: + scrambledPassword := ScrambleMysqlNativePassword(c.salt, []byte(params.Pass)) + if err := c.writeScrambledPassword(scrambledPassword); err != nil { + return err + } + case CachingSha2Password: + scrambledPassword := ScrambleCachingSha2Password(c.salt, []byte(params.Pass)) + if err := c.writeScrambledPassword(scrambledPassword); err != nil { + return err + } + default: + return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "server asked for unsupported auth method: %v", c.authPluginName) + } + + // The response could be an OKPacket, AuthMoreDataPacket or ErrPacket + return c.handleAuthResponse(params) +} + +// handleAuthMoreDataPacket handles response of CachingSha2Password authentication and sends full password to the +// server if requested +func (c *Conn) handleAuthMoreDataPacket(data byte, params *ConnParams) error { + switch data { + case CachingSha2FastAuth: + // User credentials are verified using the cache ("Fast" path). + // Next packet should be an OKPacket + return c.handleAuthResponse(params) + case CachingSha2FullAuth: + // User credentials are not cached, we have to exchange full password. 
+ if c.Capabilities&CapabilityClientSSL > 0 || params.UnixSocket != "" { + // If we are using an SSL connection or Unix socket, write clear text password + if err := c.writeClearTextPassword(params); err != nil { + return err + } + } else { + // If we are not using an SSL connection or Unix socket, we have to fetch a public key + // from the server to encrypt password + pub, err := c.requestPublicKey() + if err != nil { + return err + } + // Encrypt password with public key + enc, err := EncryptPasswordWithPublicKey(c.salt, []byte(params.Pass), pub) + if err != nil { + return vterrors.Errorf(vtrpc.Code_INTERNAL, "error encrypting password with public key: %v", err) + } + // Write encrypted password + if err := c.writeScrambledPassword(enc); err != nil { + return err + } + } + // Next packet should either be an OKPacket or ErrPacket + return c.handleAuthResponse(params) + default: + return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse AuthMoreDataPacket: %v", data) + } +} + func parseAuthSwitchRequest(data []byte) (string, []byte, error) { pos := 1 pluginName, pos, ok := readNullString(data, pos) @@ -680,6 +744,34 @@ func parseAuthSwitchRequest(data []byte) (string, []byte, error) { return pluginName, salt, nil } +// requestPublicKey requests a public key from the server +func (c *Conn) requestPublicKey() (rsaKey *rsa.PublicKey, err error) { + // get public key from server + data, pos := c.startEphemeralPacketWithHeader(1) + data[pos] = 0x02 + if err := c.writeEphemeralPacket(); err != nil { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "error sending public key request packet: %v", err) + } + + response, err := c.readPacket() + if err != nil { + return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + } + + // Server should respond with a AuthMoreDataPacket containing the public key + if response[0] != AuthMoreDataPacket { + return nil, ParseErrorPacket(response) + } + + block, _ := pem.Decode(response[1:]) + pub, err := 
x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "failed to parse public key from server: %v", err) + } + + return pub.(*rsa.PublicKey), nil +} + // writeClearTextPassword writes the clear text password. // Returns a SQLError. func (c *Conn) writeClearTextPassword(params *ConnParams) error { @@ -693,15 +785,14 @@ func (c *Conn) writeClearTextPassword(params *ConnParams) error { return c.writeEphemeralPacket() } -// writeMysqlNativePassword writes the encrypted mysql_native_password format +// writeScrambledPassword writes the encrypted mysql_native_password format // Returns a SQLError. -func (c *Conn) writeMysqlNativePassword(params *ConnParams, salt []byte) error { - scrambledPassword := ScramblePassword(salt, []byte(params.Pass)) +func (c *Conn) writeScrambledPassword(scrambledPassword []byte) error { data, pos := c.startEphemeralPacketWithHeader(len(scrambledPassword)) pos += copy(data[pos:], scrambledPassword) // Sanity check. if pos != len(data) { - return vterrors.Errorf(vtrpc.Code_INTERNAL, "error building MysqlNativePassword packet: got %v bytes expected %v", pos, len(data)) + return vterrors.Errorf(vtrpc.Code_INTERNAL, "error building %v packet: got %v bytes expected %v", c.authPluginName, pos, len(data)) } return c.writeEphemeralPacket() } diff --git a/go/mysql/conn.go b/go/mysql/conn.go index c91840409e2..c2543ef9e01 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -86,6 +86,13 @@ type Conn struct { // fields, this is set to an empty array (but not nil). fields []*querypb.Field + // salt is sent by the server during initial handshake to be used for authentication + salt []byte + + // authPluginName is the name of server's authentication plugin. + // It is set during the initial handshake. + authPluginName string + // schemaName is the default database name to use. It is set // during handshake, and by ComInitDb packets. Both client and // servers maintain it. 
This member is private because it's @@ -942,7 +949,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { } func (c *Conn) handleComStmtSendLongData(data []byte) bool { - stmtID, paramID, chunkData, ok := c.parseComStmtSendLongData(data) + stmtID, paramID, chunk, ok := c.parseComStmtSendLongData(data) c.recycleReadPacket() if !ok { err := fmt.Errorf("error parsing statement send long data from client %v, returning error: %v", c.ConnectionID, data) @@ -962,9 +969,6 @@ func (c *Conn) handleComStmtSendLongData(data []byte) bool { return c.writeErrorPacketFromErrorAndLog(err) } - chunk := make([]byte, len(chunkData)) - copy(chunk, chunkData) - key := fmt.Sprintf("v%d", paramID+1) if val, ok := prepare.BindVars[key]; ok { val.Value = append(val.Value, chunk...) @@ -1064,7 +1068,15 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool return true } -func (c *Conn) handleComPrepare(handler Handler, data []byte) bool { +func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { + c.startWriterBuffering() + defer func() { + if err := c.endWriterBuffering(); err != nil { + log.Errorf("conn %v: flush() failed: %v", c.ID(), err) + kontinue = false + } + }() + query := c.parseComPrepare(data) c.recycleReadPacket() @@ -1169,7 +1181,7 @@ func (c *Conn) handleComPing() bool { c.recycleReadPacket() // Return error if listener was shut down and OK otherwise if c.listener.isShutdown() { - if !c.writeErrorAndLog(ERServerShutdown, SSServerShutdown, "Server shutdown in progress") { + if !c.writeErrorAndLog(ERServerShutdown, SSNetError, "Server shutdown in progress") { return false } } else { @@ -1181,6 +1193,8 @@ func (c *Conn) handleComPing() bool { return true } +var errEmptyStatement = NewSQLError(EREmptyQuery, SSClientError, "Query was empty") + func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { c.startWriterBuffering() defer func() { @@ -1207,8 +1221,7 @@ func (c *Conn) handleComQuery(handler Handler, 
data []byte) (kontinue bool) { } if len(queries) == 0 { - err := NewSQLError(EREmptyQuery, SSSyntaxErrorOrAccessViolation, "Query was empty") - return c.writeErrorPacketFromErrorAndLog(err) + return c.writeErrorPacketFromErrorAndLog(errEmptyStatement) } for index, sql := range queries { @@ -1331,16 +1344,16 @@ func isEOFPacket(data []byte) bool { // // Note: This is only valid on actual EOF packets and not on OK packets with the EOF // type code set, i.e. should not be used if ClientDeprecateEOF is set. -func parseEOFPacket(data []byte) (warnings uint16, more bool, err error) { +func parseEOFPacket(data []byte) (warnings uint16, statusFlags uint16, err error) { // The warning count is in position 2 & 3 warnings, _, _ = readUint16(data, 1) // The status flag is in position 4 & 5 statusFlags, _, ok := readUint16(data, 3) if !ok { - return 0, false, vterrors.Errorf(vtrpc.Code_INTERNAL, "invalid EOF packet statusFlags: %v", data) + return 0, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "invalid EOF packet statusFlags: %v", data) } - return warnings, (statusFlags & ServerMoreResultsExists) != 0, nil + return warnings, statusFlags, nil } // PacketOK contains the ok packet details diff --git a/go/mysql/conn_test.go b/go/mysql/conn_test.go index 39d2a3a4f50..668c86c1fea 100644 --- a/go/mysql/conn_test.go +++ b/go/mysql/conn_test.go @@ -29,16 +29,13 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/sqlparser" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" ) func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { @@ -81,6 +78,7 @@ func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { // Create a Conn on both sides. 
cConn := newConn(clientConn) sConn := newConn(serverConn) + sConn.PrepareData = map[uint32]*PrepareData{} return listener, sConn, cConn } @@ -456,8 +454,8 @@ func TestMultiStatementStopsOnError(t *testing.T) { // panic if the query contains "panic" and it will return selectRowsResult in case of any other query handler := &testRun{t: t, err: fmt.Errorf("execution failed")} res := sConn.handleNextCommand(handler) - // Execution error will occur in this case becuase the query sent is error and testRun will throw an error. - // We shuold send an error packet but not close the connection. + // Execution error will occur in this case because the query sent is error and testRun will throw an error. + // We should send an error packet but not close the connection. require.True(t, res, "we should not break the connection because of execution errors") data, err := cConn.ReadPacket() @@ -482,7 +480,7 @@ func TestMultiStatement(t *testing.T) { // panic if the query contains "panic" and it will return selectRowsResult in case of any other query handler := &testRun{t: t, err: NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number")} res := sConn.handleNextCommand(handler) - //The queries run will be select 1; and select 2; These queries do not return any errors, so the connnection should still be open + //The queries run will be select 1; and select 2; These queries do not return any errors, so the connection should still be open require.True(t, res, "we should not break the connection in case of no errors") // Read the result of the query and assert that it is indeed what we want. This will contain the result of the first query. 
data, more, _, err := cConn.ReadQueryResult(100, true) @@ -652,6 +650,9 @@ func (t testRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) if strings.Contains(query, "panic") { panic("test panic attack!") } + if strings.Contains(query, "twice") { + callback(selectRowsResult) + } callback(selectRowsResult) return nil } diff --git a/go/mysql/constants.go b/go/mysql/constants.go index d71aa8e00e2..965714e08ca 100644 --- a/go/mysql/constants.go +++ b/go/mysql/constants.go @@ -36,6 +36,9 @@ const ( // MysqlClearPassword transmits the password in the clear. MysqlClearPassword = "mysql_clear_password" + // CachingSha2Password uses a salt and transmits a SHA256 hash on the wire. + CachingSha2Password = "caching_sha2_password" + // MysqlDialog uses the dialog plugin on the client side. // It transmits data in the clear. MysqlDialog = "dialog" @@ -230,9 +233,6 @@ const ( // EOFPacket is the header of the EOF packet. EOFPacket = 0xfe - // AuthSwitchRequestPacket is used to switch auth method. - AuthSwitchRequestPacket = 0xfe - // ErrPacket is the header of the error packet. ErrPacket = 0xff @@ -240,6 +240,21 @@ const ( NullValue = 0xfb ) +// Auth packet types +const ( + // AuthMoreDataPacket is sent when server requires more data to authenticate + AuthMoreDataPacket = 0x01 + + // CachingSha2FastAuth is sent before OKPacket when server authenticates using cache + CachingSha2FastAuth = 0x03 + + // CachingSha2FullAuth is sent when server requests un-scrambled password to authenticate + CachingSha2FullAuth = 0x04 + + // AuthSwitchRequestPacket is used to switch auth method. + AuthSwitchRequestPacket = 0xfe +) + // Error codes for client-side errors. 
// Originally found in include/mysql/errmsg.h and // https://dev.mysql.com/doc/refman/5.7/en/error-messages-client.html @@ -307,8 +322,12 @@ const ( // unknown ERUnknownError = 1105 + // internal + ERInternalError = 1815 + // unimplemented ERNotSupportedYet = 1235 + ERUnsupportedPS = 1295 // resource exhausted ERDiskFull = 1021 @@ -343,6 +362,7 @@ const ( ERNoSuchTable = 1146 ERNonExistingTableGrant = 1147 ERKeyDoesNotExist = 1176 + ERDbDropExists = 1008 // permissions ERDBAccessDenied = 1044 @@ -377,14 +397,18 @@ const ( ERFeatureDisabled = 1289 EROptionPreventsStatement = 1290 ERDuplicatedValueInType = 1291 + ERSPDoesNotExist = 1305 ERRowIsReferenced2 = 1451 ErNoReferencedRow2 = 1452 + ErSPNotVarArg = 1414 + ERInnodbReadOnly = 1874 // already exists - ERTableExists = 1050 - ERDupEntry = 1062 - ERFileExists = 1086 - ERUDFExists = 1125 + ERTableExists = 1050 + ERDupEntry = 1062 + ERFileExists = 1086 + ERUDFExists = 1125 + ERDbCreateExists = 1007 // aborted ERGotSignal = 1078 @@ -453,6 +477,7 @@ const ( ERBlobKeyWithoutLength = 1170 ERPrimaryCantHaveNull = 1171 ERTooManyRows = 1172 + ERLockOrActiveTransaction = 1192 ERUnknownSystemVariable = 1193 ERSetConstantsOnly = 1204 ERWrongArguments = 1210 @@ -483,13 +508,13 @@ const ( ERInvalidOnUpdate = 1294 ERUnknownTimeZone = 1298 ERInvalidCharacterString = 1300 - ERSavepointNotExist = 1305 ERIllegalReference = 1247 ERDerivedMustHaveAlias = 1248 ERTableNameNotAllowedHere = 1250 ERQueryInterrupted = 1317 ERTruncatedWrongValueForField = 1366 ERDataTooLong = 1406 + ERForbidSchemaChange = 1450 ERDataOutOfRange = 1690 ) @@ -502,17 +527,14 @@ const ( // in client.c. So using that one. 
SSUnknownSQLState = "HY000" - //SSSyntaxErrorOrAccessViolation is the state on syntax errors or access violations - SSSyntaxErrorOrAccessViolation = "42000" - // SSUnknownComError is ER_UNKNOWN_COM_ERROR SSUnknownComError = "08S01" - // SSHandshakeError is ER_HANDSHAKE_ERROR - SSHandshakeError = "08S01" + // SSNetError is network related error + SSNetError = "08S01" - // SSServerShutdown is ER_SERVER_SHUTDOWN - SSServerShutdown = "08S01" + // SSWrongNumberOfColumns is related to columns error + SSWrongNumberOfColumns = "21000" // SSDataTooLong is ER_DATA_TOO_LONG SSDataTooLong = "22001" @@ -520,14 +542,8 @@ const ( // SSDataOutOfRange is ER_DATA_OUT_OF_RANGE SSDataOutOfRange = "22003" - // SSBadNullError is ER_BAD_NULL_ERROR - SSBadNullError = "23000" - - // SSBadFieldError is ER_BAD_FIELD_ERROR - SSBadFieldError = "42S22" - - // SSDupKey is ER_DUP_KEY - SSDupKey = "23000" + // SSConstraintViolation is constraint violation + SSConstraintViolation = "23000" // SSCantDoThisDuringAnTransaction is // ER_CANT_DO_THIS_DURING_AN_TRANSACTION @@ -536,8 +552,23 @@ const ( // SSAccessDeniedError is ER_ACCESS_DENIED_ERROR SSAccessDeniedError = "28000" + // SSNoDB is ER_NO_DB_ERROR + SSNoDB = "3D000" + // SSLockDeadlock is ER_LOCK_DEADLOCK SSLockDeadlock = "40001" + + //SSClientError is the state on client errors + SSClientError = "42000" + + // SSBadFieldError is ER_BAD_FIELD_ERROR + SSBadFieldError = "42S22" + + // SSUnknownTable is ER_UNKNOWN_TABLE + SSUnknownTable = "42S02" + + // SSQueryInterrupted is ER_QUERY_INTERRUPTED; + SSQueryInterrupted = "70100" ) // A few interesting character set values. 
diff --git a/go/mysql/endtoend/client_test.go b/go/mysql/endtoend/client_test.go index a1698833d0a..5f12c4f30f1 100644 --- a/go/mysql/endtoend/client_test.go +++ b/go/mysql/endtoend/client_test.go @@ -22,12 +22,13 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "context" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" ) // TestKill opens a connection, issues a command that @@ -123,7 +124,7 @@ func TestDupEntry(t *testing.T) { t.Fatalf("first insert failed: %v", err) } _, err = conn.ExecuteFetch("insert into dup_entry(id, name) values(2, 10)", 0, false) - assertSQLError(t, err, mysql.ERDupEntry, mysql.SSDupKey, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)") + assertSQLError(t, err, mysql.ERDupEntry, mysql.SSConstraintViolation, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)") } // TestClientFoundRows tests if the CLIENT_FOUND_ROWS flag works. @@ -145,19 +146,12 @@ func TestClientFoundRows(t *testing.T) { t.Fatalf("insert failed: %v", err) } qr, err := conn.ExecuteFetch("update found_rows set val=11 where id=1", 0, false) - if err != nil { - t.Fatalf("first update failed: %v", err) - } - if qr.RowsAffected != 1 { - t.Errorf("First update: RowsAffected: %d, want 1", qr.RowsAffected) - } + require.NoError(t, err) + assert.EqualValues(t, 1, qr.RowsAffected, "RowsAffected") + qr, err = conn.ExecuteFetch("update found_rows set val=11 where id=1", 0, false) - if err != nil { - t.Fatalf("second update failed: %v", err) - } - if qr.RowsAffected != 1 { - t.Errorf("Second update: RowsAffected: %d, want 1", qr.RowsAffected) - } + require.NoError(t, err) + assert.EqualValues(t, 1, qr.RowsAffected, "RowsAffected") } func doTestMultiResult(t *testing.T, disableClientDeprecateEOF bool) { @@ -171,27 +165,27 @@ func doTestMultiResult(t *testing.T, disableClientDeprecateEOF bool) { qr, more, err := conn.ExecuteFetchMulti("select 1 from dual; set 
autocommit=1; select 1 from dual", 10, true) expectNoError(t, err) expectFlag(t, "ExecuteMultiFetch(multi result)", more, true) - expectRows(t, "ExecuteMultiFetch(multi result)", qr, 1) + assert.EqualValues(t, 1, len(qr.Rows)) qr, more, _, err = conn.ReadQueryResult(10, true) expectNoError(t, err) expectFlag(t, "ReadQueryResult(1)", more, true) - expectRows(t, "ReadQueryResult(1)", qr, 0) + assert.EqualValues(t, 0, len(qr.Rows)) qr, more, _, err = conn.ReadQueryResult(10, true) expectNoError(t, err) expectFlag(t, "ReadQueryResult(2)", more, false) - expectRows(t, "ReadQueryResult(2)", qr, 1) + assert.EqualValues(t, 1, len(qr.Rows)) qr, more, err = conn.ExecuteFetchMulti("select 1 from dual", 10, true) expectNoError(t, err) expectFlag(t, "ExecuteMultiFetch(single result)", more, false) - expectRows(t, "ExecuteMultiFetch(single result)", qr, 1) + assert.EqualValues(t, 1, len(qr.Rows)) qr, more, err = conn.ExecuteFetchMulti("set autocommit=1", 10, true) expectNoError(t, err) expectFlag(t, "ExecuteMultiFetch(no result)", more, false) - expectRows(t, "ExecuteMultiFetch(no result)", qr, 0) + assert.EqualValues(t, 0, len(qr.Rows)) // The ClientDeprecateEOF protocol change has a subtle twist in which an EOF or OK // packet happens to have the status flags in the same position if the affected_rows @@ -206,42 +200,32 @@ func doTestMultiResult(t *testing.T, disableClientDeprecateEOF bool) { // negotiated version, it can properly send the status flags. 
// result, err := conn.ExecuteFetch("create table a(id int, name varchar(128), primary key(id))", 0, false) - if err != nil { - t.Fatalf("create table failed: %v", err) - } - if result.RowsAffected != 0 { - t.Errorf("create table returned RowsAffected %v, was expecting 0", result.RowsAffected) - } + require.NoError(t, err) + assert.Zero(t, result.RowsAffected, "create table RowsAffected ") for i := 0; i < 255; i++ { result, err := conn.ExecuteFetch(fmt.Sprintf("insert into a(id, name) values(%v, 'nice name %v')", 1000+i, i), 1000, true) - if err != nil { - t.Fatalf("ExecuteFetch(%v) failed: %v", i, err) - } - if result.RowsAffected != 1 { - t.Errorf("insert into returned RowsAffected %v, was expecting 1", result.RowsAffected) - } + require.NoError(t, err) + assert.EqualValues(t, 1, result.RowsAffected, "insert into returned RowsAffected") } qr, more, err = conn.ExecuteFetchMulti("update a set name = concat(name, ' updated'); select * from a; select count(*) from a", 300, true) expectNoError(t, err) expectFlag(t, "ExecuteMultiFetch(multi result)", more, true) - expectRows(t, "ExecuteMultiFetch(multi result)", qr, 255) + assert.EqualValues(t, 255, qr.RowsAffected) qr, more, _, err = conn.ReadQueryResult(300, true) expectNoError(t, err) expectFlag(t, "ReadQueryResult(1)", more, true) - expectRows(t, "ReadQueryResult(1)", qr, 255) + assert.EqualValues(t, 255, len(qr.Rows), "ReadQueryResult(1)") qr, more, _, err = conn.ReadQueryResult(300, true) expectNoError(t, err) expectFlag(t, "ReadQueryResult(2)", more, false) - expectRows(t, "ReadQueryResult(2)", qr, 1) + assert.EqualValues(t, 1, len(qr.Rows), "ReadQueryResult(1)") _, err = conn.ExecuteFetch("drop table a", 10, true) - if err != nil { - t.Fatalf("drop table failed: %v", err) - } + require.NoError(t, err) } func TestMultiResultDeprecateEOF(t *testing.T) { @@ -258,13 +242,6 @@ func expectNoError(t *testing.T, err error) { } } -func expectRows(t *testing.T, msg string, result *sqltypes.Result, want int) { - 
t.Helper() - if int(result.RowsAffected) != want { - t.Errorf("%s: %d, want %d", msg, result.RowsAffected, want) - } -} - func expectFlag(t *testing.T, msg string, flag, want bool) { t.Helper() if flag != want { @@ -339,3 +316,44 @@ func TestSessionTrackGTIDs(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, qr.SessionStateChanges) } + +func TestCachingSha2Password(t *testing.T) { + ctx := context.Background() + + // connect as an existing user to create a user account with caching_sha2_password + params := connParams + conn, err := mysql.Connect(ctx, ¶ms) + expectNoError(t, err) + defer conn.Close() + + qr, err := conn.ExecuteFetch(`select true from information_schema.PLUGINS where PLUGIN_NAME='caching_sha2_password' and PLUGIN_STATUS='ACTIVE'`, 1, false) + if err != nil { + t.Errorf("select true from information_schema.PLUGINS failed: %v", err) + } + + if len(qr.Rows) != 1 { + t.Skip("Server does not support caching_sha2_password plugin") + } + + // create a user using caching_sha2_password password + if _, err = conn.ExecuteFetch(`create user 'sha2user'@'localhost' identified with caching_sha2_password by 'password';`, 0, false); err != nil { + t.Fatalf("Create user with caching_sha2_password failed: %v", err) + } + conn.Close() + + // connect as sha2user + params.Uname = "sha2user" + params.Pass = "password" + params.DbName = "information_schema" + conn, err = mysql.Connect(ctx, ¶ms) + expectNoError(t, err) + defer conn.Close() + + if qr, err = conn.ExecuteFetch(`select user()`, 1, true); err != nil { + t.Fatalf("select user() failed: %v", err) + } + + if len(qr.Rows) != 1 || qr.Rows[0][0].ToString() != "sha2user@localhost" { + t.Errorf("Logged in user is not sha2user") + } +} diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go index 92fb2dab07e..1827372b17c 100644 --- a/go/mysql/endtoend/query_test.go +++ b/go/mysql/endtoend/query_test.go @@ -17,12 +17,14 @@ limitations under the License. 
package endtoend import ( + "context" "fmt" "math/rand" "strings" "testing" - "context" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -100,7 +102,6 @@ func TestQueries(t *testing.T) { sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")), }, }, - RowsAffected: 1, } if !result.Equal(expectedResult) { // MySQL 5.7 is adding the NO_DEFAULT_VALUE_FLAG to Flags. @@ -286,3 +287,46 @@ func TestWarningsDeprecateEOF(t *testing.T) { func TestWarningsNoDeprecateEOF(t *testing.T) { doTestWarnings(t, true) } + +func TestSysInfo(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &connParams) + require.NoError(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("drop table if exists `a`", 1000, true) + require.NoError(t, err) + + _, err = conn.ExecuteFetch("CREATE TABLE `a` (`one` int NOT NULL,`two` int NOT NULL,PRIMARY KEY (`one`,`two`)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4", 1000, true) + require.NoError(t, err) + defer conn.ExecuteFetch("drop table `a`", 1000, true) + + qr, err := conn.ExecuteFetch(`SELECT + column_name column_name, + data_type data_type, + column_type full_data_type, + character_maximum_length character_maximum_length, + numeric_precision numeric_precision, + numeric_scale numeric_scale, + datetime_precision datetime_precision, + column_default column_default, + is_nullable is_nullable, + extra extra, + table_name table_name + FROM information_schema.columns + WHERE table_schema = 'vttest' and table_name = 'a' + ORDER BY ordinal_position`, 1000, true) + require.NoError(t, err) + require.Equal(t, 2, len(qr.Rows)) + + // is_nullable + assert.Equal(t, `VARCHAR("NO")`, qr.Rows[0][8].String()) + assert.Equal(t, `VARCHAR("NO")`, qr.Rows[1][8].String()) + + // table_name + assert.Equal(t, `VARCHAR("a")`, qr.Rows[0][10].String()) + assert.Equal(t, `VARCHAR("a")`, qr.Rows[1][10].String()) + + 
assert.EqualValues(t, sqltypes.Uint64, qr.Fields[4].Type) + assert.EqualValues(t, querypb.Type_UINT64, qr.Rows[0][4].Type()) +} diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index 71bf08f06c8..863ee2bbd91 100644 --- a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -50,8 +50,8 @@ const appendEntry = -1 type DB struct { // Fields set at construction time. - // t is our testing.T instance - t *testing.T + // t is our testing.TB instance + t testing.TB // listener is our mysql.Listener. listener *mysql.Listener @@ -136,6 +136,7 @@ type ExpectedResult struct { type exprResult struct { expr *regexp.Regexp result *sqltypes.Result + err string } // ExpectedExecuteFetch defines for an expected query the to be faked output. @@ -150,7 +151,7 @@ type ExpectedExecuteFetch struct { } // New creates a server, and starts listening. -func New(t *testing.T) *DB { +func New(t testing.TB) *DB { // Pick a path for our socket. socketDir, err := ioutil.TempDir("", "fakesqldb") if err != nil { @@ -391,6 +392,9 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R if ok { userCallback(query) } + if pat.err != "" { + return fmt.Errorf(pat.err) + } return callback(pat.result) } } @@ -504,7 +508,20 @@ func (db *DB) AddQueryPattern(queryPattern string, expectedResult *sqltypes.Resu result := *expectedResult db.mu.Lock() defer db.mu.Unlock() - db.patternData = append(db.patternData, exprResult{expr, &result}) + db.patternData = append(db.patternData, exprResult{expr: expr, result: &result}) +} + +// RejectQueryPattern allows a query pattern to be rejected with an error +func (db *DB) RejectQueryPattern(queryPattern, error string) { + expr := regexp.MustCompile("(?is)^" + queryPattern + "$") + db.mu.Lock() + defer db.mu.Unlock() + db.patternData = append(db.patternData, exprResult{expr: expr, err: error}) +} + +// ClearQueryPattern removes all query patterns set up +func (db *DB) ClearQueryPattern() { + db.patternData = 
nil } // AddQueryPatternWithCallback is similar to AddQueryPattern: in addition it calls the provided callback function diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index 86824be43ca..b178a5f0d5f 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -44,6 +44,10 @@ const ( mariaDBReplicationHackPrefix = "5.5.5-" // mariaDBVersionString is present in mariaDBVersionString = "MariaDB" + // mysql57VersionPrefix is the prefix for 5.7 mysql version, such as 5.7.31-log + mysql57VersionPrefix = "5.7." + // mysql80VersionPrefix is the prefix for 8.0 mysql version, such as 8.0.19 + mysql80VersionPrefix = "8.0." ) // flavor is the abstract interface for a flavor. @@ -111,6 +115,8 @@ type flavor interface { // timestamp cannot be set by regular clients. enableBinlogPlaybackCommand() string disableBinlogPlaybackCommand() string + + baseShowTablesWithSizes() string } // flavors maps flavor names to their implementation. @@ -131,23 +137,27 @@ var flavors = make(map[string]func() flavor) // as well (not matching what c.ServerVersion is, but matching after we remove // the prefix). 
func (c *Conn) fillFlavor(params *ConnParams) { - if flavorFunc := flavors[params.Flavor]; flavorFunc != nil { - c.flavor = flavorFunc() - return - } + flavorFunc := flavors[params.Flavor] - if strings.HasPrefix(c.ServerVersion, mariaDBReplicationHackPrefix) { + switch { + case flavorFunc != nil: + c.flavor = flavorFunc() + case strings.HasPrefix(c.ServerVersion, mariaDBReplicationHackPrefix): c.ServerVersion = c.ServerVersion[len(mariaDBReplicationHackPrefix):] - c.flavor = mariadbFlavor{} - return - } - - if strings.Contains(c.ServerVersion, mariaDBVersionString) { - c.flavor = mariadbFlavor{} - return + c.flavor = mariadbFlavor101{} + case strings.Contains(c.ServerVersion, mariaDBVersionString): + mariadbVersion, err := strconv.ParseFloat(c.ServerVersion[:4], 64) + if err != nil || mariadbVersion < 10.2 { + c.flavor = mariadbFlavor101{} + } + c.flavor = mariadbFlavor102{} + case strings.HasPrefix(c.ServerVersion, mysql57VersionPrefix): + c.flavor = mysqlFlavor57{} + case strings.HasPrefix(c.ServerVersion, mysql80VersionPrefix): + c.flavor = mysqlFlavor80{} + default: + c.flavor = mysqlFlavor56{} } - - c.flavor = mysqlFlavor{} } // @@ -159,8 +169,11 @@ func (c *Conn) fillFlavor(params *ConnParams) { // is identified as MariaDB. Most applications should not care, but // this is useful in tests. func (c *Conn) IsMariaDB() bool { - _, ok := c.flavor.(mariadbFlavor) - return ok + switch c.flavor.(type) { + case mariadbFlavor101, mariadbFlavor102: + return true + } + return false } // MasterPosition returns the current master replication position. 
@@ -390,3 +403,8 @@ func (c *Conn) EnableBinlogPlaybackCommand() string { func (c *Conn) DisableBinlogPlaybackCommand() string { return c.flavor.disableBinlogPlaybackCommand() } + +// BaseShowTables returns a query that shows tables and their sizes +func (c *Conn) BaseShowTables() string { + return c.flavor.baseShowTablesWithSizes() +} diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go index 134de8ba707..33e67b76624 100644 --- a/go/mysql/flavor_filepos.go +++ b/go/mysql/flavor_filepos.go @@ -271,3 +271,8 @@ func (*filePosFlavor) enableBinlogPlaybackCommand() string { func (*filePosFlavor) disableBinlogPlaybackCommand() string { return "" } + +// baseShowTablesWithSizes is part of the Flavor interface. +func (*filePosFlavor) baseShowTablesWithSizes() string { + return TablesWithSize56 +} diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index 6d7db404442..422344d4f5a 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -30,6 +30,15 @@ import ( // mariadbFlavor implements the Flavor interface for MariaDB. type mariadbFlavor struct{} +type mariadbFlavor101 struct { + mariadbFlavor +} +type mariadbFlavor102 struct { + mariadbFlavor +} + +var _ flavor = (*mariadbFlavor101)(nil) +var _ flavor = (*mariadbFlavor102)(nil) // masterGTIDSet is part of the Flavor interface. func (mariadbFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { diff --git a/go/mysql/flavor_mariadb_binlog_playback.go b/go/mysql/flavor_mariadb_binlog_playback.go index c30e39d2787..e862e744d04 100644 --- a/go/mysql/flavor_mariadb_binlog_playback.go +++ b/go/mysql/flavor_mariadb_binlog_playback.go @@ -29,3 +29,13 @@ func (mariadbFlavor) enableBinlogPlaybackCommand() string { func (mariadbFlavor) disableBinlogPlaybackCommand() string { return "" } + +// baseShowTablesWithSizes is part of the Flavor interface. 
+func (mariadbFlavor101) baseShowTablesWithSizes() string { + return TablesWithSize56 +} + +// baseShowTablesWithSizes is part of the Flavor interface. +func (mariadbFlavor102) baseShowTablesWithSizes() string { + return TablesWithSize57 +} diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go index 120cde37583..82a5b1312b4 100644 --- a/go/mysql/flavor_mariadb_test.go +++ b/go/mysql/flavor_mariadb_test.go @@ -40,7 +40,7 @@ func TestMariadbSetMasterCommands(t *testing.T) { MASTER_CONNECT_RETRY = 1234, MASTER_USE_GTID = current_pos` - conn := &Conn{flavor: mariadbFlavor{}} + conn := &Conn{flavor: mariadbFlavor101{}} got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mariadbFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) @@ -73,7 +73,7 @@ func TestMariadbSetMasterCommandsSSL(t *testing.T) { MASTER_SSL_KEY = 'ssl-key', MASTER_USE_GTID = current_pos` - conn := &Conn{flavor: mariadbFlavor{}} + conn := &Conn{flavor: mariadbFlavor101{}} got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mariadbFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index 590cc3ed2d5..9d4dd5e08da 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -29,6 +29,19 @@ import ( // mysqlFlavor implements the Flavor interface for Mysql. type mysqlFlavor struct{} +type mysqlFlavor56 struct { + mysqlFlavor +} +type mysqlFlavor57 struct { + mysqlFlavor +} +type mysqlFlavor80 struct { + mysqlFlavor +} + +var _ flavor = (*mysqlFlavor56)(nil) +var _ flavor = (*mysqlFlavor57)(nil) +var _ flavor = (*mysqlFlavor80)(nil) // masterGTIDSet is part of the Flavor interface. 
func (mysqlFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { @@ -231,3 +244,40 @@ func (mysqlFlavor) enableBinlogPlaybackCommand() string { func (mysqlFlavor) disableBinlogPlaybackCommand() string { return "" } + +// TablesWithSize56 is a query to select table along with size for mysql 5.6 +const TablesWithSize56 = `SELECT table_name, table_type, unix_timestamp(create_time), table_comment, SUM( data_length + index_length), SUM( data_length + index_length) + FROM information_schema.tables WHERE table_schema = database() group by table_name` + +// TablesWithSize57 is a query to select table along with size for mysql 5.7. +// It's a little weird, because the JOIN predicate only works if the table and databases do not contain weird characters. +// As a fallback, we use the mysql 5.6 query, which is not always up to date, but works for all table/db names. +const TablesWithSize57 = `SELECT t.table_name, t.table_type, unix_timestamp(t.create_time), t.table_comment, i.file_size, i.allocated_size + FROM information_schema.tables t, information_schema.innodb_sys_tablespaces i + WHERE t.table_schema = database() and i.name = concat(t.table_schema,'/',t.table_name) +UNION ALL + SELECT table_name, table_type, unix_timestamp(create_time), table_comment, SUM( data_length + index_length), SUM( data_length + index_length) + FROM information_schema.tables t + WHERE table_schema = database() AND NOT EXISTS(SELECT * FROM information_schema.innodb_sys_tablespaces i WHERE i.name = concat(t.table_schema,'/',t.table_name)) + group by table_name, table_type, unix_timestamp(create_time), table_comment +` + +// TablesWithSize80 is a query to select table along with size for mysql 8.0 +const TablesWithSize80 = `SELECT t.table_name, t.table_type, unix_timestamp(t.create_time), t.table_comment, i.file_size, i.allocated_size + FROM information_schema.tables t, information_schema.innodb_tablespaces i + WHERE t.table_schema = database() and i.name = concat(t.table_schema,'/',t.table_name)` + +// 
baseShowTablesWithSizes is part of the Flavor interface. +func (mysqlFlavor56) baseShowTablesWithSizes() string { + return TablesWithSize56 +} + +// baseShowTablesWithSizes is part of the Flavor interface. +func (mysqlFlavor57) baseShowTablesWithSizes() string { + return TablesWithSize57 +} + +// baseShowTablesWithSizes is part of the Flavor interface. +func (mysqlFlavor80) baseShowTablesWithSizes() string { + return TablesWithSize80 +} diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go index 398c3d4e147..8f72242a891 100644 --- a/go/mysql/flavor_mysql_test.go +++ b/go/mysql/flavor_mysql_test.go @@ -39,7 +39,7 @@ func TestMysql56SetMasterCommands(t *testing.T) { MASTER_CONNECT_RETRY = 1234, MASTER_AUTO_POSITION = 1` - conn := &Conn{flavor: mysqlFlavor{}} + conn := &Conn{flavor: mysqlFlavor57{}} got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mysqlFlavor.SetMasterCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) @@ -72,7 +72,7 @@ func TestMysql56SetMasterCommandsSSL(t *testing.T) { MASTER_SSL_KEY = 'ssl-key', MASTER_AUTO_POSITION = 1` - conn := &Conn{flavor: mysqlFlavor{}} + conn := &Conn{flavor: mysqlFlavor57{}} got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mysqlFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go index 696836c44c6..9015bcd0951 100644 --- a/go/mysql/handshake_test.go +++ b/go/mysql/handshake_test.go @@ -125,7 +125,8 @@ func TestSSLConnection(t *testing.T) { serverConfig, err := vttls.ServerConfig( path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), - path.Join(root, "ca-cert.pem")) + path.Join(root, "ca-cert.pem"), + "") if err != nil { t.Fatalf("TLSServerConfig failed: %v", err) } diff 
--git a/go/mysql/mysql56_gtid_set.go b/go/mysql/mysql56_gtid_set.go index 408c3cac0d6..071e0d929b7 100644 --- a/go/mysql/mysql56_gtid_set.go +++ b/go/mysql/mysql56_gtid_set.go @@ -573,7 +573,11 @@ func (set Mysql56GTIDSet) Difference(other Mysql56GTIDSet) Mysql56GTIDSet { diffIntervals = append(diffIntervals, intervals...) } - differenceSet[sid] = diffIntervals + if len(diffIntervals) == 0 { + delete(differenceSet, sid) + } else { + differenceSet[sid] = diffIntervals + } } return differenceSet diff --git a/go/mysql/mysql56_gtid_set_test.go b/go/mysql/mysql56_gtid_set_test.go index fdb0569c1a2..e30e6fbef0e 100644 --- a/go/mysql/mysql56_gtid_set_test.go +++ b/go/mysql/mysql56_gtid_set_test.go @@ -481,6 +481,20 @@ func TestMysql56GTIDSetDifference(t *testing.T) { if !got.Equal(want) { t.Errorf("got %#v; want %#v", got, want) } + + sid10 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + sid11 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + set10 := Mysql56GTIDSet{ + sid10: []interval{{1, 30}}, + } + set11 := Mysql56GTIDSet{ + sid11: []interval{{1, 30}}, + } + got = set10.Difference(set11) + want = Mysql56GTIDSet{} + if !got.Equal(want) { + t.Errorf("got %#v; want %#v", got, want) + } } func TestMysql56GTIDSetSIDBlock(t *testing.T) { diff --git a/go/mysql/mysql_fuzzer.go b/go/mysql/mysql_fuzzer.go new file mode 100644 index 00000000000..569475c8b8e --- /dev/null +++ b/go/mysql/mysql_fuzzer.go @@ -0,0 +1,385 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// +build gofuzz + +package mysql + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" + "time" + + gofuzzheaders "github.com/AdaLogics/go-fuzz-headers" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/tlstest" + "vitess.io/vitess/go/vt/vttls" +) + +func createFuzzingSocketPair() (net.Listener, *Conn, *Conn) { + // Create a listener. + listener, err := net.Listen("tcp", ":0") + if err != nil { + fmt.Println("We got an error early on") + return nil, nil, nil + } + addr := listener.Addr().String() + listener.(*net.TCPListener).SetDeadline(time.Now().Add(10 * time.Second)) + + // Dial a client, Accept a server. + wg := sync.WaitGroup{} + + var clientConn net.Conn + var clientErr error + wg.Add(1) + go func() { + defer wg.Done() + clientConn, clientErr = net.DialTimeout("tcp", addr, 10*time.Second) + }() + + var serverConn net.Conn + var serverErr error + wg.Add(1) + go func() { + defer wg.Done() + serverConn, serverErr = listener.Accept() + }() + + wg.Wait() + + if clientErr != nil { + return nil, nil, nil + } + if serverErr != nil { + return nil, nil, nil + } + + // Create a Conn on both sides. 
+ cConn := newConn(clientConn) + sConn := newConn(serverConn) + + return listener, sConn, cConn +} + +type fuzztestRun struct{} + +func (t fuzztestRun) NewConnection(c *Conn) { + panic("implement me") +} + +func (t fuzztestRun) ConnectionClosed(c *Conn) { + panic("implement me") +} + +func (t fuzztestRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { + return nil +} + +func (t fuzztestRun) ComPrepare(c *Conn, query string, bindVars map[string]*querypb.BindVariable) ([]*querypb.Field, error) { + panic("implement me") +} + +func (t fuzztestRun) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error { + panic("implement me") +} + +func (t fuzztestRun) WarningCount(c *Conn) uint16 { + return 0 +} + +func (t fuzztestRun) ComResetConnection(c *Conn) { + panic("implement me") +} + +var _ Handler = (*fuzztestRun)(nil) + +type fuzztestConn struct { + writeToPass []bool + pos int + queryPacket []byte +} + +func (t fuzztestConn) Read(b []byte) (n int, err error) { + for j, i := range t.queryPacket { + b[j] = i + } + return len(b), nil +} + +func (t fuzztestConn) Write(b []byte) (n int, err error) { + t.pos = t.pos + 1 + if t.writeToPass[t.pos] { + return 0, nil + } + return 0, fmt.Errorf("error in writing to connection") +} + +func (t fuzztestConn) Close() error { + panic("implement me") +} + +func (t fuzztestConn) LocalAddr() net.Addr { + panic("implement me") +} + +func (t fuzztestConn) RemoteAddr() net.Addr { + return fuzzmockAddress{s: "a"} +} + +func (t fuzztestConn) SetDeadline(t1 time.Time) error { + panic("implement me") +} + +func (t fuzztestConn) SetReadDeadline(t1 time.Time) error { + panic("implement me") +} + +func (t fuzztestConn) SetWriteDeadline(t1 time.Time) error { + panic("implement me") +} + +var _ net.Conn = (*fuzztestConn)(nil) + +type fuzzmockAddress struct { + s string +} + +func (m fuzzmockAddress) Network() string { + return m.s +} + +func (m fuzzmockAddress) String() string { + 
return m.s +} + +var _ net.Addr = (*fuzzmockAddress)(nil) + +// Fuzzers begin here: +func FuzzWritePacket(data []byte) int { + if len(data) < 10 { + return -1 + } + listener, sConn, cConn := createFuzzingSocketPair() + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + err := cConn.writePacket(data) + if err != nil { + return 0 + } + _, err = sConn.ReadPacket() + if err != nil { + return 0 + } + return 1 +} + +func FuzzHandleNextCommand(data []byte) int { + if len(data) < 10 { + return -1 + } + sConn := newConn(fuzztestConn{ + writeToPass: []bool{false}, + pos: -1, + queryPacket: data, + }) + + handler := &fuzztestRun{} + _ = sConn.handleNextCommand(handler) + return 1 +} + +func FuzzReadQueryResults(data []byte) int { + listener, sConn, cConn := createFuzzingSocketPair() + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + err := cConn.WriteComQuery(string(data)) + if err != nil { + return 0 + } + handler := &fuzztestRun{} + _ = sConn.handleNextCommand(handler) + _, _, _, err = cConn.ReadQueryResult(100, true) + if err != nil { + return 0 + } + return 1 +} + +type fuzzTestHandler struct { + mu sync.Mutex + lastConn *Conn + result *sqltypes.Result + err error + warnings uint16 +} + +func (th *fuzzTestHandler) LastConn() *Conn { + th.mu.Lock() + defer th.mu.Unlock() + return th.lastConn +} + +func (th *fuzzTestHandler) Result() *sqltypes.Result { + th.mu.Lock() + defer th.mu.Unlock() + return th.result +} + +func (th *fuzzTestHandler) SetErr(err error) { + th.mu.Lock() + defer th.mu.Unlock() + th.err = err +} + +func (th *fuzzTestHandler) Err() error { + th.mu.Lock() + defer th.mu.Unlock() + return th.err +} + +func (th *fuzzTestHandler) SetWarnings(count uint16) { + th.mu.Lock() + defer th.mu.Unlock() + th.warnings = count +} + +func (th *fuzzTestHandler) NewConnection(c *Conn) { + th.mu.Lock() + defer th.mu.Unlock() + th.lastConn = c +} + +func (th *fuzzTestHandler) ConnectionClosed(_ *Conn) { +} + +func (th 
*fuzzTestHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { + + return nil +} + +func (th *fuzzTestHandler) ComPrepare(c *Conn, query string, bindVars map[string]*querypb.BindVariable) ([]*querypb.Field, error) { + return nil, nil +} + +func (th *fuzzTestHandler) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error { + return nil +} + +func (th *fuzzTestHandler) ComResetConnection(c *Conn) { + +} + +func (th *fuzzTestHandler) WarningCount(c *Conn) uint16 { + th.mu.Lock() + defer th.mu.Unlock() + return th.warnings +} + +func FuzzTLSServer(data []byte) int { + // totalQueries is the number of queries the fuzzer + // makes in each fuzz iteration + totalQueries := 20 + var queries []string + c := gofuzzheaders.NewConsumer(data) + for i := 0; i < totalQueries; i++ { + query, err := c.GetString() + if err != nil { + return -1 + } + + // We parse each query now to exit if the queries + // are invalid + _, err = sqlparser.Parse(query) + if err != nil { + return -1 + } + queries = append(queries, query) + } + + th := &fuzzTestHandler{} + + authServer := NewAuthServerStatic("", "", 0) + authServer.entries["user1"] = []*AuthServerStaticEntry{{ + Password: "password1", + }} + defer authServer.close() + l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) + if err != nil { + return -1 + } + defer l.Close() + host, err := os.Hostname() + if err != nil { + return -1 + } + port := l.Addr().(*net.TCPAddr).Port + root, err := ioutil.TempDir("", "TestTLSServer") + if err != nil { + return -1 + } + defer os.RemoveAll(root) + tlstest.CreateCA(root) + tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", host) + tlstest.CreateSignedCert(root, tlstest.CA, "02", "client", "Client Cert") + + serverConfig, err := vttls.ServerConfig( + path.Join(root, "server-cert.pem"), + path.Join(root, "server-key.pem"), + path.Join(root, "ca-cert.pem"), + "") + if err != nil { + return -1 + } + 
l.TLSConfig.Store(serverConfig) + go l.Accept() + + connCountByTLSVer.ResetAll() + // Setup the right parameters. + params := &ConnParams{ + Host: host, + Port: port, + Uname: "user1", + Pass: "password1", + // SSL flags. + Flags: CapabilityClientSSL, + SslCa: path.Join(root, "ca-cert.pem"), + SslCert: path.Join(root, "client-cert.pem"), + SslKey: path.Join(root, "client-key.pem"), + } + conn, err := Connect(context.Background(), params) + if err != nil { + return -1 + } + + for i := 0; i < len(queries); i++ { + _, _ = conn.ExecuteFetch(queries[i], 1000, true) + } + return 1 +} diff --git a/go/mysql/query.go b/go/mysql/query.go index 929b9a87dc0..61858b73f7d 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -319,6 +319,9 @@ func (c *Conn) ExecuteFetchMulti(query string, maxrows int, wantfields bool) (re } res, more, _, err := c.ReadQueryResult(maxrows, wantfields) + if err != nil { + return nil, false, err + } return res, more, err } @@ -358,6 +361,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, RowsAffected: packetOk.affectedRows, InsertID: packetOk.lastInsertID, SessionStateChanges: packetOk.sessionStateData, + StatusFlags: packetOk.statusFlags, }, more, warnings, nil } @@ -421,15 +425,17 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, if !wantfields { result.Fields = nil } - result.RowsAffected = uint64(len(result.Rows)) // The deprecated EOF packets change means that this is either an // EOF packet or an OK packet with the EOF type code. 
if c.Capabilities&CapabilityClientDeprecateEOF == 0 { - warnings, more, err = parseEOFPacket(data) + var statusFlags uint16 + warnings, statusFlags, err = parseEOFPacket(data) if err != nil { return nil, false, 0, err } + more = (statusFlags & ServerMoreResultsExists) != 0 + result.StatusFlags = statusFlags } else { packetOk, err := c.parseOKPacket(data) if err != nil { @@ -438,6 +444,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, warnings = packetOk.warnings more = (packetOk.statusFlags & ServerMoreResultsExists) != 0 result.SessionStateChanges = packetOk.sessionStateData + result.StatusFlags = packetOk.statusFlags } return result, more, warnings, nil @@ -857,7 +864,11 @@ func (c *Conn) parseComStmtSendLongData(data []byte) (uint32, uint16, []byte, bo return 0, 0, nil, false } - return statementID, paramID, data[pos:], true + chunkData := data[pos:] + chunk := make([]byte, len(chunkData)) + copy(chunk, chunkData) + + return statementID, paramID, chunk, true } func (c *Conn) parseComStmtClose(data []byte) (uint32, bool) { diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go index a58da8fb486..8b5fae975e1 100644 --- a/go/mysql/query_test.go +++ b/go/mysql/query_test.go @@ -33,7 +33,7 @@ import ( ) // Utility function to write sql query as packets to test parseComPrepare -func MockQueryPackets(t *testing.T, query string) []byte { +func preparePacket(t *testing.T, query string) []byte { data := make([]byte, len(query)+1+packetHeaderSize) // Not sure if it makes a difference pos := packetHeaderSize @@ -130,7 +130,7 @@ func TestComStmtPrepare(t *testing.T) { }() sql := "select * from test_table where id = ?" 
- mockData := MockQueryPackets(t, sql) + mockData := preparePacket(t, sql) if err := cConn.writePacket(mockData); err != nil { t.Fatalf("writePacket failed: %v", err) @@ -173,7 +173,7 @@ func TestComStmtPrepareUpdStmt(t *testing.T) { }() sql := "UPDATE test SET __bit = ?, __tinyInt = ?, __tinyIntU = ?, __smallInt = ?, __smallIntU = ?, __mediumInt = ?, __mediumIntU = ?, __int = ?, __intU = ?, __bigInt = ?, __bigIntU = ?, __decimal = ?, __float = ?, __double = ?, __date = ?, __datetime = ?, __timestamp = ?, __time = ?, __year = ?, __char = ?, __varchar = ?, __binary = ?, __varbinary = ?, __tinyblob = ?, __tinytext = ?, __blob = ?, __text = ?, __enum = ?, __set = ? WHERE __id = 0" - mockData := MockQueryPackets(t, sql) + mockData := preparePacket(t, sql) err := cConn.writePacket(mockData) require.NoError(t, err, "writePacket failed") @@ -415,7 +415,6 @@ func TestQueries(t *testing.T) { sqltypes.NULL, }, }, - RowsAffected: 2, }) // Typical Select with TYPE_AND_NAME. @@ -518,7 +517,6 @@ func TestQueries(t *testing.T) { sqltypes.NULL, }, }, - RowsAffected: 2, }) // Typical Select with TYPE_AND_NAME. @@ -538,7 +536,6 @@ func TestQueries(t *testing.T) { sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")), }, }, - RowsAffected: 2, }) // Typical Select with TYPE_ONLY. @@ -556,7 +553,6 @@ func TestQueries(t *testing.T) { sqltypes.MakeTrusted(querypb.Type_INT64, []byte("20")), }, }, - RowsAffected: 2, }) // Typical Select with ALL. @@ -589,7 +585,6 @@ func TestQueries(t *testing.T) { sqltypes.MakeTrusted(querypb.Type_INT64, []byte("30")), }, }, - RowsAffected: 3, }) } @@ -649,7 +644,6 @@ func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result * go func() { defer wg.Done() - // Test ExecuteFetch. maxrows := 10000 if !allRows { // Asking for just one row max. The results that have more will fail. 
@@ -687,6 +681,7 @@ func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result * if gotWarnings != warningCount { t.Errorf("ExecuteFetch(%v) expected %v warnings got %v", query, warningCount, gotWarnings) + return } // Test ExecuteStreamFetch, build a Result. @@ -733,6 +728,10 @@ func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result * t.Logf("========== Expected row(%v) = %v", i, RowString(expected.Rows[i])) } } + if expected.RowsAffected != got.RowsAffected { + t.Logf("========== Got RowsAffected = %v", got.RowsAffected) + t.Logf("========== Expected RowsAffected = %v", expected.RowsAffected) + } t.Errorf("\nExecuteStreamFetch(%v) returned:\n%+v\nBut was expecting:\n%+v\n", query, got, &expected) } }() diff --git a/go/mysql/replication_constants.go b/go/mysql/replication_constants.go index 53d51c944eb..096bd902a04 100644 --- a/go/mysql/replication_constants.go +++ b/go/mysql/replication_constants.go @@ -207,6 +207,9 @@ const ( //eViewChangeEvent = 37 //eXAPrepareLogEvent = 38 + // Transaction_payload_event when binlog compression is turned on + eCompressedEvent = 40 + // MariaDB specific values. They start at 160. //eMariaAnnotateRowsEvent = 160 // Unused diff --git a/go/mysql/replication_status.go b/go/mysql/replication_status.go index 58f17835b5f..38503e10d0e 100644 --- a/go/mysql/replication_status.go +++ b/go/mysql/replication_status.go @@ -112,7 +112,7 @@ func ProtoToReplicationStatus(s *replicationdatapb.Status) ReplicationStatus { // provided as a list of ReplicationStatus's. This method only works if the flavor for all retrieved ReplicationStatus's is MySQL. // The result is returned as a Mysql56GTIDSet, each of whose elements is a found errant GTID. 
func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationStatus) (Mysql56GTIDSet, error) { - set, ok := s.RelayLogPosition.GTIDSet.(Mysql56GTIDSet) + relayLogSet, ok := s.RelayLogPosition.GTIDSet.(Mysql56GTIDSet) if !ok { return nil, fmt.Errorf("errant GTIDs can only be computed on the MySQL flavor") } @@ -136,8 +136,8 @@ func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationS } // Copy set for final diffSet so we don't mutate receiver. - diffSet := make(Mysql56GTIDSet, len(set)) - for sid, intervals := range set { + diffSet := make(Mysql56GTIDSet, len(relayLogSet)) + for sid, intervals := range relayLogSet { if sid == s.MasterUUID { continue } diff --git a/go/mysql/schema.go b/go/mysql/schema.go index 3174dad7274..01841429732 100644 --- a/go/mysql/schema.go +++ b/go/mysql/schema.go @@ -30,58 +30,64 @@ import ( // data. const ( - // BaseShowTables is the base query used in further methods. - BaseShowTables = "SELECT table_name, table_type, unix_timestamp(create_time), table_comment FROM information_schema.tables WHERE table_schema = database()" - // BaseShowPrimary is the base query for fetching primary key info. BaseShowPrimary = "SELECT table_name, column_name FROM information_schema.key_column_usage WHERE table_schema=database() AND constraint_name='PRIMARY' ORDER BY table_name, ordinal_position" + // ShowRowsRead is the query used to find the number of rows read. + ShowRowsRead = "show status like 'Innodb_rows_read'" ) // BaseShowTablesFields contains the fields returned by a BaseShowTables or a BaseShowTablesForTable command. // They are validated by the // testBaseShowTables test. 
-var BaseShowTablesFields = []*querypb.Field{ - { - Name: "table_name", - Type: querypb.Type_VARCHAR, - Table: "tables", - OrgTable: "TABLES", - Database: "information_schema", - OrgName: "TABLE_NAME", - ColumnLength: 192, - Charset: CharacterSetUtf8, - Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), - }, - { - Name: "table_type", - Type: querypb.Type_VARCHAR, - Table: "tables", - OrgTable: "TABLES", - Database: "information_schema", - OrgName: "TABLE_TYPE", - ColumnLength: 192, - Charset: CharacterSetUtf8, - Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), - }, - { - Name: "unix_timestamp(create_time)", - Type: querypb.Type_INT64, - ColumnLength: 11, - Charset: CharacterSetBinary, - Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG), - }, - { - Name: "table_comment", - Type: querypb.Type_VARCHAR, - Table: "tables", - OrgTable: "TABLES", - Database: "information_schema", - OrgName: "TABLE_COMMENT", - ColumnLength: 6144, - Charset: CharacterSetUtf8, - Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), - }, -} +var BaseShowTablesFields = []*querypb.Field{{ + Name: "t.table_name", + Type: querypb.Type_VARCHAR, + Table: "tables", + OrgTable: "TABLES", + Database: "information_schema", + OrgName: "TABLE_NAME", + ColumnLength: 192, + Charset: CharacterSetUtf8, + Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), +}, { + Name: "t.table_type", + Type: querypb.Type_VARCHAR, + Table: "tables", + OrgTable: "TABLES", + Database: "information_schema", + OrgName: "TABLE_TYPE", + ColumnLength: 192, + Charset: CharacterSetUtf8, + Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), +}, { + Name: "unix_timestamp(t.create_time)", + Type: querypb.Type_INT64, + ColumnLength: 11, + Charset: CharacterSetBinary, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG), +}, { + Name: "t.table_comment", + Type: querypb.Type_VARCHAR, + Table: "tables", + OrgTable: "TABLES", + Database: "information_schema", + OrgName: "TABLE_COMMENT", + ColumnLength: 6144, + 
Charset: CharacterSetUtf8, + Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), +}, { + Name: "i.file_size", + Type: querypb.Type_INT64, + ColumnLength: 11, + Charset: CharacterSetBinary, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG), +}, { + Name: "i.allocated_size", + Type: querypb.Type_INT64, + ColumnLength: 11, + Charset: CharacterSetBinary, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG), +}} // BaseShowTablesRow returns the fields from a BaseShowTables or // BaseShowTablesForTable command. @@ -95,6 +101,8 @@ func BaseShowTablesRow(tableName string, isView bool, comment string) []sqltypes sqltypes.MakeTrusted(sqltypes.VarChar, []byte(tableType)), sqltypes.MakeTrusted(sqltypes.Int64, []byte("1427325875")), // unix_timestamp(create_time) sqltypes.MakeTrusted(sqltypes.VarChar, []byte(comment)), + sqltypes.MakeTrusted(sqltypes.Int64, []byte("100")), // file_size + sqltypes.MakeTrusted(sqltypes.Int64, []byte("150")), // allocated_size } } diff --git a/go/mysql/server.go b/go/mysql/server.go index 6e702489e2b..0647e06644f 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "context" "crypto/tls" "io" "net" @@ -24,6 +25,8 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/sqlescape" proxyproto "github.com/pires/go-proxyproto" @@ -42,7 +45,6 @@ import ( const ( // DefaultServerVersion is the default server version we're sending to the client. // Can be changed. - DefaultServerVersion = "5.7.9-Vitess" // timing metric keys connectTimingKey = "Connect" @@ -172,6 +174,13 @@ type Listener struct { // RequireSecureTransport configures the server to reject connections from insecure clients RequireSecureTransport bool + + // PreHandleFunc is called for each incoming connection, immediately after + // accepting a new connection. By default it's no-op. 
Useful for custom + // connection inspection or TLS termination. The returned connection is + // handled further by the MySQL handler. An non-nil error will stop + // processing the connection by the MySQL handler. + PreHandleFunc func(context.Context, net.Conn, uint32) (net.Conn, error) } // NewFromListener creares a new mysql listener from an existing net.Listener @@ -232,7 +241,7 @@ func NewListenerWithConfig(cfg ListenerConfig) (*Listener, error) { authServer: cfg.AuthServer, handler: cfg.Handler, listener: l, - ServerVersion: DefaultServerVersion, + ServerVersion: servenv.AppVersion.MySQLVersion(), connectionID: 1, connReadTimeout: cfg.ConnReadTimeout, connWriteTimeout: cfg.ConnWriteTimeout, @@ -247,6 +256,8 @@ func (l *Listener) Addr() net.Addr { // Accept runs an accept loop until the listener is closed. func (l *Listener) Accept() { + ctx := context.Background() + for { conn, err := l.listener.Accept() if err != nil { @@ -263,7 +274,17 @@ func (l *Listener) Accept() { connCount.Add(1) connAccept.Add(1) - go l.handle(conn, connectionID, acceptTime) + go func() { + if l.PreHandleFunc != nil { + conn, err = l.PreHandleFunc(ctx, conn, connectionID) + if err != nil { + log.Errorf("mysql_server pre hook: %s", err) + return + } + } + + l.handle(conn, connectionID, acceptTime) + }() } } diff --git a/go/mysql/server_test.go b/go/mysql/server_test.go index 6afa2b0d6b5..a1ca1271255 100644 --- a/go/mysql/server_test.go +++ b/go/mysql/server_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package mysql import ( + "context" "crypto/tls" "fmt" "io/ioutil" @@ -29,8 +30,6 @@ import ( "testing" "time" - "context" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -65,7 +64,6 @@ var selectRowsResult = &sqltypes.Result{ sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nicer name")), }, }, - RowsAffected: 2, } type testHandler struct { @@ -112,7 +110,7 @@ func (th *testHandler) NewConnection(c *Conn) { th.lastConn = c } -func (th *testHandler) ConnectionClosed(c *Conn) { +func (th *testHandler) ConnectionClosed(_ *Conn) { } func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { @@ -244,9 +242,7 @@ func getHostPort(t *testing.T, a net.Addr) (string, int) { // For the host name, we resolve 'localhost' into an address. // This works around a few travis issues where IPv6 is not 100% enabled. hosts, err := net.LookupHost("localhost") - if err != nil { - t.Fatalf("LookupHost(localhost) failed: %v", err) - } + require.NoError(t, err, "LookupHost(localhost) failed") host := hosts[0] port := a.(*net.TCPAddr).Port t.Logf("listening on address '%v' port %v", host, port) @@ -265,14 +261,10 @@ func TestConnectionFromListener(t *testing.T) { // Make sure we can create our own net.Listener for use with the mysql // listener listener, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("net.Listener failed: %v", err) - } + require.NoError(t, err, "net.Listener failed") l, err := NewFromListener(listener, authServer, th, 0, 0) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -287,9 +279,7 @@ func TestConnectionFromListener(t *testing.T) { } c, err := Connect(context.Background(), params) - if err != nil { - t.Errorf("Should be able to connect to server but found error: %v", err) - } + require.NoError(t, err, "Should be able to connect to server") c.Close() } @@ -303,9 +293,7 @@ 
func TestConnectionWithoutSourceHost(t *testing.T) { }} defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -320,9 +308,7 @@ func TestConnectionWithoutSourceHost(t *testing.T) { } c, err := Connect(context.Background(), params) - if err != nil { - t.Errorf("Should be able to connect to server but found error: %v", err) - } + require.NoError(t, err, "Should be able to connect to server") c.Close() } @@ -340,9 +326,7 @@ func TestConnectionWithSourceHost(t *testing.T) { defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -358,9 +342,7 @@ func TestConnectionWithSourceHost(t *testing.T) { _, err = Connect(context.Background(), params) // target is localhost, should not work from tcp connection - if err == nil { - t.Errorf("Should be able to connect to server but found error: %v", err) - } + require.EqualError(t, err, "Access denied for user 'user1' (errno 1045) (sqlstate 28000)", "Should not be able to connect to server") } func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { @@ -377,9 +359,7 @@ func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -395,9 +375,7 @@ func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { _, err = Connect(context.Background(), params) // target is localhost, should not work from tcp connection - if err == nil { - t.Errorf("Should be able to connect to server but found error: %v", err) - } + 
require.EqualError(t, err, "Access denied for user 'user1' (errno 1045) (sqlstate 28000)", "Should not be able to connect to server") } func TestConnectionUnixSocket(t *testing.T) { @@ -414,15 +392,12 @@ func TestConnectionUnixSocket(t *testing.T) { defer authServer.close() unixSocket, err := ioutil.TempFile("", "mysql_vitess_test.sock") - if err != nil { - t.Fatalf("Failed to create temp file") - } + require.NoError(t, err, "Failed to create temp file") + os.Remove(unixSocket.Name()) l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -434,9 +409,7 @@ func TestConnectionUnixSocket(t *testing.T) { } c, err := Connect(context.Background(), params) - if err != nil { - t.Errorf("Should be able to connect to server but found error: %v", err) - } + require.NoError(t, err, "Should be able to connect to server") c.Close() } @@ -450,9 +423,7 @@ func TestClientFoundRows(t *testing.T) { }} defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -468,28 +439,18 @@ func TestClientFoundRows(t *testing.T) { // Test without flag. c, err := Connect(context.Background(), params) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Connect failed") foundRows := th.LastConn().Capabilities & CapabilityClientFoundRows - if foundRows != 0 { - t.Errorf("FoundRows flag: %x, second bit must be 0", th.LastConn().Capabilities) - } + assert.Equal(t, uint32(0), foundRows, "FoundRows flag: %x, second bit must be 0", th.LastConn().Capabilities) c.Close() - if !c.IsClosed() { - t.Errorf("IsClosed returned true on Close-d connection.") - } + assert.True(t, c.IsClosed(), "IsClosed should be true on Close-d connection.") // Test with flag. 
params.Flags |= CapabilityClientFoundRows c, err = Connect(context.Background(), params) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Connect failed") foundRows = th.LastConn().Capabilities & CapabilityClientFoundRows - if foundRows == 0 { - t.Errorf("FoundRows flag: %x, second bit must be set", th.LastConn().Capabilities) - } + assert.NotZero(t, foundRows, "FoundRows flag: %x, second bit must be set", th.LastConn().Capabilities) c.Close() } @@ -498,6 +459,9 @@ func TestConnCounts(t *testing.T) { initialNumUsers := len(connCountPerUser.Counts()) + // FIXME: we should be able to ResetAll counters instead of computing a delta, but it doesn't work for some reason + // connCountPerUser.ResetAll() + user := "anotherNotYetConnectedUser1" passwd := "password1" @@ -508,9 +472,7 @@ func TestConnCounts(t *testing.T) { }} defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -525,27 +487,18 @@ func TestConnCounts(t *testing.T) { } c, err := Connect(context.Background(), params) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Connect failed") connCounts := connCountPerUser.Counts() - if l := len(connCounts); l-initialNumUsers != 1 { - t.Errorf("Expected 1 new user, got %d", l) - } + assert.Equal(t, 1, len(connCounts)-initialNumUsers) checkCountsForUser(t, user, 1) // Test with a second new connection. c2, err := Connect(context.Background(), params) - if err != nil { - t.Fatal(err) - } - + require.NoError(t, err) connCounts = connCountPerUser.Counts() // There is still only one new user. - if l2 := len(connCounts); l2-initialNumUsers != 1 { - t.Errorf("Expected 1 new user, got %d", l2) - } + assert.Equal(t, 1, len(connCounts)-initialNumUsers) checkCountsForUser(t, user, 2) // Test after closing connections. time.Sleep lets it work, but seems flakey. 
@@ -562,13 +515,8 @@ func checkCountsForUser(t *testing.T, user string, expected int64) { connCounts := connCountPerUser.Counts() userCount, ok := connCounts[user] - if ok { - if userCount != expected { - t.Errorf("Expected connection count for user to be %d, got %d", expected, userCount) - } - } else { - t.Errorf("No count found for user %s", user) - } + assert.True(t, ok, "No count found for user %s", user) + assert.Equal(t, expected, userCount) } func TestServer(t *testing.T) { @@ -582,7 +530,7 @@ func TestServer(t *testing.T) { defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) require.NoError(t, err) - l.SlowConnectWarnThreshold.Set(time.Duration(time.Nanosecond * 1)) + l.SlowConnectWarnThreshold.Set(time.Nanosecond * 1) defer l.Close() go l.Accept() @@ -600,23 +548,19 @@ func TestServer(t *testing.T) { output, err := runMysqlWithErr(t, params, "select rows") require.NoError(t, err) - if !strings.Contains(output, "nice name") || - !strings.Contains(output, "nicer name") || - !strings.Contains(output, "2 rows in set") { - t.Errorf("Unexpected output for 'select rows'") - } + assert.Contains(t, output, "nice name", "Unexpected output for 'select rows'") + assert.Contains(t, output, "nicer name", "Unexpected output for 'select rows'") + assert.Contains(t, output, "2 rows in set", "Unexpected output for 'select rows'") assert.NotContains(t, output, "warnings") // Run a 'select rows' command with warnings th.SetWarnings(13) output, err = runMysqlWithErr(t, params, "select rows") require.NoError(t, err) - if !strings.Contains(output, "nice name") || - !strings.Contains(output, "nicer name") || - !strings.Contains(output, "2 rows in set") || - !strings.Contains(output, "13 warnings") { - t.Errorf("Unexpected output for 'select rows': %v", output) - } + assert.Contains(t, output, "nice name", "Unexpected output for 'select rows'") + assert.Contains(t, output, "nicer name", "Unexpected output for 'select rows'") + 
assert.Contains(t, output, "2 rows in set", "Unexpected output for 'select rows'") + assert.Contains(t, output, "13 warnings", "Unexpected output for 'select rows'") th.SetWarnings(0) // If there's an error after streaming has started, @@ -624,64 +568,50 @@ func TestServer(t *testing.T) { th.SetErr(NewSQLError(ERUnknownComError, SSUnknownComError, "forced error after send")) output, err = runMysqlWithErr(t, params, "error after send") require.Error(t, err) - if !strings.Contains(output, "ERROR 2013 (HY000)") || - !strings.Contains(output, "Lost connection to MySQL server during query") { - t.Errorf("Unexpected output for 'panic'") - } + assert.Contains(t, output, "ERROR 2013 (HY000)", "Unexpected output for 'panic'") + assert.Contains(t, output, "Lost connection to MySQL server during query", "Unexpected output for 'panic'") // Run an 'insert' command, no rows, but rows affected. output, err = runMysqlWithErr(t, params, "insert") require.NoError(t, err) - if !strings.Contains(output, "Query OK, 123 rows affected") { - t.Errorf("Unexpected output for 'insert'") - } + assert.Contains(t, output, "Query OK, 123 rows affected", "Unexpected output for 'insert'") // Run a 'schema echo' command, to make sure db name is right. 
params.DbName = "XXXfancyXXX" output, err = runMysqlWithErr(t, params, "schema echo") require.NoError(t, err) - if !strings.Contains(output, params.DbName) { - t.Errorf("Unexpected output for 'schema echo'") - } + assert.Contains(t, output, params.DbName, "Unexpected output for 'schema echo'") // Sanity check: make sure this didn't go through SSL output, err = runMysqlWithErr(t, params, "ssl echo") require.NoError(t, err) - if !strings.Contains(output, "ssl_flag") || - !strings.Contains(output, "OFF") || - !strings.Contains(output, "1 row in set") { - t.Errorf("Unexpected output for 'ssl echo': %v", output) - } + assert.Contains(t, output, "ssl_flag") + assert.Contains(t, output, "OFF") + assert.Contains(t, output, "1 row in set", "Unexpected output for 'ssl echo': %v", output) // UserData check: checks the server user data is correct. output, err = runMysqlWithErr(t, params, "userData echo") require.NoError(t, err) - if !strings.Contains(output, "user1") || - !strings.Contains(output, "user_data") || - !strings.Contains(output, "userData1") { - t.Errorf("Unexpected output for 'userData echo': %v", output) - } + assert.Contains(t, output, "user1") + assert.Contains(t, output, "user_data") + assert.Contains(t, output, "userData1", "Unexpected output for 'userData echo': %v", output) // Permissions check: check a bad password is rejected. params.Pass = "bad" output, err = runMysqlWithErr(t, params, "select rows") require.Error(t, err) - if !strings.Contains(output, "1045") || - !strings.Contains(output, "28000") || - !strings.Contains(output, "Access denied") { - t.Errorf("Unexpected output for invalid password: %v", output) - } + assert.Contains(t, output, "1045") + assert.Contains(t, output, "28000") + assert.Contains(t, output, "Access denied", "Unexpected output for invalid password: %v", output) // Permissions check: check an unknown user is rejected. 
params.Pass = "password1" params.Uname = "user2" output, err = runMysqlWithErr(t, params, "select rows") require.Error(t, err) - if !strings.Contains(output, "1045") || - !strings.Contains(output, "28000") || - !strings.Contains(output, "Access denied") { - t.Errorf("Unexpected output for invalid password: %v", output) - } + assert.Contains(t, output, "1045") + assert.Contains(t, output, "28000") + assert.Contains(t, output, "Access denied", "Unexpected output for invalid password: %v", output) // Uncomment to leave setup up for a while, to run tests manually. // fmt.Printf("Listening to server on host '%v' port '%v'.\n", host, port) @@ -698,10 +628,8 @@ func TestServerStats(t *testing.T) { }} defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } - l.SlowConnectWarnThreshold.Set(time.Duration(time.Nanosecond * 1)) + require.NoError(t, err) + l.SlowConnectWarnThreshold.Set(time.Nanosecond * 1) defer l.Close() go l.Accept() @@ -724,13 +652,11 @@ func TestServerStats(t *testing.T) { // Run an 'error' command. 
th.SetErr(NewSQLError(ERUnknownComError, SSUnknownComError, "forced query error")) output, ok := runMysql(t, params, "error") - if ok { - t.Fatalf("mysql should have failed: %v", output) - } - if !strings.Contains(output, "ERROR 1047 (08S01)") || - !strings.Contains(output, "forced query error") { - t.Errorf("Unexpected output for 'error': %v", output) - } + require.False(t, ok, "mysql should have failed: %v", output) + + assert.Contains(t, output, "ERROR 1047 (08S01)") + assert.Contains(t, output, "forced query error", "Unexpected output for 'error': %v", output) + assert.EqualValues(t, 0, connCount.Get(), "connCount") assert.EqualValues(t, 1, connAccept.Get(), "connAccept") assert.EqualValues(t, 1, connSlow.Get(), "connSlow") @@ -744,22 +670,18 @@ func TestServerStats(t *testing.T) { gotTimingCounts := timings.Counts() for key, got := range gotTimingCounts { expected := expectedTimingDeltas[key] - if got < expected { - t.Errorf("Expected Timing count delta %s should be >= %d, got %d", key, expected, got) - } + assert.GreaterOrEqual(t, got, expected, "Expected Timing count delta %s should be >= %d, got %d", key, expected, got) } // Set the slow connect threshold to something high that we don't expect to trigger - l.SlowConnectWarnThreshold.Set(time.Duration(time.Second * 1)) + l.SlowConnectWarnThreshold.Set(time.Second * 1) // Run a 'panic' command, other side should panic, recover and // close the connection. 
output, err = runMysqlWithErr(t, params, "panic") require.Error(t, err) - if !strings.Contains(output, "ERROR 2013 (HY000)") || - !strings.Contains(output, "Lost connection to MySQL server during query") { - t.Errorf("Unexpected output for 'panic'") - } + assert.Contains(t, output, "ERROR 2013 (HY000)") + assert.Contains(t, output, "Lost connection to MySQL server during query", "Unexpected output for 'panic': %v", output) assert.EqualValues(t, 0, connCount.Get(), "connCount") assert.EqualValues(t, 2, connAccept.Get(), "connAccept") @@ -780,9 +702,7 @@ func TestClearTextServer(t *testing.T) { authServer.method = MysqlClearPassword defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err) defer l.Close() go l.Accept() @@ -809,16 +729,14 @@ func TestClearTextServer(t *testing.T) { if isMariaDB { t.Logf("mysql should have failed but returned: %v\nbut letting it go on MariaDB", output) } else { - t.Fatalf("mysql should have failed but returned: %v", output) + require.Fail(t, "mysql should have failed but returned: %v", output) } } else { if strings.Contains(output, "No such file or directory") { t.Logf("skipping mysql clear text tests, as the clear text plugin cannot be loaded: %v", err) return } - if !strings.Contains(output, "plugin not enabled") { - t.Errorf("Unexpected output for 'select rows': %v", output) - } + assert.Contains(t, output, "plugin not enabled", "Unexpected output for 'select rows': %v", output) } // Now enable clear text plugin in client, but server requires SSL. 
@@ -827,34 +745,23 @@ func TestClearTextServer(t *testing.T) { sql = enableCleartextPluginPrefix + sql } output, ok = runMysql(t, params, sql) - if ok { - t.Fatalf("mysql should have failed but returned: %v", output) - } - if !strings.Contains(output, "Cannot use clear text authentication over non-SSL connections") { - t.Errorf("Unexpected output for 'select rows': %v", output) - } + assert.False(t, ok, "mysql should have failed but returned: %v", output) + assert.Contains(t, output, "Cannot use clear text authentication over non-SSL connections", "Unexpected output for 'select rows': %v", output) // Now enable clear text plugin, it should now work. l.AllowClearTextWithoutTLS.Set(true) output, ok = runMysql(t, params, sql) - if !ok { - t.Fatalf("mysql failed: %v", output) - } - if !strings.Contains(output, "nice name") || - !strings.Contains(output, "nicer name") || - !strings.Contains(output, "2 rows in set") { - t.Errorf("Unexpected output for 'select rows'") - } + require.True(t, ok, "mysql failed: %v", output) + + assert.Contains(t, output, "nice name", "Unexpected output for 'select rows'") + assert.Contains(t, output, "nicer name", "Unexpected output for 'select rows'") + assert.Contains(t, output, "2 rows in set", "Unexpected output for 'select rows'") // Change password, make sure server rejects us. params.Pass = "bad" output, ok = runMysql(t, params, sql) - if ok { - t.Fatalf("mysql should have failed but returned: %v", output) - } - if !strings.Contains(output, "Access denied for user 'user1'") { - t.Errorf("Unexpected output for 'select rows': %v", output) - } + assert.False(t, ok, "mysql should have failed but returned: %v", output) + assert.Contains(t, output, "Access denied for user 'user1'", "Unexpected output for 'select rows': %v", output) } // TestDialogServer creates a Server that uses the dialog plugin on the client. 
@@ -869,9 +776,7 @@ func TestDialogServer(t *testing.T) { authServer.method = MysqlDialog defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err) l.AllowClearTextWithoutTLS.Set(true) defer l.Close() go l.Accept() @@ -891,14 +796,10 @@ func TestDialogServer(t *testing.T) { t.Logf("skipping dialog plugin tests, as the dialog plugin cannot be loaded: %v", err) return } - if !ok { - t.Fatalf("mysql failed: %v", output) - } - if !strings.Contains(output, "nice name") || - !strings.Contains(output, "nicer name") || - !strings.Contains(output, "2 rows in set") { - t.Errorf("Unexpected output for 'select rows': %v", output) - } + require.True(t, ok, "mysql failed: %v", output) + assert.Contains(t, output, "nice name", "Unexpected output for 'select rows': %v", output) + assert.Contains(t, output, "nicer name", "Unexpected output for 'select rows': %v", output) + assert.Contains(t, output, "2 rows in set", "Unexpected output for 'select rows': %v", output) } // TestTLSServer creates a Server with TLS support, then uses mysql @@ -917,24 +818,18 @@ func TestTLSServer(t *testing.T) { // a check that the common name of the certificate matches the // server host name we connect to. l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err) defer l.Close() // Make sure hostname is added as an entry to /etc/hosts, otherwise ssl handshake will fail host, err := os.Hostname() - if err != nil { - t.Fatalf("Failed to get os Hostname: %v", err) - } + require.NoError(t, err) port := l.Addr().(*net.TCPAddr).Port // Create the certs. 
root, err := ioutil.TempDir("", "TestTLSServer") - if err != nil { - t.Fatalf("TempDir failed: %v", err) - } + require.NoError(t, err) defer os.RemoveAll(root) tlstest.CreateCA(root) tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", host) @@ -944,13 +839,13 @@ func TestTLSServer(t *testing.T) { serverConfig, err := vttls.ServerConfig( path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), - path.Join(root, "ca-cert.pem")) - if err != nil { - t.Fatalf("TLSServerConfig failed: %v", err) - } + path.Join(root, "ca-cert.pem"), + "") + require.NoError(t, err) l.TLSConfig.Store(serverConfig) go l.Accept() + connCountByTLSVer.ResetAll() // Setup the right parameters. params := &ConnParams{ Host: host, @@ -967,13 +862,9 @@ func TestTLSServer(t *testing.T) { // Run a 'select rows' command with results. conn, err := Connect(context.Background(), params) //output, ok := runMysql(t, params, "select rows") - if err != nil { - t.Fatalf("mysql failed: %v", err) - } + require.NoError(t, err) results, err := conn.ExecuteFetch("select rows", 1000, true) - if err != nil { - t.Fatalf("mysql fetch failed: %v", err) - } + require.NoError(t, err) output := "" for _, row := range results.Rows { r := make([]string, 0) @@ -983,27 +874,20 @@ func TestTLSServer(t *testing.T) { output = output + strings.Join(r, ",") + "\n" } - if results.Rows[0][1].ToString() != "nice name" || - results.Rows[1][1].ToString() != "nicer name" || - len(results.Rows) != 2 { - t.Errorf("Unexpected output for 'select rows': %v", output) - } + assert.Equal(t, "nice name", results.Rows[0][1].ToString()) + assert.Equal(t, "nicer name", results.Rows[1][1].ToString()) + assert.Equal(t, 2, len(results.Rows)) // make sure this went through SSL results, err = conn.ExecuteFetch("ssl echo", 1000, true) - if err != nil { - t.Fatalf("mysql fetch failed: %v", err) - } - if results.Rows[0][0].ToString() != "ON" { - t.Errorf("Unexpected output for 'ssl echo': %v", results) - } + require.NoError(t, err) + 
assert.Equal(t, "ON", results.Rows[0][0].ToString()) // Find out which TLS version the connection actually used, // so we can check that the corresponding counter was incremented. tlsVersion := conn.conn.(*tls.Conn).ConnectionState().Version checkCountForTLSVer(t, tlsVersionToString(tlsVersion), 1) - checkCountForTLSVer(t, versionNoTLS, 0) conn.Close() } @@ -1024,24 +908,18 @@ func TestTLSRequired(t *testing.T) { // a check that the common name of the certificate matches the // server host name we connect to. l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err) defer l.Close() // Make sure hostname is added as an entry to /etc/hosts, otherwise ssl handshake will fail host, err := os.Hostname() - if err != nil { - t.Fatalf("Failed to get os Hostname: %v", err) - } + require.NoError(t, err) port := l.Addr().(*net.TCPAddr).Port // Create the certs. root, err := ioutil.TempDir("", "TestTLSRequired") - if err != nil { - t.Fatalf("TempDir failed: %v", err) - } + require.NoError(t, err) defer os.RemoveAll(root) tlstest.CreateCA(root) tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", host) @@ -1050,10 +928,9 @@ func TestTLSRequired(t *testing.T) { serverConfig, err := vttls.ServerConfig( path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), - path.Join(root, "ca-cert.pem")) - if err != nil { - t.Fatalf("TLSServerConfig failed: %v", err) - } + path.Join(root, "ca-cert.pem"), + "") + require.NoError(t, err) l.TLSConfig.Store(serverConfig) l.RequireSecureTransport = true go l.Accept() @@ -1066,9 +943,10 @@ func TestTLSRequired(t *testing.T) { Pass: "password1", } conn, err := Connect(context.Background(), params) - if err == nil { - t.Fatal("mysql should have failed") - } + require.NotNil(t, err) + require.Contains(t, err.Error(), "Code: UNAVAILABLE") + require.Contains(t, err.Error(), "server does not allow insecure connections, client must use 
SSL/TLS") + require.Contains(t, err.Error(), "(errno 1105) (sqlstate HY000)") if conn != nil { conn.Close() } @@ -1081,9 +959,7 @@ func TestTLSRequired(t *testing.T) { params.SslKey = path.Join(root, "client-key.pem") conn, err = Connect(context.Background(), params) - if err != nil { - t.Fatalf("mysql failed: %v", err) - } + require.NoError(t, err) if conn != nil { conn.Close() } @@ -1092,13 +968,8 @@ func TestTLSRequired(t *testing.T) { func checkCountForTLSVer(t *testing.T, version string, expected int64) { connCounts := connCountByTLSVer.Counts() count, ok := connCounts[version] - if ok { - if count != expected { - t.Errorf("Expected connection count for version %s to be %d, got %d", version, expected, count) - } - } else { - t.Errorf("No count for version %s found in %v", version, connCounts) - } + assert.True(t, ok, "No count found for version %s", version) + assert.Equal(t, expected, count, "Unexpected connection count for version %s", version) } func TestErrorCodes(t *testing.T) { @@ -1111,9 +982,7 @@ func TestErrorCodes(t *testing.T) { }} defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err) defer l.Close() go l.Accept() @@ -1129,9 +998,7 @@ func TestErrorCodes(t *testing.T) { ctx := context.Background() client, err := Connect(ctx, params) - if err != nil { - t.Fatalf("error in connect: %v", err) - } + require.NoError(t, err) // Test that the right mysql errno/sqlstate are returned for various // internal vitess errors @@ -1152,9 +1019,9 @@ func TestErrorCodes(t *testing.T) { { err: vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, - "(errno %v) (sqlstate %v) invalid argument with errno", ERDupEntry, SSDupKey), + "(errno %v) (sqlstate %v) invalid argument with errno", ERDupEntry, SSConstraintViolation), code: ERDupEntry, - sqlState: SSDupKey, + sqlState: SSConstraintViolation, text: "invalid argument with errno", }, { @@ -1162,7 
+1029,7 @@ func TestErrorCodes(t *testing.T) { vtrpcpb.Code_DEADLINE_EXCEEDED, "connection deadline exceeded"), code: ERQueryInterrupted, - sqlState: SSUnknownSQLState, + sqlState: SSQueryInterrupted, text: "deadline exceeded", }, { @@ -1170,7 +1037,7 @@ func TestErrorCodes(t *testing.T) { vtrpcpb.Code_RESOURCE_EXHAUSTED, "query pool timeout"), code: ERTooManyUserConnections, - sqlState: SSUnknownSQLState, + sqlState: SSClientError, text: "resource exhausted", }, { @@ -1182,27 +1049,17 @@ func TestErrorCodes(t *testing.T) { } for _, test := range tests { - th.SetErr(NewSQLErrorFromError(test.err)) - result, err := client.ExecuteFetch("error", 100, false) - if err == nil { - t.Fatalf("mysql should have failed but returned: %v", result) - } - serr, ok := err.(*SQLError) - if !ok { - t.Fatalf("mysql should have returned a SQLError") - } - - if serr.Number() != test.code { - t.Errorf("error in %s: want code %v got %v", test.text, test.code, serr.Number()) - } - - if serr.SQLState() != test.sqlState { - t.Errorf("error in %s: want sqlState %v got %v", test.text, test.sqlState, serr.SQLState()) - } - - if !strings.Contains(serr.Error(), test.err.Error()) { - t.Errorf("error in %s: want err %v got %v", test.text, test.err.Error(), serr.Error()) - } + t.Run(test.err.Error(), func(t *testing.T) { + th.SetErr(NewSQLErrorFromError(test.err)) + rs, err := client.ExecuteFetch("error", 100, false) + require.Error(t, err, "mysql should have failed but returned: %v", rs) + serr, ok := err.(*SQLError) + require.True(t, ok, "mysql should have returned a SQLError") + + assert.Equal(t, test.code, serr.Number(), "error in %s: want code %v got %v", test.text, test.code, serr.Number()) + assert.Equal(t, test.sqlState, serr.SQLState(), "error in %s: want sqlState %v got %v", test.text, test.sqlState, serr.SQLState()) + assert.Contains(t, serr.Error(), test.err.Error()) + }) } } @@ -1219,13 +1076,9 @@ func runMysql(t *testing.T, params *ConnParams, command string) (string, bool) { } func 
runMysqlWithErr(t *testing.T, params *ConnParams, command string) (string, error) { dir, err := vtenv.VtMysqlRoot() - if err != nil { - t.Fatalf("vtenv.VtMysqlRoot failed: %v", err) - } + require.NoError(t, err) name, err := binaryPath(dir, "mysql") - if err != nil { - t.Fatalf("binaryPath failed: %v", err) - } + require.NoError(t, err) // The args contain '-v' 3 times, to switch to very verbose output. // In particular, it has the message: // Query OK, 1 row affected (0.00 sec) @@ -1307,9 +1160,7 @@ func TestListenerShutdown(t *testing.T) { }} defer authServer.close() l, err := NewListener("tcp", ":0", authServer, th, 0, 0, false) - if err != nil { - t.Fatalf("NewListener failed: %v", err) - } + require.NoError(t, err) defer l.Close() go l.Accept() @@ -1322,43 +1173,29 @@ func TestListenerShutdown(t *testing.T) { Uname: "user1", Pass: "password1", } - initialconnRefuse := connRefuse.Get() + connRefuse.Reset() ctx, cancel := context.WithCancel(context.Background()) defer cancel() conn, err := Connect(ctx, params) - if err != nil { - t.Fatalf("Can't connect to listener: %v", err) - } + require.NoError(t, err) - if err := conn.Ping(); err != nil { - t.Fatalf("Ping failed: %v", err) - } + err = conn.Ping() + require.NoError(t, err) l.Shutdown() - if connRefuse.Get()-initialconnRefuse != 1 { - t.Errorf("Expected connRefuse delta=1, got %d", connRefuse.Get()-initialconnRefuse) - } + assert.EqualValues(t, 1, connRefuse.Get(), "connRefuse") - if err := conn.Ping(); err != nil { - sqlErr, ok := err.(*SQLError) - if !ok { - t.Fatalf("Wrong error type: %T", err) - } - if sqlErr.Number() != ERServerShutdown { - t.Fatalf("Unexpected sql error code: %d", sqlErr.Number()) - } - if sqlErr.SQLState() != SSServerShutdown { - t.Fatalf("Unexpected error sql state: %s", sqlErr.SQLState()) - } - if sqlErr.Message != "Server shutdown in progress" { - t.Fatalf("Unexpected error message: %s", sqlErr.Message) - } - } else { - t.Fatalf("Ping should fail after shutdown") - } + err = 
conn.Ping() + require.EqualError(t, err, "Server shutdown in progress (errno 1053) (sqlstate 08S01)") + sqlErr, ok := err.(*SQLError) + require.True(t, ok, "Wrong error type: %T", err) + + require.Equal(t, ERServerShutdown, sqlErr.Number()) + require.Equal(t, SSNetError, sqlErr.SQLState()) + require.Equal(t, "Server shutdown in progress", sqlErr.Message) } func TestParseConnAttrs(t *testing.T) { @@ -1380,20 +1217,12 @@ func TestParseConnAttrs(t *testing.T) { 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x05, 0x6d, 0x79, 0x73, 0x71, 0x6c} attrs, pos, err := parseConnAttrs(data, 0) - if err != nil { - t.Fatalf("Failed to read connection attributes: %v", err) - } - if pos != 113 { - t.Fatalf("Unexpeded pos after reading connection attributes: %d instead of 113", pos) - } + require.NoError(t, err) + require.Equal(t, 113, pos) for k, v := range expected { - if val, ok := attrs[k]; ok { - if val != v { - t.Fatalf("Unexpected value found in attrs for key %s: got %s expected %s", k, val, v) - } - } else { - t.Fatalf("Error reading key %s from connection attributes: attrs: %-v", k, attrs) - } + val, ok := attrs[k] + require.True(t, ok, "Error reading key %s from connection attributes: attrs: %-v", k, attrs) + require.Equal(t, v, val, "Unexpected value found in attrs for key %s", k) } } @@ -1425,7 +1254,7 @@ func TestServerFlush(t *testing.T) { flds, err := c.Fields() require.NoError(t, err) if duration, want := time.Since(start), 20*time.Millisecond; duration < *mysqlServerFlushDelay || duration > want { - t.Errorf("duration: %v, want between %v and %v", duration, *mysqlServerFlushDelay, want) + assert.Fail(t, "duration: %v, want between %v and %v", duration, *mysqlServerFlushDelay, want) } want1 := []*querypb.Field{{ Name: "result", @@ -1436,7 +1265,7 @@ func TestServerFlush(t *testing.T) { row, err := c.FetchNext() require.NoError(t, err) if duration, want := time.Since(start), 50*time.Millisecond; duration < want { - t.Errorf("duration: %v, want > 
%v", duration, want) + assert.Fail(t, "duration: %v, want > %v", duration, want) } want2 := []sqltypes.Value{sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("delayed"))} assert.Equal(t, want2, row) diff --git a/go/mysql/sql_error.go b/go/mysql/sql_error.go index 42f65b0a96b..fcf02abaf13 100644 --- a/go/mysql/sql_error.go +++ b/go/mysql/sql_error.go @@ -91,56 +91,43 @@ func NewSQLErrorFromError(err error) error { return serr } + sErr := convertToMysqlError(err) + if _, ok := sErr.(*SQLError); ok { + return sErr + } + msg := err.Error() match := errExtract.FindStringSubmatch(msg) if len(match) < 2 { // Map vitess error codes into the mysql equivalent code := vterrors.Code(err) num := ERUnknownError + ss := SSUnknownSQLState switch code { - case vtrpcpb.Code_CANCELED: - num = ERQueryInterrupted - case vtrpcpb.Code_UNKNOWN: - num = ERUnknownError - case vtrpcpb.Code_INVALID_ARGUMENT: - // TODO/demmer there are several more appropriate mysql error - // codes for the various invalid argument cases. 
- // it would be better to change the call sites to use - // the mysql style "(errno X) (sqlstate Y)" format rather than - // trying to add vitess error codes for all these cases - num = ERUnknownError - case vtrpcpb.Code_DEADLINE_EXCEEDED: + case vtrpcpb.Code_CANCELED, vtrpcpb.Code_DEADLINE_EXCEEDED, vtrpcpb.Code_ABORTED: num = ERQueryInterrupted - case vtrpcpb.Code_NOT_FOUND: - num = ERUnknownError - case vtrpcpb.Code_ALREADY_EXISTS: + ss = SSQueryInterrupted + case vtrpcpb.Code_UNKNOWN, vtrpcpb.Code_INVALID_ARGUMENT, vtrpcpb.Code_NOT_FOUND, vtrpcpb.Code_ALREADY_EXISTS, + vtrpcpb.Code_FAILED_PRECONDITION, vtrpcpb.Code_OUT_OF_RANGE, vtrpcpb.Code_UNAVAILABLE, vtrpcpb.Code_DATA_LOSS: num = ERUnknownError - case vtrpcpb.Code_PERMISSION_DENIED: - num = ERAccessDeniedError - case vtrpcpb.Code_UNAUTHENTICATED: + case vtrpcpb.Code_PERMISSION_DENIED, vtrpcpb.Code_UNAUTHENTICATED: num = ERAccessDeniedError + ss = SSAccessDeniedError case vtrpcpb.Code_RESOURCE_EXHAUSTED: num = demuxResourceExhaustedErrors(err.Error()) - case vtrpcpb.Code_FAILED_PRECONDITION: - num = ERUnknownError - case vtrpcpb.Code_ABORTED: - num = ERQueryInterrupted - case vtrpcpb.Code_OUT_OF_RANGE: - num = ERUnknownError + ss = SSClientError case vtrpcpb.Code_UNIMPLEMENTED: num = ERNotSupportedYet + ss = SSClientError case vtrpcpb.Code_INTERNAL: - num = ERUnknownError - case vtrpcpb.Code_UNAVAILABLE: - num = ERUnknownError - case vtrpcpb.Code_DATA_LOSS: - num = ERUnknownError + num = ERInternalError + ss = SSUnknownSQLState } // Not found, build a generic SQLError. 
return &SQLError{ Num: num, - State: SSUnknownSQLState, + State: ss, Message: msg, } } @@ -162,6 +149,58 @@ func NewSQLErrorFromError(err error) error { return serr } +var stateToMysqlCode = map[vterrors.State]struct { + num int + state string +}{ + vterrors.Undefined: {num: ERUnknownError, state: SSUnknownSQLState}, + vterrors.AccessDeniedError: {num: ERAccessDeniedError, state: SSAccessDeniedError}, + vterrors.BadDb: {num: ERBadDb, state: SSClientError}, + vterrors.BadFieldError: {num: ERBadFieldError, state: SSBadFieldError}, + vterrors.CantUseOptionHere: {num: ERCantUseOptionHere, state: SSClientError}, + vterrors.DataOutOfRange: {num: ERDataOutOfRange, state: SSDataOutOfRange}, + vterrors.DbCreateExists: {num: ERDbCreateExists, state: SSUnknownSQLState}, + vterrors.DbDropExists: {num: ERDbDropExists, state: SSUnknownSQLState}, + vterrors.EmptyQuery: {num: EREmptyQuery, state: SSClientError}, + vterrors.IncorrectGlobalLocalVar: {num: ERIncorrectGlobalLocalVar, state: SSUnknownSQLState}, + vterrors.InnodbReadOnly: {num: ERInnodbReadOnly, state: SSUnknownSQLState}, + vterrors.LockOrActiveTransaction: {num: ERLockOrActiveTransaction, state: SSUnknownSQLState}, + vterrors.NoDB: {num: ERNoDb, state: SSNoDB}, + vterrors.NoSuchTable: {num: ERNoSuchTable, state: SSUnknownTable}, + vterrors.NotSupportedYet: {num: ERNotSupportedYet, state: SSClientError}, + vterrors.ForbidSchemaChange: {num: ERForbidSchemaChange, state: SSUnknownSQLState}, + vterrors.NetPacketTooLarge: {num: ERNetPacketTooLarge, state: SSNetError}, + vterrors.NonUniqTable: {num: ERNonUniqTable, state: SSClientError}, + vterrors.QueryInterrupted: {num: ERQueryInterrupted, state: SSQueryInterrupted}, + vterrors.SPDoesNotExist: {num: ERSPDoesNotExist, state: SSClientError}, + vterrors.SyntaxError: {num: ERSyntaxError, state: SSClientError}, + vterrors.UnsupportedPS: {num: ERUnsupportedPS, state: SSUnknownSQLState}, + vterrors.UnknownSystemVariable: {num: ERUnknownSystemVariable, state: SSUnknownSQLState}, + 
vterrors.UnknownTable: {num: ERUnknownTable, state: SSUnknownTable}, + vterrors.WrongGroupField: {num: ERWrongGroupField, state: SSClientError}, + vterrors.WrongNumberOfColumnsInSelect: {num: ERWrongNumberOfColumnsInSelect, state: SSWrongNumberOfColumns}, + vterrors.WrongTypeForVar: {num: ERWrongTypeForVar, state: SSClientError}, + vterrors.WrongValueForVar: {num: ERWrongValueForVar, state: SSClientError}, +} + +func init() { + if len(stateToMysqlCode) != int(vterrors.NumOfStates) { + panic("all vterrors states are not mapped to mysql errors") + } +} + +func convertToMysqlError(err error) error { + errState := vterrors.ErrState(err) + if errState == vterrors.Undefined { + return err + } + mysqlCode, ok := stateToMysqlCode[errState] + if !ok { + return err + } + return NewSQLError(mysqlCode.num, mysqlCode.state, err.Error()) +} + var isGRPCOverflowRE = regexp.MustCompile(`.*grpc: received message larger than max \(\d+ vs. \d+\)`) func demuxResourceExhaustedErrors(msg string) int { diff --git a/go/mysql/sql_error_test.go b/go/mysql/sql_error_test.go index d12e11ba1cd..223d26ce2c4 100644 --- a/go/mysql/sql_error_test.go +++ b/go/mysql/sql_error_test.go @@ -19,6 +19,11 @@ package mysql import ( "testing" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "github.com/stretchr/testify/assert" ) @@ -43,3 +48,117 @@ func TestDumuxResourceExhaustedErrors(t *testing.T) { assert.Equalf(t, c.want, got, c.msg) } } + +func TestNewSQLErrorFromError(t *testing.T) { + var tCases = []struct { + err error + num int + ss string + }{ + { + err: vterrors.Errorf(vtrpc.Code_OK, "ok"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_CANCELED, "cancelled"), + num: ERQueryInterrupted, + ss: SSQueryInterrupted, + }, + { + err: vterrors.Errorf(vtrpc.Code_UNKNOWN, "unknown"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: 
vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid argument"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "deadline exceeded"), + num: ERQueryInterrupted, + ss: SSQueryInterrupted, + }, + { + err: vterrors.Errorf(vtrpc.Code_NOT_FOUND, "code not found"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_ALREADY_EXISTS, "already exists"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_PERMISSION_DENIED, "permission denied"), + num: ERAccessDeniedError, + ss: SSAccessDeniedError, + }, + { + err: vterrors.Errorf(vtrpc.Code_UNAUTHENTICATED, "unauthenticated"), + num: ERAccessDeniedError, + ss: SSAccessDeniedError, + }, + { + err: vterrors.Errorf(vtrpc.Code_RESOURCE_EXHAUSTED, "resource exhausted"), + num: ERTooManyUserConnections, + ss: SSClientError, + }, + { + err: vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "failed precondition"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_ABORTED, "aborted"), + num: ERQueryInterrupted, + ss: SSQueryInterrupted, + }, + { + err: vterrors.Errorf(vtrpc.Code_OUT_OF_RANGE, "out of range"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "unimplemented"), + num: ERNotSupportedYet, + ss: SSClientError, + }, + { + err: vterrors.Errorf(vtrpc.Code_INTERNAL, "internal"), + num: ERInternalError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "unavailable"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.Errorf(vtrpc.Code_DATA_LOSS, "data loss"), + num: ERUnknownError, + ss: SSUnknownSQLState, + }, + { + err: vterrors.NewErrorf(vtrpc.Code_ALREADY_EXISTS, vterrors.DbCreateExists, "create db exists"), + num: ERDbCreateExists, + ss: SSUnknownSQLState, + }, + { + err: vterrors.NewErrorf(vtrpc.Code_FAILED_PRECONDITION, 
vterrors.NoDB, "no db selected"), + num: ERNoDb, + ss: SSNoDB, + }, + } + + for _, tc := range tCases { + t.Run(tc.err.Error(), func(t *testing.T) { + err := NewSQLErrorFromError(tc.err) + sErr, ok := err.(*SQLError) + require.True(t, ok) + assert.Equal(t, tc.num, sErr.Number()) + assert.Equal(t, tc.ss, sErr.SQLState()) + }) + } +} diff --git a/go/pools/numbered.go b/go/pools/numbered.go index 1f88ae3d7da..04cc5807d55 100644 --- a/go/pools/numbered.go +++ b/go/pools/numbered.go @@ -47,15 +47,13 @@ type unregistered struct { timeUnregistered time.Time } -func (u *unregistered) Size() int { - return 1 -} - //NewNumbered creates a new numbered func NewNumbered() *Numbered { n := &Numbered{ - resources: make(map[int64]*numberedWrapper), - recentlyUnregistered: cache.NewLRUCache(1000), + resources: make(map[int64]*numberedWrapper), + recentlyUnregistered: cache.NewLRUCache(1000, func(_ interface{}) int64 { + return 1 + }), } n.empty = sync.NewCond(&n.mu) return n diff --git a/go/proc/counting_listener.go b/go/proc/counting_listener.go deleted file mode 100644 index fe29bbccabf..00000000000 --- a/go/proc/counting_listener.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package proc - -import ( - "net" - - "vitess.io/vitess/go/stats" -) - -type CountingListener struct { - net.Listener - ConnCount *stats.Gauge - ConnAccept *stats.Counter -} - -type countingConnection struct { - net.Conn - listener *CountingListener -} - -// Published creates a wrapper for net.Listener that -// publishes connection stats. -func Published(l net.Listener, countTag, acceptTag string) net.Listener { - return &CountingListener{ - Listener: l, - ConnCount: stats.NewGauge(countTag, "Active connections accepted by counting listener"), - ConnAccept: stats.NewCounter(acceptTag, "Count of connections accepted by the counting listener"), - } -} - -// Accept increments stats counters before returning -// a connection. -func (l *CountingListener) Accept() (c net.Conn, err error) { - conn, err := l.Listener.Accept() - if err != nil { - return nil, err - } - l.ConnCount.Add(1) - l.ConnAccept.Add(1) - return &countingConnection{conn, l}, nil -} - -// Close decrements the stats counter and -// closes the connection. -func (c *countingConnection) Close() error { - if c.listener != nil { - c.listener.ConnCount.Add(-1) - c.listener = nil - } - return c.Conn.Close() -} diff --git a/go/proc/counting_listener_test.go b/go/proc/counting_listener_test.go deleted file mode 100644 index 4b4a20a1d42..00000000000 --- a/go/proc/counting_listener_test.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package proc - -import ( - "expvar" - "fmt" - "net" - "testing" -) - -func TestPublished(t *testing.T) { - l, err := Listen("") - if err != nil { - t.Fatal(err) - } - opened := make(chan struct{}) - closed := make(chan struct{}) - go func() { - for { - conn, err := l.Accept() - opened <- struct{}{} - if err != nil { - t.Error(err) - } - go func() { - b := make([]byte, 100) - for { - _, err := conn.Read(b) - if err != nil { - conn.Close() - closed <- struct{}{} - return - } - } - }() - } - }() - - addr := l.Addr().String() - for i := 1; i <= 3; i++ { - conn1, err := net.Dial("tcp", addr) - if err != nil { - t.Fatal(err) - } - <-opened - if v := expvar.Get("ConnCount").String(); v != "1" { - t.Errorf("ConnCount: %v, want 1", v) - } - conn1.Close() - <-closed - if v := expvar.Get("ConnCount").String(); v != "0" { - t.Errorf("ConnCount: %v, want 1", v) - } - if v := expvar.Get("ConnAccepted").String(); v != fmt.Sprintf("%d", i) { - t.Errorf("ConnAccepted: %v, want %d", v, i) - } - } -} diff --git a/go/proc/proc.go b/go/proc/proc.go deleted file mode 100644 index 54152c167aa..00000000000 --- a/go/proc/proc.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package proc allows you to configure servers to be -// restarted with negligible downtime. 
-package proc - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "os/signal" - "strconv" - "strings" - "syscall" - "time" - - "vitess.io/vitess/go/vt/log" -) - -const pidURL = "/debug/pid" - -// Listen tries to create a listener on the specified tcp port. -// Before creating the listener, it checks to see if there is another -// server already using the port. If there is one, it sends a USR1 -// signal requesting the server to shutdown, and then attempts to -// to create the listener. -func Listen(port string) (l net.Listener, err error) { - if port != "" { - killPredecessor(port) - } - return listen(port) -} - -// Wait creates an HTTP handler on pidURL, and serves the current process -// pid on it. It then creates a signal handler and waits for SIGTERM or -// SIGUSR1, and returns when the signal is received. A new server that comes -// up will query this URL. If it receives a valid response, it will send a -// SIGUSR1 signal and attempt to bind to the port the current server is using. -func Wait() os.Signal { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM, syscall.SIGUSR1, syscall.SIGINT) - - http.HandleFunc(pidURL, func(r http.ResponseWriter, req *http.Request) { - r.Write(strconv.AppendInt(nil, int64(os.Getpid()), 10)) - }) - - return <-c -} - -// ListenAndServe combines Listen and Wait to also run an http -// server on the specified port. If it fails to obtain a listener, -// the program is fatally terminated. 
The return value is the signal -// received for termination -func ListenAndServe(port string) os.Signal { - l, err := Listen(port) - if err != nil { - log.Fatalf("%s", err) - } - go http.Serve(l, nil) - s := Wait() - l.Close() - return s -} - -func killPredecessor(port string) { - resp, err := http.Get(fmt.Sprintf("http://localhost:%s%s", port, pidURL)) - if err != nil { - if !strings.Contains(err.Error(), "connection refused") { - log.Errorf("unexpected error on port %v: %v, trying to start anyway", port, err) - } - return - } - num, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - log.Errorf("could not read pid: %vd, trying to start anyway", err) - return - } - pid, err := strconv.Atoi(string(num)) - if err != nil { - log.Errorf("could not read pid: %vd, trying to start anyway", err) - return - } - err = syscall.Kill(pid, syscall.SIGUSR1) - if err != nil { - log.Errorf("error killing %v: %v, trying to start anyway", pid, err) - } -} - -func listen(port string) (l net.Listener, err error) { - laddr := ":" + port - if laddr == ":" { - laddr = ":0" - } - for i := 0; i < 100; i++ { - l, err = net.Listen("tcp", laddr) - if err != nil { - if strings.Contains(err.Error(), "already in use") { - time.Sleep(1 * time.Millisecond) - continue - } - return nil, err - } - break - } - return Published(l, "ConnCount", "ConnAccepted"), err -} diff --git a/go/proc/proc_flaky_test.go b/go/proc/proc_flaky_test.go deleted file mode 100644 index 4c9d6aa6c2e..00000000000 --- a/go/proc/proc_flaky_test.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proc - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "os/exec" - "strconv" - "strings" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestRestart(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test in short mode.") - } - - switch os.Getenv("SERVER_NUM") { - case "": - testLaunch(t) - case "1": - testServer(t, syscall.SIGUSR1) - case "2": - testServer(t, syscall.SIGTERM) - } -} - -func testLaunch(t *testing.T) { - var err error - l, err := net.Listen("tcp", "") - if err != nil { - t.Fatalf("could not initialize listener: %v", err) - } - hostport := l.Addr().String() - l.Close() - _, port, err := net.SplitHostPort(hostport) - if err != nil { - t.Fatal(err) - } - - cmd1 := launchServer(t, port, 1) - defer cmd1.Process.Kill() - testPid(t, port, cmd1.Process.Pid) - - cmd2 := launchServer(t, port, 2) - defer cmd2.Process.Kill() - err = cmd1.Wait() - require.NoError(t, err) - testPid(t, port, cmd2.Process.Pid) - - err = syscall.Kill(cmd2.Process.Pid, syscall.SIGTERM) - require.NoError(t, err) - err = cmd2.Wait() - require.NoError(t, err) -} - -func launchServer(t *testing.T, port string, num int) *exec.Cmd { - cmd := exec.Command(os.Args[0], "-test.run=^TestRestart$") - cmd.Env = []string{ - fmt.Sprintf("SERVER_NUM=%d", num), - fmt.Sprintf("PORT=%s", port), - } - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Start() - if err != nil { - t.Fatal(err) - } - return cmd -} - -func testPid(t *testing.T, port string, want int) { - var resp 
*http.Response - var err error - retryIntervalMs := 50 - // Retry up to two seconds. - for i := 0; i < (2000 / retryIntervalMs); i++ { - resp, err = http.Get(fmt.Sprintf("http://localhost:%s%s", port, pidURL)) - var retryableErr bool - if err != nil { - if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "EOF") || - strings.Contains(err.Error(), "net/http: transport closed before response was received") || - strings.Contains(err.Error(), "http: can't write HTTP request on broken connection") { - retryableErr = true - } - } - if err == nil && resp.StatusCode != http.StatusOK { - err = fmt.Errorf("http request was not successful. status: %v", resp.Status) - if resp.StatusCode == http.StatusNotFound { - // Observable when the handler is not installed yet. - retryableErr = true - } - } - if err != nil && retryableErr { - time.Sleep(50 * time.Millisecond) - continue - } - break - } - if err != nil { - t.Fatalf("unexpected error on port %v: %v", port, err) - } - num, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - t.Fatalf("could not read pid: %vd", err) - } - got, err := strconv.Atoi(string(num)) - if err != nil { - t.Fatalf("could not read pid: %vd", err) - } - if want != got { - t.Errorf("want %d, got %d", want, got) - } -} - -func testServer(t *testing.T, want syscall.Signal) { - l, err := Listen(os.Getenv("PORT")) - if err != nil { - t.Fatalf("could not initialize listener: %v", err) - } - go http.Serve(l, nil) - got := Wait() - l.Close() - if want != got { - t.Errorf("want %v, got %v", want, got) - } -} diff --git a/go/protoutil/doc.go b/go/protoutil/doc.go new file mode 100644 index 00000000000..15a0aad339e --- /dev/null +++ b/go/protoutil/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package protoutil provides helper functions for working with well-known protobuf +types. + +It aims to serve a purpose similar to packages topoproto and mysqlctlproto, but +for general, common types, rather than types related to a particular Vitess RPC +service. +*/ +package protoutil diff --git a/go/protoutil/duration.go b/go/protoutil/duration.go new file mode 100644 index 00000000000..f959b15e5e4 --- /dev/null +++ b/go/protoutil/duration.go @@ -0,0 +1,57 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protoutil + +import ( + "fmt" + "time" + + "vitess.io/vitess/go/vt/proto/vttime" +) + +// DurationFromProto converts a durationpb type to a time.Duration. It returns a +// three-tuple of (dgo, ok, err) where dgo is the go time.Duration, ok indicates +// whether the proto value was set, and err is set on failure to convert the +// proto value. 
+func DurationFromProto(dpb *vttime.Duration) (time.Duration, bool, error) { + if dpb == nil { + return 0, false, nil + } + + d := time.Duration(dpb.Seconds) * time.Second + if int64(d/time.Second) != dpb.Seconds { + return 0, true, fmt.Errorf("duration: %v is out of range for time.Duration", dpb) + } + if dpb.Nanos != 0 { + d += time.Duration(dpb.Nanos) * time.Nanosecond + if (d < 0) != (dpb.Nanos < 0) { + return 0, true, fmt.Errorf("duration: %v is out of range for time.Duration", dpb) + } + } + return d, true, nil +} + +// DurationToProto converts a time.Duration to a durpb.Duration. +func DurationToProto(d time.Duration) *vttime.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &vttime.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/go/protoutil/duration_test.go b/go/protoutil/duration_test.go new file mode 100644 index 00000000000..20f01482563 --- /dev/null +++ b/go/protoutil/duration_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protoutil + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/proto/vttime" +) + +func TestDurationFromProto(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in *vttime.Duration + expected time.Duration + isOk bool + shouldErr bool + }{ + { + name: "success", + in: &vttime.Duration{Seconds: 1000}, + expected: time.Second * 1000, + isOk: true, + shouldErr: false, + }, + { + name: "nil value", + in: nil, + expected: 0, + isOk: false, + shouldErr: false, + }, + { + name: "error", + in: &vttime.Duration{ + // This is the max allowed seconds for a durationpb, plus 1. + Seconds: int64(10000*365.25*24*60*60) + 1, + }, + expected: 0, + isOk: true, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + actual, ok, err := DurationFromProto(tt.in) + if tt.shouldErr { + assert.Error(t, err) + assert.Equal(t, tt.isOk, ok, "expected (_, ok, _) = DurationFromProto; to be ok = %v", tt.isOk) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, actual) + assert.Equal(t, tt.isOk, ok, "expected (_, ok, _) = DurationFromProto; to be ok = %v", tt.isOk) + }) + } +} diff --git a/go/sqltypes/bind_variables.go b/go/sqltypes/bind_variables.go index 3c9bc6bbe9e..9d60c1c8e1a 100644 --- a/go/sqltypes/bind_variables.go +++ b/go/sqltypes/bind_variables.go @@ -75,12 +75,12 @@ func Int32BindVariable(v int32) *querypb.BindVariable { return ValueBindVariable(NewInt32(v)) } -// BoolBindVariable converts an bool to a int32 bind var. +// BoolBindVariable converts an bool to a int64 bind var. func BoolBindVariable(v bool) *querypb.BindVariable { if v { - return Int32BindVariable(1) + return Int64BindVariable(1) } - return Int32BindVariable(0) + return Int64BindVariable(0) } // Int64BindVariable converts an int64 to a bind var. 
diff --git a/go/sqltypes/named_result.go b/go/sqltypes/named_result.go index 7a67b5d4489..6d1621a8f1a 100644 --- a/go/sqltypes/named_result.go +++ b/go/sqltypes/named_result.go @@ -78,6 +78,22 @@ func (r RowNamedValues) AsUint64(fieldName string, def uint64) uint64 { return def } +// AsFloat64 returns the named field as float64, or default value if nonexistent/error +func (r RowNamedValues) AsFloat64(fieldName string, def float64) float64 { + if v, err := r.ToFloat64(fieldName); err == nil { + return v + } + return def +} + +// ToFloat64 returns the named field as float64 +func (r RowNamedValues) ToFloat64(fieldName string) (float64, error) { + if v, ok := r[fieldName]; ok { + return v.ToFloat64() + } + return 0, ErrNoSuchField +} + // ToBool returns the named field as bool func (r RowNamedValues) ToBool(fieldName string) (bool, error) { if v, ok := r[fieldName]; ok { diff --git a/go/sqltypes/result.go b/go/sqltypes/result.go index d033d9ad5ca..7df1ef2aebf 100644 --- a/go/sqltypes/result.go +++ b/go/sqltypes/result.go @@ -31,8 +31,27 @@ type Result struct { InsertID uint64 `json:"insert_id"` Rows [][]Value `json:"rows"` SessionStateChanges string `json:"session_state_changes"` + StatusFlags uint16 `json:"status_flags"` } +//goland:noinspection GoUnusedConst +const ( + ServerStatusInTrans = 0x0001 + ServerStatusAutocommit = 0x0002 + ServerMoreResultsExists = 0x0008 + ServerStatusNoGoodIndexUsed = 0x0010 + ServerStatusNoIndexUsed = 0x0020 + ServerStatusCursorExists = 0x0040 + ServerStatusLastRowSent = 0x0080 + ServerStatusDbDropped = 0x0100 + ServerStatusNoBackslashEscapes = 0x0200 + ServerStatusMetadataChanged = 0x0400 + ServerQueryWasSlow = 0x0800 + ServerPsOutParams = 0x1000 + ServerStatusInTransReadonly = 0x2000 + ServerSessionStateChanged = 0x4000 +) + // ResultStream is an interface for receiving Result. It is used for // RPC interfaces. 
type ResultStream interface { @@ -220,7 +239,7 @@ func (result *Result) StripMetadata(incl querypb.ExecuteOptions_IncludedFields) // to another result.Note currently it doesn't handle cases like // if two results have different fields.We will enhance this function. func (result *Result) AppendResult(src *Result) { - if src.RowsAffected == 0 && len(src.Fields) == 0 { + if src.RowsAffected == 0 && len(src.Rows) == 0 && len(src.Fields) == 0 { return } if result.Fields == nil { @@ -237,3 +256,13 @@ func (result *Result) AppendResult(src *Result) { func (result *Result) Named() *NamedResult { return ToNamedResult(result) } + +// IsMoreResultsExists returns true if the status flag has SERVER_MORE_RESULTS_EXISTS set +func (result *Result) IsMoreResultsExists() bool { + return result.StatusFlags&ServerMoreResultsExists == ServerMoreResultsExists +} + +// IsInTransaction returns true if the status flag has SERVER_STATUS_IN_TRANS set +func (result *Result) IsInTransaction() bool { + return result.StatusFlags&ServerStatusInTrans == ServerStatusInTrans +} diff --git a/go/sqltypes/testing.go b/go/sqltypes/testing.go index 932b4b6573c..54cfdcedef8 100644 --- a/go/sqltypes/testing.go +++ b/go/sqltypes/testing.go @@ -73,7 +73,6 @@ func MakeTestResult(fields []*querypb.Field, rows ...string) *Result { result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col)) } } - result.RowsAffected = uint64(len(result.Rows)) return result } diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go index 550e8b86eaf..8b8bf84ad5c 100644 --- a/go/sqltypes/value.go +++ b/go/sqltypes/value.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "strconv" + "strings" "vitess.io/vitess/go/bytes2" "vitess.io/vitess/go/hack" @@ -399,6 +400,31 @@ func encodeBytesSQL(val []byte, b BinWriter) { b.Write(buf.Bytes()) } +// BufEncodeStringSQL encodes the string into a strings.Builder +func BufEncodeStringSQL(buf *strings.Builder, val string) { + buf.WriteByte('\'') + for _, ch := range val { + if ch > 255 { + 
buf.WriteRune(ch) + continue + } + if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape { + buf.WriteRune(ch) + } else { + buf.WriteByte('\\') + buf.WriteByte(encodedChar) + } + } + buf.WriteByte('\'') +} + +// EncodeStringSQL encodes the string as a SQL string. +func EncodeStringSQL(val string) string { + var buf strings.Builder + BufEncodeStringSQL(&buf, val) + return buf.String() +} + func encodeBytesSQLBits(val []byte, b BinWriter) { fmt.Fprint(b, "b'") for _, ch := range val { diff --git a/go/stats/histogram.go b/go/stats/histogram.go index 11844f253ce..3fe3562cade 100644 --- a/go/stats/histogram.go +++ b/go/stats/histogram.go @@ -27,6 +27,7 @@ import ( // splitting the counts under different buckets // using specified cutoffs. type Histogram struct { + name string help string cutoffs []int64 labels []string @@ -60,6 +61,7 @@ func NewGenericHistogram(name, help string, cutoffs []int64, labels []string, co panic("mismatched cutoff and label lengths") } h := &Histogram{ + name: name, help: help, cutoffs: cutoffs, labels: labels, @@ -85,6 +87,9 @@ func (h *Histogram) Add(value int64) { if h.hook != nil { h.hook(value) } + if defaultStatsdHook.histogramHook != nil && h.name != "" { + defaultStatsdHook.histogramHook(h.name, value) + } } // String returns a string representation of the Histogram. diff --git a/go/stats/hooks.go b/go/stats/hooks.go new file mode 100644 index 00000000000..8a13d0c2bae --- /dev/null +++ b/go/stats/hooks.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stats + +type statsdHook struct { + timerHook func(string, string, int64, *Timings) + histogramHook func(string, int64) +} + +var defaultStatsdHook = statsdHook{} + +// RegisterTimerHook registers timer hook +func RegisterTimerHook(hook func(string, string, int64, *Timings)) { + defaultStatsdHook.timerHook = hook +} + +// RegisterHistogramHook registers timer hook +func RegisterHistogramHook(hook func(string, int64)) { + defaultStatsdHook.histogramHook = hook +} diff --git a/go/stats/statsd/statsd.go b/go/stats/statsd/statsd.go index 1011d16fb0b..aa54ffab506 100644 --- a/go/stats/statsd/statsd.go +++ b/go/stats/statsd/statsd.go @@ -46,25 +46,6 @@ func makeLabels(labelNames []string, labelValsCombined string) []string { return tags } -func (sb StatsBackend) addHistogram(name string, h *stats.Histogram, tags []string) { - labels := h.Labels() - buckets := h.Buckets() - for i := range labels { - name := fmt.Sprintf("%s.%s", name, labels[i]) - sb.statsdClient.Gauge(name, float64(buckets[i]), tags, sb.sampleRate) - } - sb.statsdClient.Gauge(fmt.Sprintf("%s.%s", name, h.CountLabel()), - (float64)(h.Count()), - tags, - sb.sampleRate, - ) - sb.statsdClient.Gauge(fmt.Sprintf("%s.%s", name, h.TotalLabel()), - (float64)(h.Total()), - tags, - sb.sampleRate, - ) -} - // Init initializes the statsd with the given namespace. 
func Init(namespace string) { servenv.OnRun(func() { @@ -81,16 +62,23 @@ func Init(namespace string) { sb.statsdClient = statsdC sb.sampleRate = *statsdSampleRate stats.RegisterPushBackend("statsd", sb) + stats.RegisterTimerHook(func(statsName, name string, value int64, timings *stats.Timings) { + tags := makeLabels(strings.Split(timings.Label(), "."), name) + if err := statsdC.TimeInMilliseconds(statsName, float64(value), tags, sb.sampleRate); err != nil { + log.Errorf("Fail to TimeInMilliseconds %v: %v", statsName, err) + } + }) + stats.RegisterHistogramHook(func(name string, val int64) { + if err := statsdC.Histogram(name, float64(val), []string{}, sb.sampleRate); err != nil { + log.Errorf("Fail to Histogram for %v: %v", name, err) + } + }) }) } func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { k := kv.Key switch v := kv.Value.(type) { - case *stats.String: - if err := sb.statsdClient.Set(k, v.Get(), nil, sb.sampleRate); err != nil { - log.Errorf("Failed to add String %v for key %v", v, k) - } case *stats.Counter: if err := sb.statsdClient.Count(k, v.Get(), nil, sb.sampleRate); err != nil { log.Errorf("Failed to add Counter %v for key %v", v, k) @@ -159,25 +147,9 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { log.Errorf("Failed to add GaugesWithSingleLabel %v for key %v", v, k) } } - case *stats.MultiTimings: - labels := v.Labels() - hists := v.Histograms() - for labelValsCombined, histogram := range hists { - sb.addHistogram(k, histogram, makeLabels(labels, labelValsCombined)) - } - case *stats.Timings: - // TODO: for statsd.timing metrics, there is no good way to transfer the histogram to it - // If we store a in memory buffer for stats.Timings and flush it here it's hard to make the stats - // thread safe. - // Instead, we export the timings stats as histogram here. 
We won't have the percentile breakdown - // for the metrics, but we can still get the average from total and count - labels := []string{v.Label()} - hists := v.Histograms() - for labelValsCombined, histogram := range hists { - sb.addHistogram(k, histogram, makeLabels(labels, labelValsCombined)) - } - case *stats.Histogram: - sb.addHistogram(k, v, []string{}) + case *stats.Timings, *stats.MultiTimings, *stats.Histogram: + // it does not make sense to export static expvar to statsd, + // instead we rely on hooks to integrate with statsd' timing and histogram api directly case expvar.Func: // Export memstats as gauge so that we don't need to call extra ReadMemStats if k == "memstats" { @@ -209,7 +181,7 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { } } } - case *stats.StringMapFunc, *stats.Rates, *stats.RatesFunc: + case *stats.Rates, *stats.RatesFunc, *stats.String, *stats.StringFunc, *stats.StringMapFunc: // Silently ignore metrics that does not make sense to be exported to statsd default: log.Warningf("Silently ignore metrics with key %v [%T]", k, kv.Value) diff --git a/go/stats/statsd/statsd_test.go b/go/stats/statsd/statsd_test.go index 7338eb27f18..a7ebce3769d 100644 --- a/go/stats/statsd/statsd_test.go +++ b/go/stats/statsd/statsd_test.go @@ -25,35 +25,14 @@ func getBackend(t *testing.T) (StatsBackend, *net.UDPConn) { sb.namespace = "foo" sb.sampleRate = 1 sb.statsdClient = client - return sb, server -} - -func TestStatsdString(t *testing.T) { - sb, server := getBackend(t) - defer server.Close() - name := "string_name" - stats.NewString(name).Set("foo") - found := false - expvar.Do(func(kv expvar.KeyValue) { - if kv.Key == name { - found = true - sb.addExpVar(kv) - if err := sb.statsdClient.Flush(); err != nil { - t.Errorf("Error flushing: %s", err) - } - bytes := make([]byte, 4096) - n, err := server.Read(bytes) - if err != nil { - t.Fatal(err) - } - result := string(bytes[:n]) - expected := "test.string_name:foo|s" - assert.Equal(t, result, 
expected) - } + stats.RegisterTimerHook(func(stats, name string, value int64, timings *stats.Timings) { + tags := makeLabels(strings.Split(timings.Label(), "."), name) + client.TimeInMilliseconds(stats, float64(value), tags, sb.sampleRate) }) - if !found { - t.Errorf("Stat %s not found...", name) - } + stats.RegisterHistogramHook(func(name string, val int64) { + client.Histogram(name, float64(val), []string{}, sb.sampleRate) + }) + return sb, server } func TestStatsdCounter(t *testing.T) { @@ -393,20 +372,8 @@ func TestStatsdMultiTimings(t *testing.T) { t.Fatal(err) } result := string(bytes[:n]) - expected := []string{ - "test.multi_timings_name.500000:0.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.1000000:0.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.5000000:0.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.10000000:1.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.50000000:0.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.100000000:0.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.500000000:0.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.1000000000:0.000000|g|#label1:foo,label2:bar", - "test.multi_timings_name.5000000000:0.000000|g|#label1:foo,label2:bar", - } - for i, res := range strings.Split(result, "\n") { - assert.Equal(t, res, expected[i]) - } + expected := "test.multi_timings_name:10.000000|ms|#label1:foo,label2:bar" + assert.Equal(t, result, expected) } }) if !found { @@ -434,20 +401,8 @@ func TestStatsdTimings(t *testing.T) { t.Fatal(err) } result := string(bytes[:n]) - expected := []string{ - "test.timings_name.500000:0.000000|g|#label1:foo", - "test.timings_name.1000000:0.000000|g|#label1:foo", - "test.timings_name.5000000:1.000000|g|#label1:foo", - "test.timings_name.10000000:0.000000|g|#label1:foo", - "test.timings_name.50000000:0.000000|g|#label1:foo", - "test.timings_name.100000000:0.000000|g|#label1:foo", - 
"test.timings_name.500000000:0.000000|g|#label1:foo", - "test.timings_name.1000000000:0.000000|g|#label1:foo", - "test.timings_name.5000000000:0.000000|g|#label1:foo", - } - for i, res := range strings.Split(result, "\n") { - assert.Equal(t, res, expected[i]) - } + expected := "test.timings_name:2.000000|ms|#label1:foo" + assert.Equal(t, result, expected) } }) if !found { @@ -478,12 +433,9 @@ func TestStatsdHistogram(t *testing.T) { } result := string(bytes[:n]) expected := []string{ - "test.histogram_name.1:0.000000|g", - "test.histogram_name.5:2.000000|g", - "test.histogram_name.10:1.000000|g", - "test.histogram_name.inf:0.000000|g", - "test.histogram_name.Count:3.000000|g", - "test.histogram_name.Total:11.000000|g", + "test.histogram_name:2.000000|h", + "test.histogram_name:3.000000|h", + "test.histogram_name:6.000000|h", } for i, res := range strings.Split(result, "\n") { assert.Equal(t, res, expected[i]) diff --git a/go/stats/timings.go b/go/stats/timings.go index 38d3c77f749..9048bedee11 100644 --- a/go/stats/timings.go +++ b/go/stats/timings.go @@ -34,6 +34,7 @@ type Timings struct { mu sync.RWMutex histograms map[string]*Histogram + name string help string label string labelCombined bool @@ -46,6 +47,7 @@ type Timings struct { func NewTimings(name, help, label string, categories ...string) *Timings { t := &Timings{ histograms: make(map[string]*Histogram), + name: name, help: help, label: label, labelCombined: IsDimensionCombined(label), @@ -87,6 +89,9 @@ func (t *Timings) Add(name string, elapsed time.Duration) { } t.mu.Unlock() } + if defaultStatsdHook.timerHook != nil && t.name != "" { + defaultStatsdHook.timerHook(t.name, name, elapsed.Milliseconds(), t) + } elapsedNs := int64(elapsed) hist.Add(elapsedNs) @@ -198,16 +203,19 @@ type MultiTimings struct { // NewMultiTimings creates a new MultiTimings object. 
func NewMultiTimings(name string, help string, labels []string) *MultiTimings { + combinedLabels := make([]bool, len(labels)) + for i, label := range labels { + combinedLabels[i] = IsDimensionCombined(label) + } t := &MultiTimings{ Timings: Timings{ histograms: make(map[string]*Histogram), + name: name, help: help, + label: safeJoinLabels(labels, combinedLabels), }, labels: labels, - combinedLabels: make([]bool, len(labels)), - } - for i, label := range labels { - t.combinedLabels[i] = IsDimensionCombined(label) + combinedLabels: combinedLabels, } if name != "" { publish(name, t) diff --git a/go/streamlog/streamlog.go b/go/streamlog/streamlog.go index 9c881a877bd..32fdb6be241 100644 --- a/go/streamlog/streamlog.go +++ b/go/streamlog/streamlog.go @@ -42,7 +42,10 @@ var ( QueryLogFormat = flag.String("querylog-format", "text", "format for query logs (\"text\" or \"json\")") // QueryLogFilterTag contains an optional string that must be present in the query for it to be logged - QueryLogFilterTag = flag.String("querylog-filter-tag", "", "string that must be present in the query for it to be logged") + QueryLogFilterTag = flag.String("querylog-filter-tag", "", "string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization") + + // QueryLogRowThreshold only log queries returning or affecting this many rows + QueryLogRowThreshold = flag.Uint64("querylog-row-threshold", 0, "Number of rows a query has to return or affect before being logged; not useful for streaming queries. 
0 means all queries will be logged.") sendCount = stats.NewCountersWithSingleLabel("StreamlogSend", "stream log send count", "logger_names") deliveredCount = stats.NewCountersWithMultiLabels( @@ -208,9 +211,19 @@ func GetFormatter(logger *StreamLogger) LogFormatter { // ShouldEmitLog returns whether the log with the given SQL query // should be emitted or filtered -func ShouldEmitLog(sql string) bool { - if *QueryLogFilterTag == "" { - return true +func ShouldEmitLog(sql string, rowsAffected, rowsReturned uint64) bool { + if *QueryLogRowThreshold > maxUint64(rowsAffected, rowsReturned) && *QueryLogFilterTag == "" { + return false + } + if *QueryLogFilterTag != "" { + return strings.Contains(sql, *QueryLogFilterTag) + } + return true +} + +func maxUint64(a, b uint64) uint64 { + if a < b { + return b } - return strings.Contains(sql, *QueryLogFilterTag) + return a } diff --git a/go/sync2/consolidator.go b/go/sync2/consolidator.go index 5e7698996c9..d0515615763 100644 --- a/go/sync2/consolidator.go +++ b/go/sync2/consolidator.go @@ -94,7 +94,9 @@ type ConsolidatorCache struct { // NewConsolidatorCache creates a new cache with the given capacity. func NewConsolidatorCache(capacity int64) *ConsolidatorCache { - return &ConsolidatorCache{cache.NewLRUCache(capacity)} + return &ConsolidatorCache{cache.NewLRUCache(capacity, func(_ interface{}) int64 { + return 1 + })} } // Record increments the count for "query" by 1. @@ -128,13 +130,6 @@ func (cc *ConsolidatorCache) Items() []ConsolidatorCacheItem { // request for the same query is already in progress. type ccount int64 -// Size always returns 1 because we use the cache only to track queries, -// independent of the number of requests waiting for them. -// This implements the cache.Value interface. 
-func (cc *ccount) Size() int { - return 1 -} - func (cc *ccount) add(n int64) int64 { return atomic.AddInt64((*int64)(cc), n) } diff --git a/go/test/endtoend/apps/mysql_log_reader.go b/go/test/endtoend/apps/mysql_log_reader.go deleted file mode 100644 index 9106aa56c25..00000000000 --- a/go/test/endtoend/apps/mysql_log_reader.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apps - -import ( - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" - "time" - - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" -) - -type EntryType int - -const ( - Connect EntryType = iota - Quit - Query - InitDb -) - -type LogEntry struct { - ConnectionID int - Time time.Time - Typ EntryType - Text string -} - -func ReadLogFile(filePath string) ([]*LogEntry, error) { - fil, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } - - return ReadLogLines(string(fil)) -} - -func ReadLogLines(input string) ([]*LogEntry, error) { - lines := strings.Split(input, "\n") - var currentQuery *LogEntry - var result []*LogEntry - - for _, line := range lines { - entry, normalLogLine := readLogLine(line) - if normalLogLine { - currentQuery = &entry - result = append(result, currentQuery) - } else { - if currentQuery == nil { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "encountered a weird log line %s", line) - } - currentQuery.Text = currentQuery.Text + "\n" + line - } - } - - 
return result, nil -} - -func readLogLine(in string) (entry LogEntry, success bool) { - endOfDateTime := strings.Index(in, "\t") - if endOfDateTime < 0 { - return LogEntry{}, false - } - dateTimeText := in[:endOfDateTime] - tid, err := time.Parse(time.RFC3339, dateTimeText) - if err != nil { - return LogEntry{}, false - } - - endOfTypeAndID := strings.LastIndex(in, "\t") - text := strings.TrimSpace(in[endOfTypeAndID:]) - - idAndCommand := in[endOfDateTime+1 : endOfTypeAndID] - idText, commandText := splitIDAndCommand(idAndCommand) - id, err := strconv.Atoi(idText) - if err != nil { - return LogEntry{}, false - } - - return LogEntry{ - ConnectionID: id, - Typ: parseCommand(commandText), - Time: tid, - Text: text, - }, true -} - -func splitIDAndCommand(in string) (string, string) { - r := regexp.MustCompile(`\s+(\d+) (\w+)`) - result := r.FindAllStringSubmatch(in, -1) - id := result[0][1] - text := result[0][2] - return id, text -} - -func parseCommand(in string) EntryType { - switch in { - case "Connect": - return Connect - case "Init": - return InitDb - case "Quit": - return Quit - case "Query": - return Query - } - - panic(fmt.Sprintf("unknown command type %s", in)) -} diff --git a/go/test/endtoend/apps/mysql_log_reader_test.go b/go/test/endtoend/apps/mysql_log_reader_test.go deleted file mode 100644 index 00fd88a323d..00000000000 --- a/go/test/endtoend/apps/mysql_log_reader_test.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apps - -import ( - "testing" - "time" - - "vitess.io/vitess/go/test/utils" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/require" -) - -func TestReadLogLine(t *testing.T) { - type test struct { - logLine string - expected LogEntry - } - - tests := []test{ - { - logLine: "2020-03-02T15:06:48.894157Z 2 Connect root@localhost on using Socket", - expected: LogEntry{ - ConnectionID: 2, - Time: time.Date(2020, 3, 2, 15, 6, 48, 894157*1000, time.UTC), - Typ: Connect, - Text: "root@localhost on using Socket", - }, - }, - { - logLine: "2020-03-02T15:08:22.551372Z 5 Quit ", - expected: LogEntry{ - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 8, 22, 551372*1000, time.UTC), - Typ: Quit, - Text: "", - }, - }, - { - logLine: "2020-03-02T15:15:24.533709Z 6 Init DB wordpressdb", - expected: LogEntry{ - ConnectionID: 6, - Time: time.Date(2020, 3, 2, 15, 15, 24, 533709*1000, time.UTC), - Typ: InitDb, - Text: "wordpressdb", - }, - }, - { - logLine: "2020-03-02T15:07:32.400439Z 5 Query select @@version_comment limit 1", - expected: LogEntry{ - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 7, 32, 400439*1000, time.UTC), - Typ: Query, - Text: "select @@version_comment limit 1", - }, - }, - } - - for _, test := range tests { - t.Run(test.logLine, func(t *testing.T) { - actual, success := readLogLine(test.logLine) - require.True(t, success, "should be successful") - if diff := cmp.Diff(test.expected, actual); diff != "" { - t.Error(diff) - } - }) - } -} - -func TestReadFullLog(t *testing.T) { - input := `2020-03-02T15:07:32.400210Z 5 Connect root@localhost on using Socket -2020-03-02T15:07:32.400439Z 5 Query select @@version_comment limit 1 -2020-03-02T15:08:04.272295Z 5 Query select 42 -2020-03-02T15:08:22.551372Z 5 Quit ` - result, err := ReadLogLines(input) - require.NoError(t, err) - - expected := []*LogEntry{{ - 
ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 7, 32, 400210*1000, time.UTC), - Typ: Connect, - Text: "root@localhost on using Socket", - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 7, 32, 400439*1000, time.UTC), - Typ: Query, - Text: "select @@version_comment limit 1", - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 8, 4, 272295*1000, time.UTC), - Typ: Query, - Text: "select 42", - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 8, 22, 551372*1000, time.UTC), - Typ: Quit, - Text: "", - }} - - utils.MustMatch(t, expected, result, "reading logs") -} - -func TestReadFullLogWithInterleavedChunks(t *testing.T) { - input := `2020-03-02T15:07:32.400210Z 5 Connect root@localhost on using Socket -2020-03-02T15:07:32.400439Z 5 Query select @@version_comment limit 1 -2020-03-02T15:15:24.532950Z 6 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:08:04.272295Z 5 Query select 42 -2020-03-02T15:15:24.533709Z 6 Init DB wordpressdb -2020-03-02T15:15:24.533921Z 6 Query SELECT wp_ -2020-03-02T15:08:22.551372Z 5 Quit -2020-03-02T15:15:24.536723Z 6 Quit ` - result, err := ReadLogLines(input) - require.NoError(t, err) - - expected := []*LogEntry{{ - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 7, 32, 400210*1000, time.UTC), - Typ: Connect, - Text: "root@localhost on using Socket", - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 7, 32, 400439*1000, time.UTC), - Typ: Query, - Text: "select @@version_comment limit 1", - }, { - ConnectionID: 6, - Time: time.Date(2020, 3, 2, 15, 15, 24, 532950*1000, time.UTC), - Typ: Connect, - Text: "wp_user@localhost on using TCP/IP", - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 8, 4, 272295*1000, time.UTC), - Typ: Query, - Text: "select 42", - }, { - ConnectionID: 6, - Time: time.Date(2020, 3, 2, 15, 15, 24, 533709*1000, time.UTC), - Typ: InitDb, - Text: "wordpressdb", - }, { - ConnectionID: 6, - Time: time.Date(2020, 3, 2, 15, 15, 24, 533921*1000, time.UTC), - Typ: Query, - 
Text: "SELECT wp_", - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 8, 22, 551372*1000, time.UTC), - Typ: Quit, - Text: "", - }, { - ConnectionID: 6, - Time: time.Date(2020, 3, 2, 15, 15, 24, 536723*1000, time.UTC), - Typ: Quit, - Text: "", - }} - - if diff := cmp.Diff(expected, result); diff != "" { - t.Error(diff) - } -} - -func TestReadFullLogWithMultiLineQueries(t *testing.T) { - input := `2020-03-02T15:07:32.400210Z 5 Connect root@localhost on using Socket -2020-03-02T15:16:50.431748Z 5 Query CREATE TABLE wp_users ( - ID bigint(20) unsigned NOT NULL auto_increment, - user_login varchar(60) NOT NULL default '', - user_pass varchar(255) NOT NULL default '', - user_nicename varchar(50) NOT NULL default '', - user_email varchar(100) NOT NULL default '', - user_url varchar(100) NOT NULL default '', - user_registered datetime NOT NULL default '0000-00-00 00:00:00', - user_activation_key varchar(255) NOT NULL default '', - user_status int(11) NOT NULL default '0', - display_name varchar(250) NOT NULL default '', - PRIMARY KEY (ID), - KEY user_login_key (user_login), - KEY user_nicename (user_nicename), - KEY user_email (user_email) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:08:22.551372Z 5 Quit ` - result, err := ReadLogLines(input) - require.NoError(t, err) - - expected := []*LogEntry{{ - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 7, 32, 400210*1000, time.UTC), - Typ: Connect, - Text: "root@localhost on using Socket", - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 16, 50, 431748*1000, time.UTC), - Typ: Query, - Text: `CREATE TABLE wp_users ( - ID bigint(20) unsigned NOT NULL auto_increment, - user_login varchar(60) NOT NULL default '', - user_pass varchar(255) NOT NULL default '', - user_nicename varchar(50) NOT NULL default '', - user_email varchar(100) NOT NULL default '', - user_url varchar(100) NOT NULL default '', - user_registered datetime NOT NULL default '0000-00-00 00:00:00', - 
user_activation_key varchar(255) NOT NULL default '', - user_status int(11) NOT NULL default '0', - display_name varchar(250) NOT NULL default '', - PRIMARY KEY (ID), - KEY user_login_key (user_login), - KEY user_nicename (user_nicename), - KEY user_email (user_email) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci`, - }, { - ConnectionID: 5, - Time: time.Date(2020, 3, 2, 15, 8, 22, 551372*1000, time.UTC), - Typ: Quit, - Text: "", - }} - - utils.MustMatch(t, expected, result, "") -} diff --git a/go/test/endtoend/apps/wordpress/install_wordpress_test.go b/go/test/endtoend/apps/wordpress/install_wordpress_test.go deleted file mode 100644 index 4713f590392..00000000000 --- a/go/test/endtoend/apps/wordpress/install_wordpress_test.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package wordpress - -import ( - "context" - "database/sql" - "fmt" - "testing" - - _ "github.com/go-sql-driver/mysql" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/apps" -) - -func TestInstallation(t *testing.T) { - t.Skip("not successful yet. 
run manually until this test reliably passes") - queryLog, err := apps.ReadLogFile("wordpres_install_querylog.txt") - require.NoError(t, err) - - mySQLDb, err := sql.Open("mysql", fmt.Sprintf("root@unix(%s)/", socketFile)) - require.NoError(t, err) - - vitessDb, err := sql.Open("mysql", fmt.Sprintf("root@tcp(127.0.0.1:%d)/", vtParams.Port)) - require.NoError(t, err) - - sessions := make(map[int]*QueryRunner) - for i, line := range queryLog { - t.Run(fmt.Sprintf("%d %s", i, line.Text), func(t *testing.T) { - runner, found := sessions[line.ConnectionID] - if !found { - runner = &QueryRunner{ - mysqlF: func() (*sql.Conn, error) { - return mySQLDb.Conn(ctx) - }, - vitessF: func() (*sql.Conn, error) { - return vitessDb.Conn(ctx) - }, - } - sessions[line.ConnectionID] = runner - } - switch line.Typ { - case apps.Connect: - runner.Reconnect() - case apps.InitDb: - runner.Query(t, "use "+line.Text) - case apps.Query: - runner.Query(t, line.Text) - case apps.Quit: - runner.mysql.Close() - runner.vitess.Close() - delete(sessions, line.ConnectionID) - } - }) - } -} - -type conCreator func() (*sql.Conn, error) - -type QueryRunner struct { - mysqlF, vitessF conCreator - mysql, vitess *sql.Conn -} - -func (qr *QueryRunner) Reconnect() error { - if qr.mysql != nil { - qr.mysql.Close() - } - if qr.vitess != nil { - qr.vitess.Close() - } - db, err := qr.vitessF() - if err != nil { - return err - } - qr.vitess = db - - db, err = qr.mysqlF() - if err != nil { - return err - } - qr.mysql = db - - return nil -} - -var ctx = context.Background() - -func (qr *QueryRunner) Query(t *testing.T, q string) { - resultM, errM := qr.mysql.QueryContext(ctx, q) - resultV, errV := qr.vitess.QueryContext(ctx, q) - if errM == nil { - defer resultM.Close() - } - if errV == nil { - defer resultV.Close() - } - - checkErrors := func(mysql, vitess error) { - if mysql == nil && vitess != nil { - t.Errorf("Vitess returned an error but mysql did not. 
Query: %s\nError: %s", q, errV.Error()) - } - if mysql != nil && vitess == nil { - t.Errorf("Mysql returned an error but Vitess did not. Query: %s\nError: %s", q, errM.Error()) - } - } - - checkErrors(errM, errV) - - if errV == nil && errM == nil { - _, errM := resultM.Columns() - _, errV := resultV.Columns() - checkErrors(errM, errV) - - // TODO check that the column names are equal - //if diff := cmp.Diff(mysqlColumns, vitessColumns); diff != "" { - // t.Error(diff) - //} - - m := count(resultM) - v := count(resultV) - if m != v { - t.Errorf("Query worked against both, but returned different number of rows. Query:%s\nmysql: %d vitess: %d", q, m, v) - } - } -} - -func count(in *sql.Rows) int { - i := 0 - for in.Next() { - i++ - } - return i -} diff --git a/go/test/endtoend/apps/wordpress/main_test.go b/go/test/endtoend/apps/wordpress/main_test.go deleted file mode 100644 index d3034a7b1d4..00000000000 --- a/go/test/endtoend/apps/wordpress/main_test.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package wordpress - -import ( - "flag" - "fmt" - "io/ioutil" - "net" - "os" - "os/exec" - "path" - "strings" - "syscall" - "testing" - "time" - - "database/sql" - - vtenv "vitess.io/vitess/go/vt/env" - - _ "github.com/go-sql-driver/mysql" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - vtParams mysql.ConnParams - KeyspaceName = "wordpressdb" - Cell = "test" - - VSchema = `{ - "sharded": false, - "tables": { - "wp_term_relationships":{}, - "wp_comments":{}, - "wp_links":{}, - "wp_options":{}, - "wp_postmeta":{}, - "wp_term_taxonomy":{}, - "wp_usermeta":{}, - "wp_termmeta":{}, - "wp_terms":{}, - "wp_commentmeta":{}, - "wp_posts":{}, - "wp_users":{} - } -}` -) - -func TestMain(m *testing.M) { - flag.Parse() - current, err := os.Getwd() - if err != nil { - panic(err) - } - - path := current + "/wordpress.cnf" - os.Setenv("EXTRA_MY_CNF", path) - exitCode := func() int { - clusterInstance = cluster.NewCluster(Cell, "localhost") - defer clusterInstance.Teardown() - - // Start topo server - err := clusterInstance.StartTopo() - if err != nil { - return 1 - } - - // Start keyspace - keyspace := &cluster.Keyspace{ - Name: KeyspaceName, - SchemaSQL: "", - VSchema: VSchema, - } - err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, true) - if err != nil { - return 1 - } - - // Start vtgate - err = clusterInstance.StartVtgate() - if err != nil { - return 1 - } - vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, - } - - startVanillaMySQL() - - return m.Run() - }() - - if mysqld != nil { - fmt.Println("killing mysqld after tests") - mysqld.Process.Signal(syscall.SIGKILL) - } - - os.Exit(exitCode) -} - -var mysqld *exec.Cmd -var socketFile string - -func startVanillaMySQL() { - handleErr := func(err error) { - if err != nil { - panic(err) - } - } - - tmpDir, err := ioutil.TempDir("", "vitess_tests") - handleErr(err) - 
- vtMysqlRoot, err := vtenv.VtMysqlRoot() - handleErr(err) - - mysqldPath, err := binaryPath(vtMysqlRoot, "mysqld") - handleErr(err) - - datadir := fmt.Sprintf("--datadir=%s", tmpDir) - basedir := "--basedir=" + vtMysqlRoot - args := []string{ - basedir, - datadir, - "--initialize-insecure", - } - - initDbCmd, err := startCommand(mysqldPath, args) - handleErr(err) - - err = initDbCmd.Wait() - handleErr(err) - - tmpPort, err := getFreePort() - handleErr(err) - socketFile = tmpDir + "socket_file" - args = []string{ - basedir, - datadir, - fmt.Sprintf("--port=%d", tmpPort), - "--socket=" + socketFile, - } - - mysqld, err = startCommand(mysqldPath, args) - handleErr(err) - time.Sleep(1 * time.Second) // give mysqld a chance to start listening to the socket before running tests - - planMysql, err := sql.Open("mysql", fmt.Sprintf("root@unix(%s)/", socketFile)) - handleErr(err) - defer planMysql.Close() - _, err = planMysql.Exec("create database wordpressdb") - handleErr(err) -} - -func startCommand(mysqldPath string, args []string) (*exec.Cmd, error) { - cmd := exec.Command(mysqldPath, args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd, cmd.Start() -} - -// binaryPath does a limited path lookup for a command, -// searching only within sbin and bin in the given root. 
-func binaryPath(root, binary string) (string, error) { - subdirs := []string{"sbin", "bin", "libexec", "scripts"} - for _, subdir := range subdirs { - binPath := path.Join(root, subdir, binary) - if _, err := os.Stat(binPath); err == nil { - return binPath, nil - } - } - return "", fmt.Errorf("%s not found in any of %s/{%s}", - binary, root, strings.Join(subdirs, ",")) -} - -func getFreePort() (int, error) { - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err != nil { - return 0, err - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return 0, err - } - defer l.Close() - return l.Addr().(*net.TCPAddr).Port, nil -} diff --git a/go/test/endtoend/apps/wordpress/wordpres_install_querylog.txt b/go/test/endtoend/apps/wordpress/wordpres_install_querylog.txt deleted file mode 100644 index c1a286b28a1..00000000000 --- a/go/test/endtoend/apps/wordpress/wordpres_install_querylog.txt +++ /dev/null @@ -1,1307 +0,0 @@ -2020-03-02T15:06:48.894157Z 2 Connect root@localhost on using Socket -2020-03-02T15:06:48.894195Z 2 Connect Access denied for user 'root'@'localhost' -2020-03-02T15:06:56.657050Z 3 Connect root@localhost on using Socket -2020-03-02T15:06:56.657084Z 3 Connect Access denied for user 'root'@'localhost' -2020-03-02T15:07:10.169849Z 4 Connect root@localhost on using Socket -2020-03-02T15:07:10.169888Z 4 Connect Access denied for user 'root'@'localhost' -2020-03-02T15:07:32.400210Z 5 Connect root@localhost on using Socket -2020-03-02T15:07:32.400439Z 5 Query select @@version_comment limit 1 -2020-03-02T15:08:04.272295Z 5 Query select 42 -2020-03-02T15:08:22.551372Z 5 Quit -2020-03-02T15:15:24.532950Z 6 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:15:24.533235Z 6 Query SELECT @@SESSION.sql_mode -2020-03-02T15:15:24.533488Z 6 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:15:24.533709Z 6 Init DB wordpressdb -2020-03-02T15:15:24.533921Z 6 Query SELECT 
wp_ -2020-03-02T15:15:24.536723Z 6 Quit -2020-03-02T15:15:40.733513Z 7 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:15:40.733827Z 7 Query SET NAMES utf8mb4 -2020-03-02T15:15:40.734252Z 7 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:15:40.734422Z 7 Query SELECT @@SESSION.sql_mode -2020-03-02T15:15:40.734607Z 7 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:15:40.734746Z 7 Init DB wordpressdb -2020-03-02T15:15:40.735478Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' -2020-03-02T15:15:40.735770Z 7 Query DESCRIBE wp_users -2020-03-02T15:15:40.736213Z 7 Query DESCRIBE wp_usermeta -2020-03-02T15:15:40.736482Z 7 Query DESCRIBE wp_posts -2020-03-02T15:15:40.736758Z 7 Query DESCRIBE wp_comments -2020-03-02T15:15:40.737028Z 7 Query DESCRIBE wp_links -2020-03-02T15:15:40.737302Z 7 Query DESCRIBE wp_options -2020-03-02T15:15:40.737540Z 7 Query DESCRIBE wp_postmeta -2020-03-02T15:15:40.737797Z 7 Query DESCRIBE wp_terms -2020-03-02T15:15:40.738043Z 7 Query DESCRIBE wp_term_taxonomy -2020-03-02T15:15:40.738300Z 7 Query DESCRIBE wp_term_relationships -2020-03-02T15:15:40.738544Z 7 Query DESCRIBE wp_termmeta -2020-03-02T15:15:40.738792Z 7 Query DESCRIBE wp_commentmeta -2020-03-02T15:15:40.740823Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:40.741046Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:40.741299Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:15:40.741532Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:40.741738Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:40.742722Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 
-2020-03-02T15:15:40.742903Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'active_plugins' LIMIT 1 -2020-03-02T15:15:40.743063Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'hack_file' LIMIT 1 -2020-03-02T15:15:40.743507Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:40.743748Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:15:40.743910Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'template' LIMIT 1 -2020-03-02T15:15:40.744127Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'stylesheet' LIMIT 1 -2020-03-02T15:15:40.744350Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:15:40.745116Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'category_base' LIMIT 1 -2020-03-02T15:15:40.745338Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'category_base' LIMIT 1 -2020-03-02T15:15:40.745553Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'tag_base' LIMIT 1 -2020-03-02T15:15:40.745769Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'tag_base' LIMIT 1 -2020-03-02T15:15:40.746008Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:40.746278Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:40.746513Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:40.746792Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:40.747038Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:40.747229Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' 
LIMIT 1 -2020-03-02T15:15:40.747396Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' -2020-03-02T15:15:40.747530Z 7 Query DESCRIBE wp_users -2020-03-02T15:15:40.747921Z 7 Query DESCRIBE wp_usermeta -2020-03-02T15:15:40.748168Z 7 Query DESCRIBE wp_posts -2020-03-02T15:15:40.748378Z 7 Query DESCRIBE wp_comments -2020-03-02T15:15:40.748565Z 7 Query DESCRIBE wp_links -2020-03-02T15:15:40.748809Z 7 Query DESCRIBE wp_options -2020-03-02T15:15:40.749065Z 7 Query DESCRIBE wp_postmeta -2020-03-02T15:15:40.749328Z 7 Query DESCRIBE wp_terms -2020-03-02T15:15:40.749597Z 7 Query DESCRIBE wp_term_taxonomy -2020-03-02T15:15:40.749804Z 7 Query DESCRIBE wp_term_relationships -2020-03-02T15:15:40.750008Z 7 Query DESCRIBE wp_termmeta -2020-03-02T15:15:40.750221Z 7 Query DESCRIBE wp_commentmeta -2020-03-02T15:15:40.750493Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'use_smilies' LIMIT 1 -2020-03-02T15:15:40.750670Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:15:40.750860Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:15:40.750992Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:15:40.751148Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:15:40.751438Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:15:40.752300Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' -2020-03-02T15:15:40.752519Z 7 Query DESCRIBE wp_users -2020-03-02T15:15:40.752803Z 7 Query DESCRIBE wp_usermeta -2020-03-02T15:15:40.753046Z 7 Query DESCRIBE wp_posts -2020-03-02T15:15:40.753301Z 7 Query DESCRIBE wp_comments -2020-03-02T15:15:40.753518Z 7 Query DESCRIBE wp_links -2020-03-02T15:15:40.753742Z 7 Query DESCRIBE wp_options -2020-03-02T15:15:40.753960Z 7 Query DESCRIBE wp_postmeta 
-2020-03-02T15:15:40.754155Z 7 Query DESCRIBE wp_terms -2020-03-02T15:15:40.754345Z 7 Query DESCRIBE wp_term_taxonomy -2020-03-02T15:15:40.754585Z 7 Query DESCRIBE wp_term_relationships -2020-03-02T15:15:40.754825Z 7 Query DESCRIBE wp_termmeta -2020-03-02T15:15:40.755017Z 7 Query DESCRIBE wp_commentmeta -2020-03-02T15:15:40.755846Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'ftp_credentials' LIMIT 1 -2020-03-02T15:15:40.756132Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'blog_charset' LIMIT 1 -2020-03-02T15:15:40.756480Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:15:41.180189Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'html_type' LIMIT 1 -2020-03-02T15:15:41.180505Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'html_type' LIMIT 1 -2020-03-02T15:15:41.180856Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.181351Z 7 Query SHOW TABLES LIKE 'wp\\_users' -2020-03-02T15:15:41.181843Z 7 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:15:41.182062Z 7 Query SELECT option_name, option_value FROM wp_options -2020-03-02T15:15:41.182363Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.182597Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.182850Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:41.183130Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:15:41.183393Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.183707Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.183994Z 7 Query SELECT option_value FROM wp_options 
WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.184221Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.184458Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.184670Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.185014Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'start_of_week' LIMIT 1 -2020-03-02T15:15:41.185282Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'time_format' LIMIT 1 -2020-03-02T15:15:41.185559Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'date_format' LIMIT 1 -2020-03-02T15:15:41.185865Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:15:41.186196Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:15:41.186472Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.186700Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.186877Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.187115Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:15:41.187296Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:15:41.187517Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:15:41.187863Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'time_format' LIMIT 1 -2020-03-02T15:15:41.188127Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'date_format' LIMIT 1 -2020-03-02T15:15:41.188330Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'timezone_string' LIMIT 1 
-2020-03-02T15:15:41.188468Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'gmt_offset' LIMIT 1 -2020-03-02T15:15:41.188708Z 7 Query SELECT option_value FROM wp_options WHERE option_name = 'timezone_string' LIMIT 1 -2020-03-02T15:15:41.189234Z 7 Quit -2020-03-02T15:16:50.397006Z 8 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:16:50.397306Z 8 Query SET NAMES utf8mb4 -2020-03-02T15:16:50.397965Z 8 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:16:50.398116Z 8 Query SELECT @@SESSION.sql_mode -2020-03-02T15:16:50.398321Z 8 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:16:50.398490Z 8 Init DB wordpressdb -2020-03-02T15:16:50.399252Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' -2020-03-02T15:16:50.399444Z 8 Query DESCRIBE wp_users -2020-03-02T15:16:50.399897Z 8 Query DESCRIBE wp_usermeta -2020-03-02T15:16:50.400176Z 8 Query DESCRIBE wp_posts -2020-03-02T15:16:50.400447Z 8 Query DESCRIBE wp_comments -2020-03-02T15:16:50.400700Z 8 Query DESCRIBE wp_links -2020-03-02T15:16:50.400971Z 8 Query DESCRIBE wp_options -2020-03-02T15:16:50.401218Z 8 Query DESCRIBE wp_postmeta -2020-03-02T15:16:50.401467Z 8 Query DESCRIBE wp_terms -2020-03-02T15:16:50.401751Z 8 Query DESCRIBE wp_term_taxonomy -2020-03-02T15:16:50.404026Z 8 Query DESCRIBE wp_term_relationships -2020-03-02T15:16:50.404322Z 8 Query DESCRIBE wp_termmeta -2020-03-02T15:16:50.404623Z 8 Query DESCRIBE wp_commentmeta -2020-03-02T15:16:50.406577Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:50.407092Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:50.407453Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:50.409881Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 
-2020-03-02T15:16:50.410233Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:50.411344Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:16:50.411551Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'active_plugins' LIMIT 1 -2020-03-02T15:16:50.411902Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'hack_file' LIMIT 1 -2020-03-02T15:16:50.412427Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:50.412700Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:50.412975Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'template' LIMIT 1 -2020-03-02T15:16:50.413196Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'stylesheet' LIMIT 1 -2020-03-02T15:16:50.413430Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:16:50.414287Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'category_base' LIMIT 1 -2020-03-02T15:16:50.414495Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'category_base' LIMIT 1 -2020-03-02T15:16:50.414730Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'tag_base' LIMIT 1 -2020-03-02T15:16:50.414998Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'tag_base' LIMIT 1 -2020-03-02T15:16:50.415245Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:50.415508Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:50.416000Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:50.416308Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 
-2020-03-02T15:16:50.416613Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:50.416858Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:50.417075Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' -2020-03-02T15:16:50.417302Z 8 Query DESCRIBE wp_users -2020-03-02T15:16:50.417651Z 8 Query DESCRIBE wp_usermeta -2020-03-02T15:16:50.417960Z 8 Query DESCRIBE wp_posts -2020-03-02T15:16:50.418225Z 8 Query DESCRIBE wp_comments -2020-03-02T15:16:50.418594Z 8 Query DESCRIBE wp_links -2020-03-02T15:16:50.418909Z 8 Query DESCRIBE wp_options -2020-03-02T15:16:50.419203Z 8 Query DESCRIBE wp_postmeta -2020-03-02T15:16:50.419511Z 8 Query DESCRIBE wp_terms -2020-03-02T15:16:50.419873Z 8 Query DESCRIBE wp_term_taxonomy -2020-03-02T15:16:50.420130Z 8 Query DESCRIBE wp_term_relationships -2020-03-02T15:16:50.420408Z 8 Query DESCRIBE wp_termmeta -2020-03-02T15:16:50.420709Z 8 Query DESCRIBE wp_commentmeta -2020-03-02T15:16:50.421024Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'use_smilies' LIMIT 1 -2020-03-02T15:16:50.421256Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:16:50.421538Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:16:50.421744Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:16:50.421925Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:16:50.422196Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:16:50.422968Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' -2020-03-02T15:16:50.423180Z 8 Query DESCRIBE wp_users -2020-03-02T15:16:50.423453Z 8 Query DESCRIBE wp_usermeta -2020-03-02T15:16:50.423692Z 8 Query DESCRIBE wp_posts 
-2020-03-02T15:16:50.423997Z 8 Query DESCRIBE wp_comments -2020-03-02T15:16:50.424246Z 8 Query DESCRIBE wp_links -2020-03-02T15:16:50.424478Z 8 Query DESCRIBE wp_options -2020-03-02T15:16:50.424707Z 8 Query DESCRIBE wp_postmeta -2020-03-02T15:16:50.424939Z 8 Query DESCRIBE wp_terms -2020-03-02T15:16:50.425161Z 8 Query DESCRIBE wp_term_taxonomy -2020-03-02T15:16:50.425384Z 8 Query DESCRIBE wp_term_relationships -2020-03-02T15:16:50.425600Z 8 Query DESCRIBE wp_termmeta -2020-03-02T15:16:50.425867Z 8 Query DESCRIBE wp_commentmeta -2020-03-02T15:16:50.426469Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'html_type' LIMIT 1 -2020-03-02T15:16:50.426697Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'blog_charset' LIMIT 1 -2020-03-02T15:16:50.426932Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'html_type' LIMIT 1 -2020-03-02T15:16:50.427332Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:50.428140Z 8 Query FAIL FAIL FAIL wp_users -2020-03-02T15:16:50.428469Z 8 Query DESCRIBE wp_usermeta -2020-03-02T15:16:50.428754Z 8 Query DESCRIBE wp_termmeta -2020-03-02T15:16:50.429109Z 8 Query DESCRIBE wp_terms -2020-03-02T15:16:50.429424Z 8 Query DESCRIBE wp_term_taxonomy -2020-03-02T15:16:50.429711Z 8 Query DESCRIBE wp_term_relationships -2020-03-02T15:16:50.430025Z 8 Query DESCRIBE wp_commentmeta -2020-03-02T15:16:50.430306Z 8 Query DESCRIBE wp_comments -2020-03-02T15:16:50.430609Z 8 Query DESCRIBE wp_links -2020-03-02T15:16:50.430868Z 8 Query DESCRIBE wp_options -2020-03-02T15:16:50.431121Z 8 Query DESCRIBE wp_postmeta -2020-03-02T15:16:50.431423Z 8 Query DESCRIBE wp_posts -2020-03-02T15:16:50.431748Z 8 Query CREATE TABLE wp_users ( - ID bigint(20) unsigned NOT NULL auto_increment, - user_login varchar(60) NOT NULL default '', - user_pass varchar(255) NOT NULL default '', - user_nicename varchar(50) NOT NULL default '', - user_email varchar(100) NOT NULL default '', - 
user_url varchar(100) NOT NULL default '', - user_registered datetime NOT NULL default '0000-00-00 00:00:00', - user_activation_key varchar(255) NOT NULL default '', - user_status int(11) NOT NULL default '0', - display_name varchar(250) NOT NULL default '', - PRIMARY KEY (ID), - KEY user_login_key (user_login), - KEY user_nicename (user_nicename), - KEY user_email (user_email) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.473912Z 8 Query CREATE TABLE wp_usermeta ( - umeta_id bigint(20) unsigned NOT NULL auto_increment, - user_id bigint(20) unsigned NOT NULL default '0', - meta_key varchar(255) default NULL, - meta_value longtext, - PRIMARY KEY (umeta_id), - KEY user_id (user_id), - KEY meta_key (meta_key(191)) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.545635Z 8 Query CREATE TABLE wp_termmeta ( - meta_id bigint(20) unsigned NOT NULL auto_increment, - term_id bigint(20) unsigned NOT NULL default '0', - meta_key varchar(255) default NULL, - meta_value longtext, - PRIMARY KEY (meta_id), - KEY term_id (term_id), - KEY meta_key (meta_key(191)) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.609438Z 8 Query CREATE TABLE wp_terms ( - term_id bigint(20) unsigned NOT NULL auto_increment, - name varchar(200) NOT NULL default '', - slug varchar(200) NOT NULL default '', - term_group bigint(10) NOT NULL default 0, - PRIMARY KEY (term_id), - KEY slug (slug(191)), - KEY name (name(191)) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.681655Z 8 Query CREATE TABLE wp_term_taxonomy ( - term_taxonomy_id bigint(20) unsigned NOT NULL auto_increment, - term_id bigint(20) unsigned NOT NULL default 0, - taxonomy varchar(32) NOT NULL default '', - description longtext NOT NULL, - parent bigint(20) unsigned NOT NULL default 0, - count bigint(20) NOT NULL default 0, - PRIMARY KEY (term_taxonomy_id), - UNIQUE KEY term_id_taxonomy 
(term_id,taxonomy), - KEY taxonomy (taxonomy) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.746487Z 8 Query CREATE TABLE wp_term_relationships ( - object_id bigint(20) unsigned NOT NULL default 0, - term_taxonomy_id bigint(20) unsigned NOT NULL default 0, - term_order int(11) NOT NULL default 0, - PRIMARY KEY (object_id,term_taxonomy_id), - KEY term_taxonomy_id (term_taxonomy_id) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.793460Z 8 Query CREATE TABLE wp_commentmeta ( - meta_id bigint(20) unsigned NOT NULL auto_increment, - comment_id bigint(20) unsigned NOT NULL default '0', - meta_key varchar(255) default NULL, - meta_value longtext, - PRIMARY KEY (meta_id), - KEY comment_id (comment_id), - KEY meta_key (meta_key(191)) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.864137Z 8 Query CREATE TABLE wp_comments ( - comment_ID bigint(20) unsigned NOT NULL auto_increment, - comment_post_ID bigint(20) unsigned NOT NULL default '0', - comment_author tinytext NOT NULL, - comment_author_email varchar(100) NOT NULL default '', - comment_author_url varchar(200) NOT NULL default '', - comment_author_IP varchar(100) NOT NULL default '', - comment_date datetime NOT NULL default '0000-00-00 00:00:00', - comment_date_gmt datetime NOT NULL default '0000-00-00 00:00:00', - comment_content text NOT NULL, - comment_karma int(11) NOT NULL default '0', - comment_approved varchar(20) NOT NULL default '1', - comment_agent varchar(255) NOT NULL default '', - comment_type varchar(20) NOT NULL default '', - comment_parent bigint(20) unsigned NOT NULL default '0', - user_id bigint(20) unsigned NOT NULL default '0', - PRIMARY KEY (comment_ID), - KEY comment_post_ID (comment_post_ID), - KEY comment_approved_date_gmt (comment_approved,comment_date_gmt), - KEY comment_date_gmt (comment_date_gmt), - KEY comment_parent (comment_parent), - KEY comment_author_email 
(comment_author_email(10)) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:50.954822Z 8 Query CREATE TABLE wp_links ( - link_id bigint(20) unsigned NOT NULL auto_increment, - link_url varchar(255) NOT NULL default '', - link_name varchar(255) NOT NULL default '', - link_image varchar(255) NOT NULL default '', - link_target varchar(25) NOT NULL default '', - link_description varchar(255) NOT NULL default '', - link_visible varchar(20) NOT NULL default 'Y', - link_owner bigint(20) unsigned NOT NULL default '1', - link_rating int(11) NOT NULL default '0', - link_updated datetime NOT NULL default '0000-00-00 00:00:00', - link_rel varchar(255) NOT NULL default '', - link_notes mediumtext NOT NULL, - link_rss varchar(255) NOT NULL default '', - PRIMARY KEY (link_id), - KEY link_visible (link_visible) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:51.015931Z 8 Query CREATE TABLE wp_options ( - option_id bigint(20) unsigned NOT NULL auto_increment, - option_name varchar(191) NOT NULL default '', - option_value longtext NOT NULL, - autoload varchar(20) NOT NULL default 'yes', - PRIMARY KEY (option_id), - UNIQUE KEY option_name (option_name), - KEY autoload (autoload) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:51.081235Z 8 Query CREATE TABLE wp_postmeta ( - meta_id bigint(20) unsigned NOT NULL auto_increment, - post_id bigint(20) unsigned NOT NULL default '0', - meta_key varchar(255) default NULL, - meta_value longtext, - PRIMARY KEY (meta_id), - KEY post_id (post_id), - KEY meta_key (meta_key(191)) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:51.162003Z 8 Query CREATE TABLE wp_posts ( - ID bigint(20) unsigned NOT NULL auto_increment, - post_author bigint(20) unsigned NOT NULL default '0', - post_date datetime NOT NULL default '0000-00-00 00:00:00', - post_date_gmt datetime NOT NULL default '0000-00-00 00:00:00', - post_content 
longtext NOT NULL, - post_title text NOT NULL, - post_excerpt text NOT NULL, - post_status varchar(20) NOT NULL default 'publish', - comment_status varchar(20) NOT NULL default 'open', - ping_status varchar(20) NOT NULL default 'open', - post_password varchar(255) NOT NULL default '', - post_name varchar(200) NOT NULL default '', - to_ping text NOT NULL, - pinged text NOT NULL, - post_modified datetime NOT NULL default '0000-00-00 00:00:00', - post_modified_gmt datetime NOT NULL default '0000-00-00 00:00:00', - post_content_filtered longtext NOT NULL, - post_parent bigint(20) unsigned NOT NULL default '0', - guid varchar(255) NOT NULL default '', - menu_order int(11) NOT NULL default '0', - post_type varchar(20) NOT NULL default 'post', - post_mime_type varchar(100) NOT NULL default '', - comment_count bigint(20) NOT NULL default '0', - PRIMARY KEY (ID), - KEY post_name (post_name(191)), - KEY type_status_date (post_type,post_status,post_date,ID), - KEY post_parent (post_parent), - KEY post_author (post_author) -) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_520_ci -2020-03-02T15:16:51.236431Z 8 Query SELECT option_name FROM wp_options WHERE option_name in ( 'siteurl', 'home', 'blogname', 'blogdescription', 'users_can_register', 'admin_email', 'start_of_week', 'use_balanceTags', 'use_smilies', 'require_name_email', 'comments_notify', 'posts_per_rss', 'rss_use_excerpt', 'mailserver_url', 'mailserver_login', 'mailserver_pass', 'mailserver_port', 'default_category', 'default_comment_status', 'default_ping_status', 'default_pingback_flag', 'posts_per_page', 'date_format', 'time_format', 'links_updated_date_format', 'comment_moderation', 'moderation_notify', 'permalink_structure', 'rewrite_rules', 'hack_file', 'blog_charset', 'moderation_keys', 'active_plugins', 'category_base', 'ping_sites', 'comment_max_links', 'gmt_offset', 'default_email_category', 'recently_edited', 'template', 'stylesheet', 'comment_whitelist', 'blacklist_keys', 'comment_registration', 
'html_type', 'use_trackback', 'default_role', 'db_version', 'uploads_use_yearmonth_folders', 'upload_path', 'blog_public', 'default_link_category', 'show_on_front', 'tag_base', 'show_avatars', 'avatar_rating', 'upload_url_path', 'thumbnail_size_w', 'thumbnail_size_h', 'thumbnail_crop', 'medium_size_w', 'medium_size_h', 'avatar_default', 'large_size_w', 'large_size_h', 'image_default_link_type', 'image_default_size', 'image_default_align', 'close_comments_for_old_posts', 'close_comments_days_old', 'thread_comments', 'thread_comments_depth', 'page_comments', 'comments_per_page', 'default_comments_page', 'comment_order', 'sticky_posts', 'widget_categories', 'widget_text', 'widget_rss', 'uninstall_plugins', 'timezone_string', 'page_for_posts', 'page_on_front', 'default_post_format', 'link_manager_enabled', 'finished_splitting_shared_terms', 'site_icon', 'medium_large_size_w', 'medium_large_size_h', 'wp_page_for_privacy_policy', 'show_comments_cookies_opt_in', 'admin_email_lifespan', 'initial_db_version' ) -2020-03-02T15:16:51.241730Z 8 Query INSERT INTO wp_options (option_name, option_value, autoload) VALUES ('siteurl', 'http://127.0.0.1/wordpress', 'yes'), ('home', 'http://127.0.0.1/wordpress', 'yes'), ('blogname', 'My Site', 'yes'), ('blogdescription', 'Just another WordPress site', 'yes'), ('users_can_register', '0', 'yes'), ('admin_email', 'you@example.com', 'yes'), ('start_of_week', '1', 'yes'), ('use_balanceTags', '0', 'yes'), ('use_smilies', '1', 'yes'), ('require_name_email', '1', 'yes'), ('comments_notify', '1', 'yes'), ('posts_per_rss', '10', 'yes'), ('rss_use_excerpt', '0', 'yes'), ('mailserver_url', 'mail.example.com', 'yes'), ('mailserver_login', 'login@example.com', 'yes'), ('mailserver_pass', 'password', 'yes'), ('mailserver_port', '110', 'yes'), ('default_category', '1', 'yes'), ('default_comment_status', 'open', 'yes'), ('default_ping_status', 'open', 'yes'), ('default_pingback_flag', '1', 'yes'), ('posts_per_page', '10', 'yes'), ('date_format', 'F j, 
Y', 'yes'), ('time_format', 'g:i a', 'yes'), ('links_updated_date_format', 'F j, Y g:i a', 'yes'), ('comment_moderation', '0', 'yes'), ('moderation_notify', '1', 'yes'), ('permalink_structure', '', 'yes'), ('rewrite_rules', '', 'yes'), ('hack_file', '0', 'yes'), ('blog_charset', 'UTF-8', 'yes'), ('moderation_keys', '', 'no'), ('active_plugins', 'a:0:{}', 'yes'), ('category_base', '', 'yes'), ('ping_sites', 'http://rpc.pingomatic.com/', 'yes'), ('comment_max_links', '2', 'yes'), ('gmt_offset', '0', 'yes'), ('default_email_category', '1', 'yes'), ('recently_edited', '', 'no'), ('template', 'twentytwenty', 'yes'), ('stylesheet', 'twentytwenty', 'yes'), ('comment_whitelist', '1', 'yes'), ('blacklist_keys', '', 'no'), ('comment_registration', '0', 'yes'), ('html_type', 'text/html', 'yes'), ('use_trackback', '0', 'yes'), ('default_role', 'subscriber', 'yes'), ('db_version', '45805', 'yes'), ('uploads_use_yearmonth_folders', '1', 'yes'), ('upload_path', '', 'yes'), ('blog_public', '1', 'yes'), ('default_link_category', '2', 'yes'), ('show_on_front', 'posts', 'yes'), ('tag_base', '', 'yes'), ('show_avatars', '1', 'yes'), ('avatar_rating', 'G', 'yes'), ('upload_url_path', '', 'yes'), ('thumbnail_size_w', '150', 'yes'), ('thumbnail_size_h', '150', 'yes'), ('thumbnail_crop', '1', 'yes'), ('medium_size_w', '300', 'yes'), ('medium_size_h', '300', 'yes'), ('avatar_default', 'mystery', 'yes'), ('large_size_w', '1024', 'yes'), ('large_size_h', '1024', 'yes'), ('image_default_link_type', 'none', 'yes'), ('image_default_size', '', 'yes'), ('image_default_align', '', 'yes'), ('close_comments_for_old_posts', '0', 'yes'), ('close_comments_days_old', '14', 'yes'), ('thread_comments', '1', 'yes'), ('thread_comments_depth', '5', 'yes'), ('page_comments', '0', 'yes'), ('comments_per_page', '50', 'yes'), ('default_comments_page', 'newest', 'yes'), ('comment_order', 'asc', 'yes'), ('sticky_posts', 'a:0:{}', 'yes'), ('widget_categories', 'a:0:{}', 'yes'), ('widget_text', 'a:0:{}', 'yes'), 
('widget_rss', 'a:0:{}', 'yes'), ('uninstall_plugins', 'a:0:{}', 'no'), ('timezone_string', '', 'yes'), ('page_for_posts', '0', 'yes'), ('page_on_front', '0', 'yes'), ('default_post_format', '0', 'yes'), ('link_manager_enabled', '0', 'yes'), ('finished_splitting_shared_terms', '1', 'yes'), ('site_icon', '0', 'yes'), ('medium_large_size_w', '768', 'yes'), ('medium_large_size_h', '0', 'yes'), ('wp_page_for_privacy_policy', '0', 'yes'), ('show_comments_cookies_opt_in', '1', 'yes'), ('admin_email_lifespan', '1598714211', 'yes'), ('initial_db_version', '45805', 'yes') -2020-03-02T15:16:51.253244Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' -2020-03-02T15:16:51.253581Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'blodotgsping_url' -2020-03-02T15:16:51.253827Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'bodyterminator' -2020-03-02T15:16:51.254038Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'emailtestonly' -2020-03-02T15:16:51.254240Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'phoneemail_separator' -2020-03-02T15:16:51.254431Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'smilies_directory' -2020-03-02T15:16:51.254668Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'subjectprefix' -2020-03-02T15:16:51.254847Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_bbcode' -2020-03-02T15:16:51.255036Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_blodotgsping' -2020-03-02T15:16:51.255309Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_phoneemail' -2020-03-02T15:16:51.255549Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_quicktags' -2020-03-02T15:16:51.255788Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_weblogsping' -2020-03-02T15:16:51.256025Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'weblogs_cache_file' -2020-03-02T15:16:51.256232Z 8 Query SELECT 
autoload FROM wp_options WHERE option_name = 'use_preview' -2020-03-02T15:16:51.256477Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_htmltrans' -2020-03-02T15:16:51.256683Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'smilies_directory' -2020-03-02T15:16:51.256944Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'fileupload_allowedusers' -2020-03-02T15:16:51.257150Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_phoneemail' -2020-03-02T15:16:51.257393Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'default_post_status' -2020-03-02T15:16:51.257602Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'default_post_category' -2020-03-02T15:16:51.257792Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'archive_mode' -2020-03-02T15:16:51.257979Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'time_difference' -2020-03-02T15:16:51.258165Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_minadminlevel' -2020-03-02T15:16:51.258391Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_use_adminlevels' -2020-03-02T15:16:51.258649Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_type' -2020-03-02T15:16:51.258909Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_char' -2020-03-02T15:16:51.259166Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_ignore_zero' -2020-03-02T15:16:51.259425Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_single_image' -2020-03-02T15:16:51.259683Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image0' -2020-03-02T15:16:51.260031Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image1' -2020-03-02T15:16:51.260373Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image2' -2020-03-02T15:16:51.260570Z 8 Query SELECT autoload FROM 
wp_options WHERE option_name = 'links_rating_image3' -2020-03-02T15:16:51.260822Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image4' -2020-03-02T15:16:51.261067Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image5' -2020-03-02T15:16:51.261980Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image6' -2020-03-02T15:16:51.262213Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image7' -2020-03-02T15:16:51.262445Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image8' -2020-03-02T15:16:51.262678Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_rating_image9' -2020-03-02T15:16:51.262903Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_recently_updated_time' -2020-03-02T15:16:51.263148Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_recently_updated_prepend' -2020-03-02T15:16:51.263414Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'links_recently_updated_append' -2020-03-02T15:16:51.263667Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'weblogs_cacheminutes' -2020-03-02T15:16:51.263973Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'comment_allowed_tags' -2020-03-02T15:16:51.264190Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'search_engine_friendly_urls' -2020-03-02T15:16:51.264388Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'default_geourl_lat' -2020-03-02T15:16:51.264616Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'default_geourl_lon' -2020-03-02T15:16:51.264821Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_default_geourl' -2020-03-02T15:16:51.265060Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'weblogs_xml_url' -2020-03-02T15:16:51.265287Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'new_users_can_blog' -2020-03-02T15:16:51.265450Z 
8 Query SELECT autoload FROM wp_options WHERE option_name = '_wpnonce' -2020-03-02T15:16:51.265694Z 8 Query SELECT autoload FROM wp_options WHERE option_name = '_wp_http_referer' -2020-03-02T15:16:51.265889Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'Update' -2020-03-02T15:16:51.266080Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'action' -2020-03-02T15:16:51.266273Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'rich_editing' -2020-03-02T15:16:51.266451Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'autosave_interval' -2020-03-02T15:16:51.266633Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'deactivated_plugins' -2020-03-02T15:16:51.266834Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'can_compress_scripts' -2020-03-02T15:16:51.267015Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'page_uris' -2020-03-02T15:16:51.267196Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'update_core' -2020-03-02T15:16:51.267370Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'update_plugins' -2020-03-02T15:16:51.267543Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'update_themes' -2020-03-02T15:16:51.267854Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'doing_cron' -2020-03-02T15:16:51.268129Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'random_seed' -2020-03-02T15:16:51.268347Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'rss_excerpt_length' -2020-03-02T15:16:51.268530Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'secret' -2020-03-02T15:16:51.268706Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'use_linksupdate' -2020-03-02T15:16:51.268887Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'default_comment_status_page' -2020-03-02T15:16:51.269072Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'wporg_popular_tags' 
-2020-03-02T15:16:51.269250Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'what_to_show' -2020-03-02T15:16:51.269424Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'rss_language' -2020-03-02T15:16:51.269598Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'language' -2020-03-02T15:16:51.269774Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'enable_xmlrpc' -2020-03-02T15:16:51.269948Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'enable_app' -2020-03-02T15:16:51.270122Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'embed_autourls' -2020-03-02T15:16:51.270297Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'default_post_edit_rows' -2020-03-02T15:16:51.270473Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'gzipcompression' -2020-03-02T15:16:51.270651Z 8 Query SELECT autoload FROM wp_options WHERE option_name = 'advanced_edit' -2020-03-02T15:16:51.270820Z 8 Query DELETE FROM wp_options WHERE option_name REGEXP '^rss_[0-9a-f]{32}(_ts)?$' -2020-03-02T15:16:51.271454Z 8 Query DELETE a, b FROM wp_options a, wp_options b - WHERE a.option_name LIKE '\\_transient\\_%' - AND a.option_name NOT LIKE '\\_transient\\_timeout\\_%' - AND b.option_name = CONCAT( '_transient_timeout_', SUBSTRING( a.option_name, 12 ) ) - AND b.option_value < 1583162211 -2020-03-02T15:16:51.271864Z 8 Query DELETE a, b FROM wp_options a, wp_options b - WHERE a.option_name LIKE '\\_site\\_transient\\_%' - AND a.option_name NOT LIKE '\\_site\\_transient\\_timeout\\_%' - AND b.option_name = CONCAT( '_site_transient_timeout_', SUBSTRING( a.option_name, 17 ) ) - AND b.option_value < 1583162211 -2020-03-02T15:16:51.272148Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.272368Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.272740Z 8 Query INSERT INTO `wp_options` (`option_name`, 
`option_value`, `autoload`) VALUES ('wp_user_roles', 'a:1:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:0:{}}}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:51.275230Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.275448Z 8 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:16:51.275842Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:2:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:0:{}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.281457Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.281902Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:3:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:0:{}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.283305Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.283498Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:4:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:0:{}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.291659Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.292017Z 8 
Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:0:{}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.293811Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.294046Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:1:{s:13:\"switch_themes\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.301890Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.302235Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:2:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.304151Z 8 Query SELECT option_value FROM wp_options 
WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.304426Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:3:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.306162Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.306409Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:4:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.314669Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.315063Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:5:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.316785Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.317081Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:6:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.325683Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.326047Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:7:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.335095Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.335436Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:8:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.337131Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.337401Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:9:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.345668Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.346066Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:10:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.356783Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.357124Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:11:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.365161Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.365490Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:12:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.366761Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.367001Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:13:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.375249Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.375719Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:14:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.377359Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.377746Z 
8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:15:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.385243Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.385606Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:16:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE 
`option_name` = 'wp_user_roles' -2020-03-02T15:16:51.387045Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.387313Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:17:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.395579Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.396114Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:18:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.397707Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.397982Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:19:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.398683Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.398939Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:20:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.399781Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.400145Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:21:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.401591Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.401887Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:22:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.409590Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.409938Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:23:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.411823Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.412132Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:24:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.413712Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.413977Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:25:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.423525Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.423957Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:26:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.425641Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.425969Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:27:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.433631Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.433971Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:28:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.435133Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.435386Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:29:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.436068Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.436333Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:0:{}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.437429Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.437680Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:1:{s:17:\"moderate_comments\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.445778Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.446108Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:2:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.447565Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.447883Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:3:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.456680Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.457098Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:4:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.464837Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.465224Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:5:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.473377Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.473834Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:6:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.475141Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.475447Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:7:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.483623Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.484309Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:8:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.485702Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.486161Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:9:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.494213Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.494619Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:10:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.496518Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.496855Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:11:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.498105Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.498378Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:12:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.506897Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.507283Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:13:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.508762Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.509107Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:14:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.517888Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.518246Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:15:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.527100Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.527634Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:16:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.529546Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.529898Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:17:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.537967Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.538378Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:18:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.546649Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.547005Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:0:{}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.558255Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.558637Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:1:{s:12:\"upload_files\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.560061Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.560503Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:2:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.569016Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.569449Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:3:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.577487Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.577882Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:4:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.586179Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.586784Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:5:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.594695Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.595191Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:6:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.603288Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.603641Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:7:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.605045Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.605375Z 8 Query UPDATE `wp_options` 
SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:0:{}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.617684Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 
-2020-03-02T15:16:51.618274Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:1:{s:10:\"edit_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.620418Z 8 Query SELECT 
option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.620812Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:2:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' 
WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.622184Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.622480Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:3:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_
1\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.630833Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.631324Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s
:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:0:{}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.633317Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.633663Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1
;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:1:{s:4:\"read\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.642806Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.643266Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:30:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";
s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.644876Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.645214Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:31:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:
7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.653769Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.654147Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:32:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"uploa
d_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.663205Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.663647Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:33:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"lev
el_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.671593Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.672051Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:34:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\
"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.679929Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.680391Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:35:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\
"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.688554Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.688918Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:36:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links
\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.690406Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.690691Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:37:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_
0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.699127Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.699554Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:38:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:
4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.701027Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.701311Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:39:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_
categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.709818Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.710343Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:40:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor
\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.712001Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.712284Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:41:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";
b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.713505Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.713826Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:42:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete
_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.721764Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.722154Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:43:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s
:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.723645Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.724079Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:44:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\"
;b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.732686Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.733117Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:19:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s
:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.741324Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.741765Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabili
ties\";a:20:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.749618Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.750045Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:21:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\
";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.751794Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.752124Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;
}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:22:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.760501Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.760940Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:23:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Aut
hor\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.768907Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.769351Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit
_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:24:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.777922Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.778355Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:25:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"dele
te_published_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.787068Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.787590Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"r
ead_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:26:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.795789Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.796194Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:27:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"dele
te_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.803957Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.804468Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_p
rivate_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:28:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.805945Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.806256Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:29:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"dele
te_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.814142Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.814623Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_
others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:30:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.816615Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.816948Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:31:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"dele
te_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.825343Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.825835Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:32:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"dele
te_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.833714Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.834127Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:33:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"dele
te_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.842905Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.843373Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:45:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"dele
te_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.845407Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.845735Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:46:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_othe
rs_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.853466Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.853876Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:47:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_page
s\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:8:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.856179Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.856641Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:47:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_page
s\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:9:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.864995Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.865526Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:47:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_page
s\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:4:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.873905Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.874413Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:47:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_page
s\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.876575Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.876966Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:48:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish
_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.885597Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.886111Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:49:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_publishe
d_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.894105Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.894523Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:50:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_page
s\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.902987Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.903423Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:51:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\"
;b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.911641Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.912183Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:52:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"
level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.913759Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.914091Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:53:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1
;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.922169Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.922596Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:54:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level
_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.924332Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.924718Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:55:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;s:11:\"update_core\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"
level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.937541Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.938134Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:56:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;s:11:\"update_core\";b:1;s:10:\"list_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:
7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.940126Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.940506Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:57:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;s:11:\"update_core\";b:1;s:10:\"list_users\";b:1;s:12:\"remove_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";
b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.949296Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.949826Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:58:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;s:11:\"update_core\";b:1;s:10:\"list_users\";b:1;s:12:\"remove_users\";b:1;s:13:\"promote_users\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edi
t_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.958330Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.958795Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:59:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;s:11:\"update_core\";b:1;s:10:\"list_users\";b:1;s:12:\"remove_users\";b:1;s:13:\"promote_users\";b:1;s:18:\"edit_theme_options\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:
\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.960562Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.960983Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:60:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;s:11:\"update_core\";b:1;s:10:\"list_users\";b:1;s:12:\"remove_users\";b:1;s:13:\"promote_users\";b:1;s:18:\"edit_theme_options\";b:1;s:13:\"delete_themes\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_
published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.969292Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_user_roles' LIMIT 1 -2020-03-02T15:16:51.969869Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{s:13:\"administrator\";a:2:{s:4:\"name\";s:13:\"Administrator\";s:12:\"capabilities\";a:61:{s:13:\"switch_themes\";b:1;s:11:\"edit_themes\";b:1;s:16:\"activate_plugins\";b:1;s:12:\"edit_plugins\";b:1;s:10:\"edit_users\";b:1;s:10:\"edit_files\";b:1;s:14:\"manage_options\";b:1;s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:6:\"import\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:8:\"level_10\";b:1;s:7:\"level_9\";b:1;s:7:\"level_8\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;s:12:\"delete_users\";b:1;s:12:\"create_users\";b:1;s:17:\"unfiltered_upload\";b:1;s:14:\"edit_dashboard\";b:1;s:14:\"update_plugins\";b:1;s:14:\"delete_plugins\";b:1;s:15:\"install_plugins\";b:1;s:13:\"update_themes\";b:1;s:14:\"install_themes\";b:1;s:11:\"update_core\";b:1;s:10:\"list_users\";b:1;s:12:\"remove_users\";b:1;s:13:\"promote_users\";b:1;s:18:\"edit_theme_options\";b:1;s:13:\"delete_themes\";b:1;s:6:\"export\";b:1;}}s:6:\"editor\";a:2:{s:4:\"name\";s:6:\"Editor\";s:12:\"capabilities\";a:34:{s:17:\"moderate_comments\";b:1;s:17:\"manage_categories\";b:1;s:12:\"manage_links\";b:1;s:12:\"upload_files\";b:1;s:15:\"unfiltered_html\";b:1;s:10:\"edit_posts\";b:1;s:17:\"edit_others_posts
\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:10:\"edit_pages\";b:1;s:4:\"read\";b:1;s:7:\"level_7\";b:1;s:7:\"level_6\";b:1;s:7:\"level_5\";b:1;s:7:\"level_4\";b:1;s:7:\"level_3\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:17:\"edit_others_pages\";b:1;s:20:\"edit_published_pages\";b:1;s:13:\"publish_pages\";b:1;s:12:\"delete_pages\";b:1;s:19:\"delete_others_pages\";b:1;s:22:\"delete_published_pages\";b:1;s:12:\"delete_posts\";b:1;s:19:\"delete_others_posts\";b:1;s:22:\"delete_published_posts\";b:1;s:20:\"delete_private_posts\";b:1;s:18:\"edit_private_posts\";b:1;s:18:\"read_private_posts\";b:1;s:20:\"delete_private_pages\";b:1;s:18:\"edit_private_pages\";b:1;s:18:\"read_private_pages\";b:1;}}s:6:\"author\";a:2:{s:4:\"name\";s:6:\"Author\";s:12:\"capabilities\";a:10:{s:12:\"upload_files\";b:1;s:10:\"edit_posts\";b:1;s:20:\"edit_published_posts\";b:1;s:13:\"publish_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_2\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;s:22:\"delete_published_posts\";b:1;}}s:11:\"contributor\";a:2:{s:4:\"name\";s:11:\"Contributor\";s:12:\"capabilities\";a:5:{s:10:\"edit_posts\";b:1;s:4:\"read\";b:1;s:7:\"level_1\";b:1;s:7:\"level_0\";b:1;s:12:\"delete_posts\";b:1;}}s:10:\"subscriber\";a:2:{s:4:\"name\";s:10:\"Subscriber\";s:12:\"capabilities\";a:2:{s:4:\"read\";b:1;s:7:\"level_0\";b:1;}}}' WHERE `option_name` = 'wp_user_roles' -2020-03-02T15:16:51.978003Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'blogname' LIMIT 1 -2020-03-02T15:16:51.978334Z 8 Query UPDATE `wp_options` SET `option_value` = 'Vitess Test' WHERE `option_name` = 'blogname' -2020-03-02T15:16:51.987738Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'admin_email' LIMIT 1 -2020-03-02T15:16:51.988163Z 8 Query UPDATE `wp_options` SET `option_value` = 'andres@taylor.se' WHERE `option_name` = 'admin_email' -2020-03-02T15:16:51.995742Z 8 Query SELECT option_value FROM wp_options 
WHERE option_name = 'blog_public' LIMIT 1 -2020-03-02T15:16:51.996131Z 8 Query UPDATE `wp_options` SET `option_value` = '1' WHERE `option_name` = 'blog_public' -2020-03-02T15:16:52.004314Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'fresh_site' LIMIT 1 -2020-03-02T15:16:52.004626Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'fresh_site' LIMIT 1 -2020-03-02T15:16:52.004852Z 8 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('fresh_site', '1', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.006035Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:16:52.006240Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:16:52.006488Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.006771Z 8 Query SELECT * FROM wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:16:52.009717Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'blog_charset' LIMIT 1 -2020-03-02T15:16:52.010050Z 8 Query SELECT * FROM wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:16:52.010324Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'blog_charset' LIMIT 1 -2020-03-02T15:16:52.010587Z 8 Query SELECT ID FROM wp_users WHERE user_nicename = 'wordpress-user' AND user_login != 'wordpress-user' LIMIT 1 -2020-03-02T15:16:52.011067Z 8 Query SELECT * FROM wp_users WHERE user_email = 'andres@taylor.se' LIMIT 1 -2020-03-02T15:16:52.011412Z 8 Query SHOW FULL COLUMNS FROM `wp_users` -2020-03-02T15:16:52.012069Z 8 Query INSERT INTO `wp_users` (`user_pass`, `user_nicename`, `user_email`, `user_url`, `user_registered`, `user_activation_key`, `display_name`, `user_login`) VALUES ('$P$BsdeZUeiXTcgnssTdznPM8dhFubsNQ.', 
'wordpress-user', 'andres@taylor.se', '', '2020-03-02 15:16:52', '', 'wordpress-user', 'wordpress-user') -2020-03-02T15:16:52.014956Z 8 Query SELECT * FROM wp_users WHERE ID = '1' LIMIT 1 -2020-03-02T15:16:52.015312Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.015872Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'nickname' AND user_id = 1 -2020-03-02T15:16:52.016204Z 8 Query SHOW FULL COLUMNS FROM `wp_usermeta` -2020-03-02T15:16:52.016787Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'nickname', 'wordpress-user') -2020-03-02T15:16:52.023131Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.023555Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'first_name' AND user_id = 1 -2020-03-02T15:16:52.023910Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'first_name', '') -2020-03-02T15:16:52.032034Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.032391Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'last_name' AND user_id = 1 -2020-03-02T15:16:52.032806Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'last_name', '') -2020-03-02T15:16:52.040924Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.041258Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'description' AND user_id = 1 -2020-03-02T15:16:52.041523Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'description', '') -2020-03-02T15:16:52.050187Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.050595Z 8 Query SELECT umeta_id FROM wp_usermeta 
WHERE meta_key = 'rich_editing' AND user_id = 1 -2020-03-02T15:16:52.050916Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'rich_editing', 'true') -2020-03-02T15:16:52.053111Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.053441Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'syntax_highlighting' AND user_id = 1 -2020-03-02T15:16:52.053848Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'syntax_highlighting', 'true') -2020-03-02T15:16:52.061219Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.061593Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'comment_shortcuts' AND user_id = 1 -2020-03-02T15:16:52.061902Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'comment_shortcuts', 'false') -2020-03-02T15:16:52.069821Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.070144Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'admin_color' AND user_id = 1 -2020-03-02T15:16:52.070837Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'admin_color', 'fresh') -2020-03-02T15:16:52.078501Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.078859Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'use_ssl' AND user_id = 1 -2020-03-02T15:16:52.079173Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'use_ssl', '0') -2020-03-02T15:16:52.087158Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.087502Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'show_admin_bar_front' AND 
user_id = 1 -2020-03-02T15:16:52.087787Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'show_admin_bar_front', 'true') -2020-03-02T15:16:52.095635Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.096020Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'locale' AND user_id = 1 -2020-03-02T15:16:52.096292Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'locale', '') -2020-03-02T15:16:52.097408Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'initial_db_version' LIMIT 1 -2020-03-02T15:16:52.097637Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'default_role' LIMIT 1 -2020-03-02T15:16:52.097863Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.098103Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'wp_capabilities' AND user_id = 1 -2020-03-02T15:16:52.098341Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'wp_capabilities', 'a:1:{s:10:\"subscriber\";b:1;}') -2020-03-02T15:16:52.106385Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.106892Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'wp_user_level' AND user_id = 1 -2020-03-02T15:16:52.107211Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'wp_user_level', '0') -2020-03-02T15:16:52.108488Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.108847Z 8 Query SELECT * FROM wp_users WHERE ID = '1' LIMIT 1 -2020-03-02T15:16:52.109142Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'dismissed_wp_pointers', '') -2020-03-02T15:16:52.117275Z 8 Query SELECT user_id, meta_key, 
meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.117745Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'wp_capabilities' AND user_id = 1 -2020-03-02T15:16:52.118092Z 8 Query UPDATE `wp_usermeta` SET `meta_value` = 'a:1:{s:13:\"administrator\";b:1;}' WHERE `user_id` = 1 AND `meta_key` = 'wp_capabilities' -2020-03-02T15:16:52.125656Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.126105Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'wp_user_level' AND user_id = 1 -2020-03-02T15:16:52.126474Z 8 Query UPDATE `wp_usermeta` SET `meta_value` = '10' WHERE `user_id` = 1 AND `meta_key` = 'wp_user_level' -2020-03-02T15:16:52.141944Z 8 Query SHOW FULL COLUMNS FROM `wp_terms` -2020-03-02T15:16:52.142654Z 8 Query INSERT INTO `wp_terms` (`term_id`, `name`, `slug`, `term_group`) VALUES (1, 'Uncategorized', 'uncategorized', 0) -2020-03-02T15:16:52.144823Z 8 Query SHOW FULL COLUMNS FROM `wp_term_taxonomy` -2020-03-02T15:16:52.145510Z 8 Query INSERT INTO `wp_term_taxonomy` (`term_id`, `taxonomy`, `description`, `parent`, `count`) VALUES (1, 'category', '', 0, 1) -2020-03-02T15:16:52.146984Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'timezone_string' LIMIT 1 -2020-03-02T15:16:52.147211Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'timezone_string' LIMIT 1 -2020-03-02T15:16:52.147383Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'gmt_offset' LIMIT 1 -2020-03-02T15:16:52.147622Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.147883Z 8 Query SHOW FULL COLUMNS FROM `wp_posts` -2020-03-02T15:16:52.148612Z 8 Query INSERT INTO `wp_posts` (`post_author`, `post_date`, `post_date_gmt`, `post_content`, `post_excerpt`, `post_title`, `post_name`, `post_modified`, `post_modified_gmt`, `guid`, `comment_count`, `to_ping`, `pinged`, 
`post_content_filtered`) VALUES (1, '2020-03-02 15:16:52', '2020-03-02 15:16:52', '\n

Welcome to WordPress. This is your first post. Edit or delete it, then start writing!

\n', '', 'Hello world!', 'hello-world', '2020-03-02 15:16:52', '2020-03-02 15:16:52', 'http://127.0.0.1/wordpress/?p=1', 1, '', '', '') -2020-03-02T15:16:52.156283Z 8 Query INSERT INTO `wp_term_relationships` (`term_taxonomy_id`, `object_id`) VALUES (1, 1) -2020-03-02T15:16:52.164653Z 8 Query SHOW FULL COLUMNS FROM `wp_comments` -2020-03-02T15:16:52.165431Z 8 Query INSERT INTO `wp_comments` (`comment_post_ID`, `comment_author`, `comment_author_email`, `comment_author_url`, `comment_date`, `comment_date_gmt`, `comment_content`) VALUES (1, 'A WordPress Commenter', 'wapuu@wordpress.example', 'https://wordpress.org/', '2020-03-02 15:16:52', '2020-03-02 15:16:52', 'Hi, this is a comment.\nTo get started with moderating, editing, and deleting comments, please visit the Comments screen in the dashboard.\nCommenter avatars come from Gravatar.') -2020-03-02T15:16:52.173346Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.173636Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.173959Z 8 Query INSERT INTO `wp_posts` (`post_author`, `post_date`, `post_date_gmt`, `post_content`, `post_excerpt`, `comment_status`, `post_title`, `post_name`, `post_modified`, `post_modified_gmt`, `guid`, `post_type`, `to_ping`, `pinged`, `post_content_filtered`) VALUES (1, '2020-03-02 15:16:52', '2020-03-02 15:16:52', '\n

This is an example page. It\'s different from a blog post because it will stay in one place and will show up in your site navigation (in most themes). Most people start with an About page that introduces them to potential site visitors. It might say something like this:

\n\n\n\n

Hi there! I\'m a bike messenger by day, aspiring actor by night, and this is my website. I live in Los Angeles, have a great dog named Jack, and I like piña coladas. (And gettin\' caught in the rain.)

\n\n\n\n

...or something like this:

\n\n\n\n

The XYZ Doohickey Company was founded in 1971, and has been providing quality doohickeys to the public ever since. Located in Gotham City, XYZ employs over 2,000 people and does all kinds of awesome things for the Gotham community.

\n\n\n\n

As a new WordPress user, you should go to your dashboard to delete this page and create new pages for your content. Have fun!

\n', '', 'closed', 'Sample Page', 'sample-page', '2020-03-02 15:16:52', '2020-03-02 15:16:52', 'http://127.0.0.1/wordpress/?page_id=2', 'page', '', '', '') -2020-03-02T15:16:52.183452Z 8 Query SHOW FULL COLUMNS FROM `wp_postmeta` -2020-03-02T15:16:52.184124Z 8 Query INSERT INTO `wp_postmeta` (`post_id`, `meta_key`, `meta_value`) VALUES (2, '_wp_page_template', 'default') -2020-03-02T15:16:52.191913Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.192253Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.192682Z 8 Query INSERT INTO `wp_posts` (`post_author`, `post_date`, `post_date_gmt`, `post_content`, `post_excerpt`, `comment_status`, `post_title`, `post_name`, `post_modified`, `post_modified_gmt`, `guid`, `post_type`, `post_status`, `to_ping`, `pinged`, `post_content_filtered`) VALUES (1, '2020-03-02 15:16:52', '2020-03-02 15:16:52', '

Who we are

Our website address is: http://127.0.0.1/wordpress.

What personal data we collect and why we collect it

Comments

When visitors leave comments on the site we collect the data shown in the comments form, and also the visitor’s IP address and browser user agent string to help spam detection.

An anonymized string created from your email address (also called a hash) may be provided to the Gravatar service to see if you are using it. The Gravatar service privacy policy is available here: https://automattic.com/privacy/. After approval of your comment, your profile picture is visible to the public in the context of your comment.

Media

If you upload images to the website, you should avoid uploading images with embedded location data (EXIF GPS) included. Visitors to the website can download and extract any location data from images on the website.

Contact forms

Cookies

If you leave a comment on our site you may opt-in to saving your name, email address and website in cookies. These are for your convenience so that you do not have to fill in your details again when you leave another comment. These cookies will last for one year.

If you visit our login page, we will set a temporary cookie to determine if your browser accepts cookies. This cookie contains no personal data and is discarded when you close your browser.

When you log in, we will also set up several cookies to save your login information and your screen display choices. Login cookies last for two days, and screen options cookies last for a year. If you select "Remember Me", your login will persist for two weeks. If you log out of your account, the login cookies will be removed.

If you edit or publish an article, an additional cookie will be saved in your browser. This cookie includes no personal data and simply indicates the post ID of the article you just edited. It expires after 1 day.

Embedded content from other websites

Articles on this site may include embedded content (e.g. videos, images, articles, etc.). Embedded content from other websites behaves in the exact same way as if the visitor has visited the other website.

These websites may collect data about you, use cookies, embed additional third-party tracking, and monitor your interaction with that embedded content, including tracking your interaction with the embedded content if you have an account and are logged in to that website.

Analytics

Who we share your data with

How long we retain your data

If you leave a comment, the comment and its metadata are retained indefinitely. This is so we can recognize and approve any follow-up comments automatically instead of holding them in a moderation queue.

For users that register on our website (if any), we also store the personal information they provide in their user profile. All users can see, edit, or delete their personal information at any time (except they cannot change their username). Website administrators can also see and edit that information.

What rights you have over your data

If you have an account on this site, or have left comments, you can request to receive an exported file of the personal data we hold about you, including any data you have provided to us. You can also request that we erase any personal data we hold about you. This does not include any data we are obliged to keep for administrative, legal, or security purposes.

Where we send your data

Visitor comments may be checked through an automated spam detection service.

Your contact information

Additional information

How we protect your data

What data breach procedures we have in place

What third parties we receive data from

What automated decision making and/or profiling we do with user data

Industry regulatory disclosure requirements

', '', 'closed', 'Privacy Policy', 'privacy-policy', '2020-03-02 15:16:52', '2020-03-02 15:16:52', 'http://127.0.0.1/wordpress/?page_id=3', 'page', 'draft', '', '', '') -2020-03-02T15:16:52.200849Z 8 Query INSERT INTO `wp_postmeta` (`post_id`, `meta_key`, `meta_value`) VALUES (3, '_wp_page_template', 'default') -2020-03-02T15:16:52.210152Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'wp_page_for_privacy_policy' LIMIT 1 -2020-03-02T15:16:52.210575Z 8 Query UPDATE `wp_options` SET `option_value` = '3' WHERE `option_name` = 'wp_page_for_privacy_policy' -2020-03-02T15:16:52.218395Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_search' LIMIT 1 -2020-03-02T15:16:52.218748Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_search' LIMIT 1 -2020-03-02T15:16:52.219017Z 8 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_search', 'a:2:{i:2;a:1:{s:5:\"title\";s:0:\"\";}s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.227070Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_recent-posts' LIMIT 1 -2020-03-02T15:16:52.227337Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_recent-posts' LIMIT 1 -2020-03-02T15:16:52.227563Z 8 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_recent-posts', 'a:2:{i:2;a:2:{s:5:\"title\";s:0:\"\";s:6:\"number\";i:5;}s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.235220Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_recent-comments' LIMIT 1 -2020-03-02T15:16:52.235558Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 
'widget_recent-comments' LIMIT 1 -2020-03-02T15:16:52.235901Z 8 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_recent-comments', 'a:2:{i:2;a:2:{s:5:\"title\";s:0:\"\";s:6:\"number\";i:5;}s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.238181Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_archives' LIMIT 1 -2020-03-02T15:16:52.238391Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_archives' LIMIT 1 -2020-03-02T15:16:52.238580Z 8 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_archives', 'a:2:{i:2;a:3:{s:5:\"title\";s:0:\"\";s:5:\"count\";i:0;s:8:\"dropdown\";i:0;}s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.246007Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_categories' LIMIT 1 -2020-03-02T15:16:52.246381Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:2:{i:2;a:4:{s:5:\"title\";s:0:\"\";s:5:\"count\";i:0;s:12:\"hierarchical\";i:0;s:8:\"dropdown\";i:0;}s:12:\"_multiwidget\";i:1;}' WHERE `option_name` = 'widget_categories' -2020-03-02T15:16:52.248146Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_meta' LIMIT 1 -2020-03-02T15:16:52.248427Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_meta' LIMIT 1 -2020-03-02T15:16:52.248695Z 8 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_meta', 'a:2:{i:2;a:1:{s:5:\"title\";s:0:\"\";}s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.256849Z 
8 Query SELECT option_value FROM wp_options WHERE option_name = 'sidebars_widgets' LIMIT 1 -2020-03-02T15:16:52.257176Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'sidebars_widgets' LIMIT 1 -2020-03-02T15:16:52.257453Z 8 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('sidebars_widgets', 'a:4:{s:19:\"wp_inactive_widgets\";a:0:{}s:9:\"sidebar-1\";a:3:{i:0;s:8:\"search-2\";i:1;s:14:\"recent-posts-2\";i:2;s:17:\"recent-comments-2\";}s:9:\"sidebar-2\";a:3:{i:0;s:10:\"archives-2\";i:1;s:12:\"categories-2\";i:2;s:6:\"meta-2\";}s:13:\"array_version\";i:3;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.265672Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.266127Z 8 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'show_welcome_panel' AND user_id = 1 -2020-03-02T15:16:52.266384Z 8 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'show_welcome_panel', '1') -2020-03-02T15:16:52.276570Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.276959Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.277249Z 8 Query UPDATE `wp_options` SET `option_value` = '/%year%/%monthnum%/%day%/%postname%/' WHERE `option_name` = 'permalink_structure' -2020-03-02T15:16:52.279126Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.279414Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.279604Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.279816Z 8 Query SELECT option_value FROM wp_options WHERE 
option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.280139Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.280342Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.280546Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.280720Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.280942Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.281198Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.281406Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.281583Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.281762Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.281923Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.282110Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.282286Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.282487Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.282684Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.283047Z 8 Query UPDATE `wp_options` SET `option_value` = 
'a:74:{s:11:\"^wp-json/?$\";s:22:\"index.php?rest_route=/\";s:14:\"^wp-json/(.*)?\";s:33:\"index.php?rest_route=/$matches[1]\";s:21:\"^index.php/wp-json/?$\";s:22:\"index.php?rest_route=/\";s:24:\"^index.php/wp-json/(.*)?\";s:33:\"index.php?rest_route=/$matches[1]\";s:48:\".*wp-(atom|rdf|rss|rss2|feed|commentsrss2)\\.php$\";s:18:\"index.php?feed=old\";s:20:\".*wp-app\\.php(/.*)?$\";s:19:\"index.php?error=403\";s:18:\".*wp-register.php$\";s:23:\"index.php?register=true\";s:32:\"feed/(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:27:\"(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:8:\"embed/?$\";s:21:\"index.php?&embed=true\";s:20:\"page/?([0-9]{1,})/?$\";s:28:\"index.php?&paged=$matches[1]\";s:41:\"comments/feed/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:36:\"comments/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:17:\"comments/embed/?$\";s:21:\"index.php?&embed=true\";s:44:\"search/(.+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:40:\"index.php?s=$matches[1]&feed=$matches[2]\";s:39:\"search/(.+)/(feed|rdf|rss|rss2|atom)/?$\";s:40:\"index.php?s=$matches[1]&feed=$matches[2]\";s:20:\"search/(.+)/embed/?$\";s:34:\"index.php?s=$matches[1]&embed=true\";s:32:\"search/(.+)/page/?([0-9]{1,})/?$\";s:41:\"index.php?s=$matches[1]&paged=$matches[2]\";s:14:\"search/(.+)/?$\";s:23:\"index.php?s=$matches[1]\";s:47:\"author/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:42:\"author/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:23:\"author/([^/]+)/embed/?$\";s:44:\"index.php?author_name=$matches[1]&embed=true\";s:35:\"author/([^/]+)/page/?([0-9]{1,})/?$\";s:51:\"index.php?author_name=$matches[1]&paged=$matches[2]\";s:17:\"author/([^/]+)/?$\";s:33:\"index.php?author_name=$matches[1]\";s:69:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\"
;s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:64:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:45:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/embed/?$\";s:74:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&embed=true\";s:57:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&paged=$matches[4]\";s:39:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/?$\";s:63:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]\";s:56:\"([0-9]{4})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:51:\"([0-9]{4})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:32:\"([0-9]{4})/([0-9]{1,2})/embed/?$\";s:58:\"index.php?year=$matches[1]&monthnum=$matches[2]&embed=true\";s:44:\"([0-9]{4})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:65:\"index.php?year=$matches[1]&monthnum=$matches[2]&paged=$matches[3]\";s:26:\"([0-9]{4})/([0-9]{1,2})/?$\";s:47:\"index.php?year=$matches[1]&monthnum=$matches[2]\";s:43:\"([0-9]{4})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:38:\"([0-9]{4})/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:19:\"([0-9]{4})/embed/?$\";s:37:\"index.php?year=$matches[1]&embed=true\";s:31:\"([0-9]{4})/page/?([0-9]{1,})/?$\";s:44:\"index.php?year=$matches[1]&paged=$matches[2]\";s:13:\"([0-9]{4})/?$\";s:26:\"index.php?year=$matches[1]\";s:58:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:68:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:88:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/att
achment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:83:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:83:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:64:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:53:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/embed/?$\";s:91:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&embed=true\";s:57:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/trackback/?$\";s:85:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&tb=1\";s:77:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&feed=$matches[5]\";s:72:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&feed=$matches[5]\";s:65:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/page/?([0-9]{1,})/?$\";s:98:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&paged=$matches[5]\";s:72:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/comment-page-([0-9]{1,})/?$\";s:98:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&cpage=$matches[5]\";s:61:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)(?:/([0-9]+))?/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&page=$matches[5]\";s:47:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:57:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$mat
ches[1]&tb=1\";s:77:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:72:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:72:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:53:\"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:64:\"([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/comment-page-([0-9]{1,})/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&cpage=$matches[4]\";s:51:\"([0-9]{4})/([0-9]{1,2})/comment-page-([0-9]{1,})/?$\";s:65:\"index.php?year=$matches[1]&monthnum=$matches[2]&cpage=$matches[3]\";s:38:\"([0-9]{4})/comment-page-([0-9]{1,})/?$\";s:44:\"index.php?year=$matches[1]&cpage=$matches[2]\";s:27:\".?.+?/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:37:\".?.+?/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:57:\".?.+?/attachment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:52:\".?.+?/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:52:\".?.+?/attachment/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:33:\".?.+?/attachment/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:16:\"(.?.+?)/embed/?$\";s:41:\"index.php?pagename=$matches[1]&embed=true\";s:20:\"(.?.+?)/trackback/?$\";s:35:\"index.php?pagename=$matches[1]&tb=1\";s:40:\"(.?.+?)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:35:\"(.?.+?)/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:28:\"(.?.+?)/page/?([0-9]{1,})/?$\";
s:48:\"index.php?pagename=$matches[1]&paged=$matches[2]\";s:35:\"(.?.+?)/comment-page-([0-9]{1,})/?$\";s:48:\"index.php?pagename=$matches[1]&cpage=$matches[2]\";s:24:\"(.?.+?)(?:/([0-9]+))?/?$\";s:47:\"index.php?pagename=$matches[1]&page=$matches[2]\";}' WHERE `option_name` = 'rewrite_rules' -2020-03-02T15:16:52.287836Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.288121Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.288423Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.288683Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.289061Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.289295Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.289658Z 8 Query SELECT ID, post_name, post_parent, post_type - FROM wp_posts - WHERE post_name IN ('hello-world') - AND post_type IN ('post','attachment') -2020-03-02T15:16:52.289950Z 8 Query SELECT * FROM wp_posts WHERE ID = 1 LIMIT 1 -2020-03-02T15:16:52.290314Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.290642Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.290898Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.292067Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.292328Z 8 Query UPDATE `wp_options` SET `option_value` = '/index.php/%year%/%monthnum%/%day%/%postname%/' WHERE `option_name` = 'permalink_structure' -2020-03-02T15:16:52.296126Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.296351Z 8 Query 
SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.296660Z 8 Query UPDATE `wp_options` SET `option_value` = '' WHERE `option_name` = 'rewrite_rules' -2020-03-02T15:16:52.304469Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.304733Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.304937Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.305122Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.305307Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.305536Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.305724Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.305898Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.306070Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.306242Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.306426Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.306587Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.306750Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.306923Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.307098Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.307291Z 8 Query SELECT 
option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.307679Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:74:{s:11:\"^wp-json/?$\";s:22:\"index.php?rest_route=/\";s:14:\"^wp-json/(.*)?\";s:33:\"index.php?rest_route=/$matches[1]\";s:21:\"^index.php/wp-json/?$\";s:22:\"index.php?rest_route=/\";s:24:\"^index.php/wp-json/(.*)?\";s:33:\"index.php?rest_route=/$matches[1]\";s:48:\".*wp-(atom|rdf|rss|rss2|feed|commentsrss2)\\.php$\";s:18:\"index.php?feed=old\";s:20:\".*wp-app\\.php(/.*)?$\";s:19:\"index.php?error=403\";s:18:\".*wp-register.php$\";s:23:\"index.php?register=true\";s:42:\"index.php/feed/(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:37:\"index.php/(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:18:\"index.php/embed/?$\";s:21:\"index.php?&embed=true\";s:30:\"index.php/page/?([0-9]{1,})/?$\";s:28:\"index.php?&paged=$matches[1]\";s:51:\"index.php/comments/feed/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:46:\"index.php/comments/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:27:\"index.php/comments/embed/?$\";s:21:\"index.php?&embed=true\";s:54:\"index.php/search/(.+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:40:\"index.php?s=$matches[1]&feed=$matches[2]\";s:49:\"index.php/search/(.+)/(feed|rdf|rss|rss2|atom)/?$\";s:40:\"index.php?s=$matches[1]&feed=$matches[2]\";s:30:\"index.php/search/(.+)/embed/?$\";s:34:\"index.php?s=$matches[1]&embed=true\";s:42:\"index.php/search/(.+)/page/?([0-9]{1,})/?$\";s:41:\"index.php?s=$matches[1]&paged=$matches[2]\";s:24:\"index.php/search/(.+)/?$\";s:23:\"index.php?s=$matches[1]\";s:57:\"index.php/author/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:52:\"index.php/author/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:33:\"index.php/author/([^/]+)/embed/?$\";s:44:\
"index.php?author_name=$matches[1]&embed=true\";s:45:\"index.php/author/([^/]+)/page/?([0-9]{1,})/?$\";s:51:\"index.php?author_name=$matches[1]&paged=$matches[2]\";s:27:\"index.php/author/([^/]+)/?$\";s:33:\"index.php?author_name=$matches[1]\";s:79:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:74:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:55:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/embed/?$\";s:74:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&embed=true\";s:67:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&paged=$matches[4]\";s:49:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/?$\";s:63:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]\";s:66:\"index.php/([0-9]{4})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:61:\"index.php/([0-9]{4})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:42:\"index.php/([0-9]{4})/([0-9]{1,2})/embed/?$\";s:58:\"index.php?year=$matches[1]&monthnum=$matches[2]&embed=true\";s:54:\"index.php/([0-9]{4})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:65:\"index.php?year=$matches[1]&monthnum=$matches[2]&paged=$matches[3]\";s:36:\"index.php/([0-9]{4})/([0-9]{1,2})/?$\";s:47:\"index.php?year=$matches[1]&monthnum=$matches[2]\";s:53:\"index.php/([0-9]{4})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:48:\"index.php/([0-9]{4})/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:29:\"index.php/([0-9]{4})/embed/?$\";s:37:\"index.php?year=$matches[
1]&embed=true\";s:41:\"index.php/([0-9]{4})/page/?([0-9]{1,})/?$\";s:44:\"index.php?year=$matches[1]&paged=$matches[2]\";s:23:\"index.php/([0-9]{4})/?$\";s:26:\"index.php?year=$matches[1]\";s:68:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:78:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:98:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:93:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:93:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:74:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:63:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/embed/?$\";s:91:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&embed=true\";s:67:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/trackback/?$\";s:85:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&tb=1\";s:87:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&feed=$matches[5]\";s:82:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&feed=$matches[5]\";s:75:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/page/?([0-9]{1,})/?$\";s:98:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&page
d=$matches[5]\";s:82:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/comment-page-([0-9]{1,})/?$\";s:98:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&cpage=$matches[5]\";s:71:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)(?:/([0-9]+))?/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&page=$matches[5]\";s:57:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:67:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:87:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:82:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:82:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:63:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:74:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/comment-page-([0-9]{1,})/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&cpage=$matches[4]\";s:61:\"index.php/([0-9]{4})/([0-9]{1,2})/comment-page-([0-9]{1,})/?$\";s:65:\"index.php?year=$matches[1]&monthnum=$matches[2]&cpage=$matches[3]\";s:48:\"index.php/([0-9]{4})/comment-page-([0-9]{1,})/?$\";s:44:\"index.php?year=$matches[1]&cpage=$matches[2]\";s:37:\"index.php/.?.+?/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:47:\"index.php/.?.+?/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:67:\"index.php/.?.+?/attachment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:62:\"index.php
/.?.+?/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:62:\"index.php/.?.+?/attachment/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:43:\"index.php/.?.+?/attachment/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:26:\"index.php/(.?.+?)/embed/?$\";s:41:\"index.php?pagename=$matches[1]&embed=true\";s:30:\"index.php/(.?.+?)/trackback/?$\";s:35:\"index.php?pagename=$matches[1]&tb=1\";s:50:\"index.php/(.?.+?)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:45:\"index.php/(.?.+?)/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:38:\"index.php/(.?.+?)/page/?([0-9]{1,})/?$\";s:48:\"index.php?pagename=$matches[1]&paged=$matches[2]\";s:45:\"index.php/(.?.+?)/comment-page-([0-9]{1,})/?$\";s:48:\"index.php?pagename=$matches[1]&cpage=$matches[2]\";s:34:\"index.php/(.?.+?)(?:/([0-9]+))?/?$\";s:47:\"index.php?pagename=$matches[1]&page=$matches[2]\";}' WHERE `option_name` = 'rewrite_rules' -2020-03-02T15:16:52.316173Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.316536Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.316902Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.317180Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.317562Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.317927Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.318257Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.318477Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' 
LIMIT 1 -2020-03-02T15:16:52.318707Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.322938Z 9 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:16:52.323222Z 9 Query SET NAMES utf8mb4 -2020-03-02T15:16:52.323580Z 9 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:16:52.323798Z 9 Query SELECT @@SESSION.sql_mode -2020-03-02T15:16:52.324084Z 9 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:16:52.324265Z 9 Init DB wordpressdb -2020-03-02T15:16:52.324931Z 9 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:16:52.327435Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'cron' LIMIT 1 -2020-03-02T15:16:52.327806Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('cron', 'a:2:{i:1583162212;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.330154Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:16:52.336092Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_mods_twentytwenty' LIMIT 1 -2020-03-02T15:16:52.336432Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'current_theme' LIMIT 1 -2020-03-02T15:16:52.337203Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'mods_Twenty Twenty' LIMIT 1 -2020-03-02T15:16:52.338716Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_pages' LIMIT 1 -2020-03-02T15:16:52.338982Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) 
VALUES ('widget_pages', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.340778Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_calendar' LIMIT 1 -2020-03-02T15:16:52.341099Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_calendar', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.342555Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_media_audio' LIMIT 1 -2020-03-02T15:16:52.342863Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_media_audio', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.350655Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_media_image' LIMIT 1 -2020-03-02T15:16:52.350929Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_media_image', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.352115Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_media_gallery' LIMIT 1 -2020-03-02T15:16:52.352329Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_media_gallery', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.353436Z 9 Query SELECT 
option_value FROM wp_options WHERE option_name = 'widget_media_video' LIMIT 1 -2020-03-02T15:16:52.353684Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_media_video', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.363950Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:16:52.364491Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_tag_cloud' LIMIT 1 -2020-03-02T15:16:52.364797Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_tag_cloud', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.366509Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_nav_menu' LIMIT 1 -2020-03-02T15:16:52.366779Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_nav_menu', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.374423Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'widget_custom_html' LIMIT 1 -2020-03-02T15:16:52.374797Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('widget_custom_html', 'a:1:{s:12:\"_multiwidget\";i:1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.376661Z 9 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_timeout_doing_cron' LIMIT 1 -2020-03-02T15:16:52.376982Z 9 Query SELECT 
option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:16:52.377292Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_transient_doing_cron', '1583162212.3764901161193847656250', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.379507Z 9 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:16:52.380211Z 9 Query UPDATE `wp_options` SET `option_value` = 'a:2:{i:1583162212;a:2:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.380952Z 10 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:16:52.381172Z 10 Query SET NAMES utf8mb4 -2020-03-02T15:16:52.381456Z 10 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:16:52.381573Z 10 Query SELECT @@SESSION.sql_mode -2020-03-02T15:16:52.381717Z 10 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:16:52.381855Z 10 Init DB wordpressdb -2020-03-02T15:16:52.382266Z 10 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:16:52.384438Z 10 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:16:52.384894Z 10 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_mods_twentytwenty' LIMIT 1 -2020-03-02T15:16:52.385156Z 10 Query SELECT option_value FROM wp_options WHERE option_name = 'current_theme' LIMIT 1 -2020-03-02T15:16:52.385929Z 10 Query SELECT option_value FROM wp_options 
WHERE option_name = 'mods_Twenty Twenty' LIMIT 1 -2020-03-02T15:16:52.387233Z 9 Query UPDATE `wp_options` SET `option_value` = 'a:2:{i:1583162212;a:3:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.388212Z 10 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:16:52.388840Z 10 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:16:52.389306Z 10 Query UPDATE `wp_options` SET `option_value` = 'a:2:{i:1583162212;a:3:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.395756Z 9 Query UPDATE `wp_options` SET `option_value` = 
'a:2:{i:1583162212;a:4:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.404367Z 10 Query UPDATE `wp_options` SET `option_value` = 'a:2:{i:1583162212;a:3:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.413155Z 9 Query UPDATE `wp_options` SET `option_value` = 
'a:2:{i:1583162212;a:5:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.422277Z 10 Query UPDATE `wp_options` SET `option_value` = 'a:2:{i:1583162212;a:4:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.423236Z 9 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:16:52.424676Z 9 Query SELECT wp_posts.* FROM wp_posts WHERE 1=1 AND ( - ( YEAR( wp_posts.post_date ) = 2020 AND MONTH( wp_posts.post_date 
) = 3 AND DAYOFMONTH( wp_posts.post_date ) = 2 ) -) AND wp_posts.post_name = 'hello-world' AND wp_posts.post_type = 'post' ORDER BY wp_posts.post_date DESC -2020-03-02T15:16:52.425288Z 9 Query SELECT t.*, tt.*, tr.object_id FROM wp_terms AS t INNER JOIN wp_term_taxonomy AS tt ON t.term_id = tt.term_id INNER JOIN wp_term_relationships AS tr ON tr.term_taxonomy_id = tt.term_taxonomy_id WHERE tt.taxonomy IN ('category', 'post_tag', 'post_format') AND tr.object_id IN (1) ORDER BY t.name ASC -2020-03-02T15:16:52.425638Z 9 Query SELECT post_id, meta_key, meta_value FROM wp_postmeta WHERE post_id IN (1) ORDER BY meta_id ASC -2020-03-02T15:16:52.428555Z 9 Query SELECT p.ID FROM wp_posts AS p WHERE p.post_date < '2020-03-02 15:16:52' AND p.post_type = 'post' AND p.post_status = 'publish' ORDER BY p.post_date DESC LIMIT 1 -2020-03-02T15:16:52.428762Z 9 Query SELECT p.ID FROM wp_posts AS p WHERE p.post_date > '2020-03-02 15:16:52' AND p.post_type = 'post' AND p.post_status = 'publish' ORDER BY p.post_date ASC LIMIT 1 -2020-03-02T15:16:52.429464Z 9 Query SELECT wp_posts.* FROM wp_posts WHERE 1=1 AND wp_posts.post_name = 'twentytwenty' AND wp_posts.post_type = 'custom_css' AND ((wp_posts.post_status = 'publish' OR wp_posts.post_status = 'future' OR wp_posts.post_status = 'draft' OR wp_posts.post_status = 'pending' OR wp_posts.post_status = 'trash' OR wp_posts.post_status = 'auto-draft' OR wp_posts.post_status = 'inherit' OR wp_posts.post_status = 'request-pending' OR wp_posts.post_status = 'request-confirmed' OR wp_posts.post_status = 'request-failed' OR wp_posts.post_status = 'request-completed' OR wp_posts.post_status = 'private')) ORDER BY wp_posts.post_date DESC -2020-03-02T15:16:52.429846Z 9 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('theme_mods_twentytwenty', 'a:1:{s:18:\"custom_css_post_id\";i:-1;}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = 
VALUES(`autoload`) -2020-03-02T15:16:52.431257Z 10 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:16:52.431999Z 10 Query UPDATE `wp_options` SET `option_value` = 'a:3:{i:1583162212;a:4:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.433867Z 9 Query SELECT * FROM wp_posts WHERE (post_type = 'page' AND post_status = 'publish') ORDER BY menu_order,wp_posts.post_title ASC -2020-03-02T15:16:52.435357Z 9 Query SELECT * FROM wp_users WHERE ID = '1' LIMIT 1 -2020-03-02T15:16:52.435693Z 9 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.438900Z 9 Query SELECT SQL_CALC_FOUND_ROWS wp_comments.comment_ID FROM wp_comments WHERE ( comment_approved = '1' ) AND comment_post_ID = 1 AND comment_parent = 0 ORDER BY wp_comments.comment_date_gmt ASC, wp_comments.comment_ID ASC -2020-03-02T15:16:52.439455Z 9 Query SELECT wp_comments.* FROM wp_comments WHERE comment_ID IN (1) -2020-03-02T15:16:52.439911Z 9 Query SELECT wp_comments.comment_ID FROM wp_comments WHERE ( comment_approved = '1' ) 
AND comment_post_ID = 1 AND comment_parent IN ( 1 ) ORDER BY wp_comments.comment_date_gmt ASC, wp_comments.comment_ID ASC -2020-03-02T15:16:52.441133Z 10 Query UPDATE `wp_options` SET `option_value` = 'a:3:{i:1583162212;a:3:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.443008Z 9 Query SELECT wp_posts.ID FROM wp_posts WHERE 1=1 AND wp_posts.post_type = 'post' AND ((wp_posts.post_status = 'publish')) ORDER BY wp_posts.post_date DESC LIMIT 0, 5 -2020-03-02T15:16:52.443676Z 9 Query SELECT wp_comments.comment_ID FROM wp_comments JOIN wp_posts ON wp_posts.ID = wp_comments.comment_post_ID WHERE ( comment_approved = '1' ) AND wp_posts.post_status IN ('publish') ORDER BY wp_comments.comment_date_gmt DESC LIMIT 0,5 -2020-03-02T15:16:52.444274Z 9 Query SELECT YEAR(post_date) AS `year`, MONTH(post_date) AS `month`, count(ID) as posts FROM wp_posts WHERE post_type = 'post' AND post_status = 'publish' GROUP BY YEAR(post_date), MONTH(post_date) ORDER BY post_date DESC -2020-03-02T15:16:52.444795Z 9 Query SELECT t.*, tt.* FROM wp_terms AS t INNER JOIN wp_term_taxonomy AS tt ON t.term_id = tt.term_id WHERE tt.taxonomy IN ('category') AND tt.count > 0 ORDER BY t.name ASC -2020-03-02T15:16:52.445131Z 9 Query SELECT term_id, meta_key, meta_value FROM wp_termmeta WHERE term_id IN (1) 
ORDER BY meta_id ASC -2020-03-02T15:16:52.446432Z 9 Quit -2020-03-02T15:16:52.447451Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.447782Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.448187Z 8 Query UPDATE `wp_options` SET `option_value` = '' WHERE `option_name` = 'rewrite_rules' -2020-03-02T15:16:52.449835Z 10 Query SELECT option_value FROM wp_options WHERE option_name = 'recovery_keys' LIMIT 1 -2020-03-02T15:16:52.450177Z 10 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('recovery_keys', 'a:0:{}', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.458212Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.458513Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.458779Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.459038Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.459301Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.459532Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.459921Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.460185Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.460414Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.460618Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.460782Z 8 
Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.460960Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.461135Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.461322Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.461529Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'page_on_front' LIMIT 1 -2020-03-02T15:16:52.461723Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'rewrite_rules' LIMIT 1 -2020-03-02T15:16:52.462083Z 8 Query UPDATE `wp_options` SET `option_value` = 'a:74:{s:11:\"^wp-json/?$\";s:22:\"index.php?rest_route=/\";s:14:\"^wp-json/(.*)?\";s:33:\"index.php?rest_route=/$matches[1]\";s:21:\"^index.php/wp-json/?$\";s:22:\"index.php?rest_route=/\";s:24:\"^index.php/wp-json/(.*)?\";s:33:\"index.php?rest_route=/$matches[1]\";s:48:\".*wp-(atom|rdf|rss|rss2|feed|commentsrss2)\\.php$\";s:18:\"index.php?feed=old\";s:20:\".*wp-app\\.php(/.*)?$\";s:19:\"index.php?error=403\";s:18:\".*wp-register.php$\";s:23:\"index.php?register=true\";s:42:\"index.php/feed/(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:37:\"index.php/(feed|rdf|rss|rss2|atom)/?$\";s:27:\"index.php?&feed=$matches[1]\";s:18:\"index.php/embed/?$\";s:21:\"index.php?&embed=true\";s:30:\"index.php/page/?([0-9]{1,})/?$\";s:28:\"index.php?&paged=$matches[1]\";s:51:\"index.php/comments/feed/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:46:\"index.php/comments/(feed|rdf|rss|rss2|atom)/?$\";s:42:\"index.php?&feed=$matches[1]&withcomments=1\";s:27:\"index.php/comments/embed/?$\";s:21:\"index.php?&embed=true\";s:54:\"index.php/search/(.+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:40:\"index.php?s=$matches[1]&feed=$matches[2]\";s:49:\"index.php/search/(.+)/(feed|rdf|rss|rss2|atom)/?$\";s:40
:\"index.php?s=$matches[1]&feed=$matches[2]\";s:30:\"index.php/search/(.+)/embed/?$\";s:34:\"index.php?s=$matches[1]&embed=true\";s:42:\"index.php/search/(.+)/page/?([0-9]{1,})/?$\";s:41:\"index.php?s=$matches[1]&paged=$matches[2]\";s:24:\"index.php/search/(.+)/?$\";s:23:\"index.php?s=$matches[1]\";s:57:\"index.php/author/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:52:\"index.php/author/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:50:\"index.php?author_name=$matches[1]&feed=$matches[2]\";s:33:\"index.php/author/([^/]+)/embed/?$\";s:44:\"index.php?author_name=$matches[1]&embed=true\";s:45:\"index.php/author/([^/]+)/page/?([0-9]{1,})/?$\";s:51:\"index.php?author_name=$matches[1]&paged=$matches[2]\";s:27:\"index.php/author/([^/]+)/?$\";s:33:\"index.php?author_name=$matches[1]\";s:79:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:74:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:80:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&feed=$matches[4]\";s:55:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/embed/?$\";s:74:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&embed=true\";s:67:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&paged=$matches[4]\";s:49:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/?$\";s:63:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]\";s:66:\"index.php/([0-9]{4})/([0-9]{1,2})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:61:\"index.php/([0-9]{4})/([0-9]{1,2})/(feed|rdf|rss|rss2|atom)/?$\";s:64:\"index.php?year=$matches[1]&monthnum=$matches[2]&feed=$matches[3]\";s:42:\"index.php/([0-9]{4})/([0-9]{1,2})/embed/?$\";s:58:\"in
dex.php?year=$matches[1]&monthnum=$matches[2]&embed=true\";s:54:\"index.php/([0-9]{4})/([0-9]{1,2})/page/?([0-9]{1,})/?$\";s:65:\"index.php?year=$matches[1]&monthnum=$matches[2]&paged=$matches[3]\";s:36:\"index.php/([0-9]{4})/([0-9]{1,2})/?$\";s:47:\"index.php?year=$matches[1]&monthnum=$matches[2]\";s:53:\"index.php/([0-9]{4})/feed/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:48:\"index.php/([0-9]{4})/(feed|rdf|rss|rss2|atom)/?$\";s:43:\"index.php?year=$matches[1]&feed=$matches[2]\";s:29:\"index.php/([0-9]{4})/embed/?$\";s:37:\"index.php?year=$matches[1]&embed=true\";s:41:\"index.php/([0-9]{4})/page/?([0-9]{1,})/?$\";s:44:\"index.php?year=$matches[1]&paged=$matches[2]\";s:23:\"index.php/([0-9]{4})/?$\";s:26:\"index.php?year=$matches[1]\";s:68:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:78:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:98:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:93:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:93:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:74:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/attachment/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:63:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/embed/?$\";s:91:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&embed=true\";s:67:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/trackback/?$\";s:85:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$
matches[4]&tb=1\";s:87:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&feed=$matches[5]\";s:82:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&feed=$matches[5]\";s:75:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/page/?([0-9]{1,})/?$\";s:98:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&paged=$matches[5]\";s:82:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)/comment-page-([0-9]{1,})/?$\";s:98:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&cpage=$matches[5]\";s:71:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/([^/]+)(?:/([0-9]+))?/?$\";s:97:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&name=$matches[4]&page=$matches[5]\";s:57:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:67:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:87:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:82:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:82:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:63:\"index.php/[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}/[^/]+/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:74:\"index.php/([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/comment-page-([0-9]{1,})/?$\";s:81:\"index.php?year=$matches[1]&monthnum=$matches[2]&day=$matches[3]&cpage=$matches[4]\";s:61:
\"index.php/([0-9]{4})/([0-9]{1,2})/comment-page-([0-9]{1,})/?$\";s:65:\"index.php?year=$matches[1]&monthnum=$matches[2]&cpage=$matches[3]\";s:48:\"index.php/([0-9]{4})/comment-page-([0-9]{1,})/?$\";s:44:\"index.php?year=$matches[1]&cpage=$matches[2]\";s:37:\"index.php/.?.+?/attachment/([^/]+)/?$\";s:32:\"index.php?attachment=$matches[1]\";s:47:\"index.php/.?.+?/attachment/([^/]+)/trackback/?$\";s:37:\"index.php?attachment=$matches[1]&tb=1\";s:67:\"index.php/.?.+?/attachment/([^/]+)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:62:\"index.php/.?.+?/attachment/([^/]+)/(feed|rdf|rss|rss2|atom)/?$\";s:49:\"index.php?attachment=$matches[1]&feed=$matches[2]\";s:62:\"index.php/.?.+?/attachment/([^/]+)/comment-page-([0-9]{1,})/?$\";s:50:\"index.php?attachment=$matches[1]&cpage=$matches[2]\";s:43:\"index.php/.?.+?/attachment/([^/]+)/embed/?$\";s:43:\"index.php?attachment=$matches[1]&embed=true\";s:26:\"index.php/(.?.+?)/embed/?$\";s:41:\"index.php?pagename=$matches[1]&embed=true\";s:30:\"index.php/(.?.+?)/trackback/?$\";s:35:\"index.php?pagename=$matches[1]&tb=1\";s:50:\"index.php/(.?.+?)/feed/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:45:\"index.php/(.?.+?)/(feed|rdf|rss|rss2|atom)/?$\";s:47:\"index.php?pagename=$matches[1]&feed=$matches[2]\";s:38:\"index.php/(.?.+?)/page/?([0-9]{1,})/?$\";s:48:\"index.php?pagename=$matches[1]&paged=$matches[2]\";s:45:\"index.php/(.?.+?)/comment-page-([0-9]{1,})/?$\";s:48:\"index.php?pagename=$matches[1]&cpage=$matches[2]\";s:34:\"index.php/(.?.+?)(?:/([0-9]+))?/?$\";s:47:\"index.php?pagename=$matches[1]&page=$matches[2]\";}' WHERE `option_name` = 'rewrite_rules' -2020-03-02T15:16:52.466398Z 10 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:16:52.466760Z 10 Query UPDATE `wp_options` SET `option_value` = 
'a:4:{i:1583162212;a:3:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.475393Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.475993Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.476310Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.476596Z 10 Query UPDATE `wp_options` SET `option_value` = 
'a:4:{i:1583162212;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.480477Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.481627Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.481899Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.482166Z 8 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:16:52.482442Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.485717Z 10 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:16:52.486126Z 10 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{i:1583162212;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:1:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.492497Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'blog_charset' LIMIT 1 -2020-03-02T15:16:52.494185Z 10 Query UPDATE `wp_options` SET `option_value` = 'a:5:{i:1583162212;a:1:{s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:1:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.495389Z 8 Query 
SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.495798Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.496069Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.496314Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.496616Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.496839Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.497102Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.497337Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.497644Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.497852Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.498085Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.498304Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.498574Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'start_of_week' LIMIT 1 -2020-03-02T15:16:52.498763Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'time_format' LIMIT 1 -2020-03-02T15:16:52.498933Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'date_format' LIMIT 1 -2020-03-02T15:16:52.499120Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:16:52.499279Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:16:52.499428Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 
'siteurl' LIMIT 1 -2020-03-02T15:16:52.499593Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.499835Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.500249Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'permalink_structure' LIMIT 1 -2020-03-02T15:16:52.500484Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'home' LIMIT 1 -2020-03-02T15:16:52.500759Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'siteurl' LIMIT 1 -2020-03-02T15:16:52.501072Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'time_format' LIMIT 1 -2020-03-02T15:16:52.501332Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'date_format' LIMIT 1 -2020-03-02T15:16:52.501537Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'timezone_string' LIMIT 1 -2020-03-02T15:16:52.501732Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'gmt_offset' LIMIT 1 -2020-03-02T15:16:52.501953Z 8 Query SELECT option_value FROM wp_options WHERE option_name = 'timezone_string' LIMIT 1 -2020-03-02T15:16:52.503867Z 10 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_update_plugins' LIMIT 1 -2020-03-02T15:16:52.503968Z 8 Quit -2020-03-02T15:16:52.504560Z 10 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_update_plugins', 'O:8:\"stdClass\":1:{s:12:\"last_checked\";i:1583162212;}', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.925193Z 10 Query UPDATE `wp_options` SET `option_value` = 
'O:8:\"stdClass\":4:{s:12:\"last_checked\";i:1583162212;s:8:\"response\";a:0:{}s:12:\"translations\";a:0:{}s:9:\"no_update\";a:2:{s:19:\"akismet/akismet.php\";O:8:\"stdClass\":9:{s:2:\"id\";s:21:\"w.org/plugins/akismet\";s:4:\"slug\";s:7:\"akismet\";s:6:\"plugin\";s:19:\"akismet/akismet.php\";s:11:\"new_version\";s:5:\"4.1.3\";s:3:\"url\";s:38:\"https://wordpress.org/plugins/akismet/\";s:7:\"package\";s:56:\"https://downloads.wordpress.org/plugin/akismet.4.1.3.zip\";s:5:\"icons\";a:2:{s:2:\"2x\";s:59:\"https://ps.w.org/akismet/assets/icon-256x256.png?rev=969272\";s:2:\"1x\";s:59:\"https://ps.w.org/akismet/assets/icon-128x128.png?rev=969272\";}s:7:\"banners\";a:1:{s:2:\"1x\";s:61:\"https://ps.w.org/akismet/assets/banner-772x250.jpg?rev=479904\";}s:11:\"banners_rtl\";a:0:{}}s:9:\"hello.php\";O:8:\"stdClass\":9:{s:2:\"id\";s:25:\"w.org/plugins/hello-dolly\";s:4:\"slug\";s:11:\"hello-dolly\";s:6:\"plugin\";s:9:\"hello.php\";s:11:\"new_version\";s:5:\"1.7.2\";s:3:\"url\";s:42:\"https://wordpress.org/plugins/hello-dolly/\";s:7:\"package\";s:60:\"https://downloads.wordpress.org/plugin/hello-dolly.1.7.2.zip\";s:5:\"icons\";a:2:{s:2:\"2x\";s:64:\"https://ps.w.org/hello-dolly/assets/icon-256x256.jpg?rev=2052855\";s:2:\"1x\";s:64:\"https://ps.w.org/hello-dolly/assets/icon-128x128.jpg?rev=2052855\";}s:7:\"banners\";a:1:{s:2:\"1x\";s:66:\"https://ps.w.org/hello-dolly/assets/banner-772x250.jpg?rev=2052855\";}s:11:\"banners_rtl\";a:0:{}}}}', `autoload` = 'no' WHERE `option_name` = '_site_transient_update_plugins' -2020-03-02T15:16:52.928240Z 10 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:16:52.928650Z 10 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{i:1583162212;a:1:{s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.936196Z 10 Query UPDATE `wp_options` SET `option_value` = 'a:4:{i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:16:52.945282Z 10 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_timeout_theme_roots' LIMIT 1 -2020-03-02T15:16:52.945600Z 10 
Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_theme_roots' LIMIT 1 -2020-03-02T15:16:52.945839Z 10 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_timeout_theme_roots', '1583164012', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.953834Z 10 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_theme_roots', 'a:4:{s:14:\"twentynineteen\";s:7:\"/themes\";s:15:\"twentyseventeen\";s:7:\"/themes\";s:13:\"twentysixteen\";s:7:\"/themes\";s:12:\"twentytwenty\";s:7:\"/themes\";}', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:52.962702Z 10 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_update_themes' LIMIT 1 -2020-03-02T15:16:52.963126Z 10 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_update_themes', 'O:8:\"stdClass\":1:{s:12:\"last_checked\";i:1583162212;}', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:16:53.351275Z 10 Query UPDATE `wp_options` SET `option_value` = 'O:8:\"stdClass\":4:{s:12:\"last_checked\";i:1583162213;s:7:\"checked\";a:4:{s:14:\"twentynineteen\";s:3:\"1.4\";s:15:\"twentyseventeen\";s:3:\"2.2\";s:13:\"twentysixteen\";s:3:\"2.0\";s:12:\"twentytwenty\";s:3:\"1.1\";}s:8:\"response\";a:0:{}s:12:\"translations\";a:0:{}}', `autoload` = 'no' WHERE `option_name` = '_site_transient_update_themes' -2020-03-02T15:16:53.353509Z 10 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:16:53.353722Z 10 Query SELECT option_value FROM wp_options WHERE option_name = 
'_transient_doing_cron' LIMIT 1 -2020-03-02T15:16:53.353905Z 10 Query SELECT autoload FROM wp_options WHERE option_name = '_transient_doing_cron' -2020-03-02T15:16:53.354101Z 10 Query DELETE FROM `wp_options` WHERE `option_name` = '_transient_doing_cron' -2020-03-02T15:16:53.362902Z 10 Query SELECT autoload FROM wp_options WHERE option_name = '_transient_timeout_doing_cron' -2020-03-02T15:16:53.363115Z 10 Quit -2020-03-02T15:17:01.763891Z 11 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:01.764030Z 11 Query SET NAMES utf8mb4 -2020-03-02T15:17:01.764380Z 11 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:01.764464Z 11 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:01.764571Z 11 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:01.764652Z 11 Init DB wordpressdb -2020-03-02T15:17:01.765052Z 11 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:01.767773Z 11 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:01.770199Z 11 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:01.770811Z 11 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:17:01.771236Z 11 Query UPDATE `wp_options` SET `option_value` = 
'a:5:{i:1583162221;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:01.773276Z 11 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:01.774508Z 11 Query SELECT * FROM wp_posts WHERE ID = 3 LIMIT 1 -2020-03-02T15:17:01.775539Z 11 Quit -2020-03-02T15:17:15.872810Z 12 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:15.873140Z 12 Query SET NAMES utf8mb4 -2020-03-02T15:17:15.873517Z 12 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:15.873646Z 12 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:15.873868Z 12 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:15.873994Z 12 Init DB wordpressdb -2020-03-02T15:17:15.874704Z 12 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:15.878765Z 12 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:15.881708Z 12 Query SELECT option_value FROM wp_options WHERE option_name = 
'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:15.882296Z 12 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_timeout_doing_cron' LIMIT 1 -2020-03-02T15:17:15.882561Z 12 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:17:15.882833Z 12 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_transient_doing_cron', '1583162235.8821659088134765625000', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:15.885087Z 12 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:15.885608Z 12 Query SELECT * FROM wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:17:15.885981Z 12 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:15.887489Z 13 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:15.887809Z 13 Query SET NAMES utf8mb4 -2020-03-02T15:17:15.888083Z 13 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:15.888138Z 12 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'session_tokens' AND user_id = 1 -2020-03-02T15:17:15.888228Z 13 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:15.888446Z 13 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:15.888536Z 12 Query SHOW FULL COLUMNS FROM `wp_usermeta` -2020-03-02T15:17:15.888792Z 13 Init DB wordpressdb -2020-03-02T15:17:15.889142Z 12 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'session_tokens', 'a:1:{s:64:\"a8de5451b91c16bb0e59143e24d3295511cdcb161b31c92ed41d5deef00d021e\";a:4:{s:10:\"expiration\";i:1583335035;s:2:\"ip\";s:9:\"127.0.0.1\";s:2:\"ua\";s:76:\"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:73.0) 
Gecko/20100101 Firefox/73.0\";s:5:\"login\";i:1583162235;}}') -2020-03-02T15:17:15.889280Z 13 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:15.890570Z 12 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:15.891243Z 13 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:15.891244Z 12 Quit -2020-03-02T15:17:15.893888Z 13 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:15.894784Z 13 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:15.896123Z 13 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:17:15.896596Z 13 Query UPDATE `wp_options` SET `option_value` = 'a:6:{i:1583162221;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' 
-2020-03-02T15:17:15.898488Z 14 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:15.898491Z 13 Query UPDATE `wp_options` SET `option_value` = 'a:5:{i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:15.898731Z 14 Query SET NAMES utf8mb4 -2020-03-02T15:17:15.899003Z 14 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:15.899454Z 13 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_update_core' LIMIT 1 -2020-03-02T15:17:15.899831Z 13 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_update_core', 'O:8:\"stdClass\":3:{s:7:\"updates\";a:0:{}s:15:\"version_checked\";s:5:\"5.3.2\";s:12:\"last_checked\";i:1583162235;}', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:15.900314Z 14 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:15.901088Z 14 Query SET SESSION 
sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:15.901229Z 14 Init DB wordpressdb -2020-03-02T15:17:15.901260Z 13 Query SELECT COUNT(NULLIF(`meta_value` LIKE '%\"administrator\"%', false)), COUNT(NULLIF(`meta_value` LIKE '%\"editor\"%', false)), COUNT(NULLIF(`meta_value` LIKE '%\"author\"%', false)), COUNT(NULLIF(`meta_value` LIKE '%\"contributor\"%', false)), COUNT(NULLIF(`meta_value` LIKE '%\"subscriber\"%', false)), COUNT(NULLIF(`meta_value` = 'a:0:{}', false)), COUNT(*) - FROM wp_usermeta - INNER JOIN wp_users ON user_id = ID - WHERE meta_key = 'wp_capabilities' -2020-03-02T15:17:15.902471Z 14 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:15.904540Z 14 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:15.904860Z 14 Query SELECT * FROM wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:17:15.905205Z 14 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:15.907503Z 14 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:15.908200Z 14 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:15.909633Z 14 Query SELECT option_value FROM wp_options WHERE option_name = 'db_upgraded' LIMIT 1 -2020-03-02T15:17:15.910426Z 14 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:17:15.910944Z 14 Query UPDATE `wp_options` SET `option_value` = 
'a:6:{i:1583162235;a:1:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:15.912764Z 14 Query UPDATE `wp_options` SET `option_value` = 
'a:6:{i:1583162235;a:2:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:15.914987Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_update_plugins' LIMIT 1 -2020-03-02T15:17:15.915344Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_update_themes' LIMIT 1 -2020-03-02T15:17:15.915635Z 14 Query SELECT option_value FROM wp_options WHERE option_name = 'dismissed_update_core' LIMIT 1 -2020-03-02T15:17:15.916109Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_update_core' LIMIT 1 -2020-03-02T15:17:15.916468Z 14 Query SELECT comment_approved, COUNT( * ) AS total - FROM wp_comments - - GROUP BY comment_approved -2020-03-02T15:17:15.920134Z 14 Query 
SELECT * FROM wp_posts WHERE ID = 3 LIMIT 1 -2020-03-02T15:17:15.920566Z 14 Query SELECT post_id, meta_key, meta_value FROM wp_postmeta WHERE post_id IN (3) ORDER BY meta_id ASC -2020-03-02T15:17:15.924056Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_timeout_browser_4f751766cc0d0d56b9173e9922c01a88' LIMIT 1 -2020-03-02T15:17:15.924342Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_browser_4f751766cc0d0d56b9173e9922c01a88' LIMIT 1 -2020-03-02T15:17:16.307911Z 13 Query UPDATE `wp_options` SET `option_value` = 'O:8:\"stdClass\":4:{s:7:\"updates\";a:1:{i:0;O:8:\"stdClass\":10:{s:8:\"response\";s:6:\"latest\";s:8:\"download\";s:59:\"https://downloads.wordpress.org/release/wordpress-5.3.2.zip\";s:6:\"locale\";s:5:\"en_US\";s:8:\"packages\";O:8:\"stdClass\":5:{s:4:\"full\";s:59:\"https://downloads.wordpress.org/release/wordpress-5.3.2.zip\";s:10:\"no_content\";s:70:\"https://downloads.wordpress.org/release/wordpress-5.3.2-no-content.zip\";s:11:\"new_bundled\";s:71:\"https://downloads.wordpress.org/release/wordpress-5.3.2-new-bundled.zip\";s:7:\"partial\";b:0;s:8:\"rollback\";b:0;}s:7:\"current\";s:5:\"5.3.2\";s:7:\"version\";s:5:\"5.3.2\";s:11:\"php_version\";s:6:\"5.6.20\";s:13:\"mysql_version\";s:3:\"5.0\";s:11:\"new_bundled\";s:3:\"5.3\";s:15:\"partial_version\";s:0:\"\";}}s:12:\"last_checked\";i:1583162236;s:15:\"version_checked\";s:5:\"5.3.2\";s:12:\"translations\";a:0:{}}', `autoload` = 'no' WHERE `option_name` = '_site_transient_update_core' -2020-03-02T15:17:16.310513Z 13 Query INSERT IGNORE INTO `wp_options` ( `option_name`, `option_value`, `autoload` ) VALUES ('auto_updater.lock', '1583162236', 'no') /* LOCK */ -2020-03-02T15:17:16.311659Z 13 Query SELECT option_value FROM wp_options WHERE option_name = 'auto_updater.lock' LIMIT 1 -2020-03-02T15:17:16.312159Z 13 Query UPDATE `wp_options` SET `option_value` = '1583162236' WHERE `option_name` = 'auto_updater.lock' 
-2020-03-02T15:17:16.312242Z 14 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_timeout_browser_4f751766cc0d0d56b9173e9922c01a88', '1583767036', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:16.315097Z 13 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_update_plugins' LIMIT 1 -2020-03-02T15:17:16.315229Z 14 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_browser_4f751766cc0d0d56b9173e9922c01a88', 'a:10:{s:4:\"name\";s:7:\"Firefox\";s:7:\"version\";s:4:\"73.0\";s:8:\"platform\";s:5:\"Linux\";s:10:\"update_url\";s:32:\"https://www.mozilla.org/firefox/\";s:7:\"img_src\";s:44:\"http://s.w.org/images/browsers/firefox.png?1\";s:11:\"img_src_ssl\";s:45:\"https://s.w.org/images/browsers/firefox.png?1\";s:15:\"current_version\";s:2:\"56\";s:7:\"upgrade\";b:0;s:8:\"insecure\";b:0;s:6:\"mobile\";b:0;}', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:16.315488Z 13 Query UPDATE `wp_options` SET `option_value` = 
'O:8:\"stdClass\":4:{s:12:\"last_checked\";i:1583162236;s:8:\"response\";a:0:{}s:12:\"translations\";a:0:{}s:9:\"no_update\";a:2:{s:19:\"akismet/akismet.php\";O:8:\"stdClass\":9:{s:2:\"id\";s:21:\"w.org/plugins/akismet\";s:4:\"slug\";s:7:\"akismet\";s:6:\"plugin\";s:19:\"akismet/akismet.php\";s:11:\"new_version\";s:5:\"4.1.3\";s:3:\"url\";s:38:\"https://wordpress.org/plugins/akismet/\";s:7:\"package\";s:56:\"https://downloads.wordpress.org/plugin/akismet.4.1.3.zip\";s:5:\"icons\";a:2:{s:2:\"2x\";s:59:\"https://ps.w.org/akismet/assets/icon-256x256.png?rev=969272\";s:2:\"1x\";s:59:\"https://ps.w.org/akismet/assets/icon-128x128.png?rev=969272\";}s:7:\"banners\";a:1:{s:2:\"1x\";s:61:\"https://ps.w.org/akismet/assets/banner-772x250.jpg?rev=479904\";}s:11:\"banners_rtl\";a:0:{}}s:9:\"hello.php\";O:8:\"stdClass\":9:{s:2:\"id\";s:25:\"w.org/plugins/hello-dolly\";s:4:\"slug\";s:11:\"hello-dolly\";s:6:\"plugin\";s:9:\"hello.php\";s:11:\"new_version\";s:5:\"1.7.2\";s:3:\"url\";s:42:\"https://wordpress.org/plugins/hello-dolly/\";s:7:\"package\";s:60:\"https://downloads.wordpress.org/plugin/hello-dolly.1.7.2.zip\";s:5:\"icons\";a:2:{s:2:\"2x\";s:64:\"https://ps.w.org/hello-dolly/assets/icon-256x256.jpg?rev=2052855\";s:2:\"1x\";s:64:\"https://ps.w.org/hello-dolly/assets/icon-128x128.jpg?rev=2052855\";}s:7:\"banners\";a:1:{s:2:\"1x\";s:66:\"https://ps.w.org/hello-dolly/assets/banner-772x250.jpg?rev=2052855\";}s:11:\"banners_rtl\";a:0:{}}}}', `autoload` = 'no' WHERE `option_name` = '_site_transient_update_plugins' -2020-03-02T15:17:16.316191Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_timeout_php_check_7e6c1a8668674f8cf640a3994d949207' LIMIT 1 -2020-03-02T15:17:16.316552Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_php_check_7e6c1a8668674f8cf640a3994d949207' LIMIT 1 -2020-03-02T15:17:16.677702Z 13 Query UPDATE `wp_options` SET `option_value` = 
'O:8:\"stdClass\":5:{s:12:\"last_checked\";i:1583162236;s:7:\"checked\";a:2:{s:19:\"akismet/akismet.php\";s:5:\"4.1.3\";s:9:\"hello.php\";s:5:\"1.7.2\";}s:8:\"response\";a:0:{}s:12:\"translations\";a:0:{}s:9:\"no_update\";a:2:{s:19:\"akismet/akismet.php\";O:8:\"stdClass\":9:{s:2:\"id\";s:21:\"w.org/plugins/akismet\";s:4:\"slug\";s:7:\"akismet\";s:6:\"plugin\";s:19:\"akismet/akismet.php\";s:11:\"new_version\";s:5:\"4.1.3\";s:3:\"url\";s:38:\"https://wordpress.org/plugins/akismet/\";s:7:\"package\";s:56:\"https://downloads.wordpress.org/plugin/akismet.4.1.3.zip\";s:5:\"icons\";a:2:{s:2:\"2x\";s:59:\"https://ps.w.org/akismet/assets/icon-256x256.png?rev=969272\";s:2:\"1x\";s:59:\"https://ps.w.org/akismet/assets/icon-128x128.png?rev=969272\";}s:7:\"banners\";a:1:{s:2:\"1x\";s:61:\"https://ps.w.org/akismet/assets/banner-772x250.jpg?rev=479904\";}s:11:\"banners_rtl\";a:0:{}}s:9:\"hello.php\";O:8:\"stdClass\":9:{s:2:\"id\";s:25:\"w.org/plugins/hello-dolly\";s:4:\"slug\";s:11:\"hello-dolly\";s:6:\"plugin\";s:9:\"hello.php\";s:11:\"new_version\";s:5:\"1.7.2\";s:3:\"url\";s:42:\"https://wordpress.org/plugins/hello-dolly/\";s:7:\"package\";s:60:\"https://downloads.wordpress.org/plugin/hello-dolly.1.7.2.zip\";s:5:\"icons\";a:2:{s:2:\"2x\";s:64:\"https://ps.w.org/hello-dolly/assets/icon-256x256.jpg?rev=2052855\";s:2:\"1x\";s:64:\"https://ps.w.org/hello-dolly/assets/icon-128x128.jpg?rev=2052855\";}s:7:\"banners\";a:1:{s:2:\"1x\";s:66:\"https://ps.w.org/hello-dolly/assets/banner-772x250.jpg?rev=2052855\";}s:11:\"banners_rtl\";a:0:{}}}}', `autoload` = 'no' WHERE `option_name` = '_site_transient_update_plugins' -2020-03-02T15:17:16.680502Z 13 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_timeout_theme_roots' LIMIT 1 -2020-03-02T15:17:16.680776Z 13 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_theme_roots' LIMIT 1 -2020-03-02T15:17:16.681683Z 13 Query SELECT option_value FROM wp_options WHERE option_name = 
'_site_transient_update_themes' LIMIT 1 -2020-03-02T15:17:16.682291Z 14 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_timeout_php_check_7e6c1a8668674f8cf640a3994d949207', '1583767036', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:16.682540Z 13 Query SELECT autoload FROM wp_options WHERE option_name = 'auto_updater.lock' -2020-03-02T15:17:16.682893Z 13 Query DELETE FROM `wp_options` WHERE `option_name` = 'auto_updater.lock' -2020-03-02T15:17:16.683618Z 14 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_php_check_7e6c1a8668674f8cf640a3994d949207', 'a:5:{s:19:\"recommended_version\";s:3:\"7.3\";s:15:\"minimum_version\";s:6:\"5.6.20\";s:12:\"is_supported\";b:1;s:9:\"is_secure\";b:1;s:13:\"is_acceptable\";b:1;}', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:16.683950Z 13 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:17:16.684340Z 13 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:17:16.684617Z 13 Query SELECT autoload FROM wp_options WHERE option_name = '_transient_doing_cron' -2020-03-02T15:17:16.684842Z 13 Query DELETE FROM `wp_options` WHERE `option_name` = '_transient_doing_cron' -2020-03-02T15:17:16.687292Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_timeout_' LIMIT 1 -2020-03-02T15:17:16.687431Z 13 Query SELECT autoload FROM wp_options WHERE option_name = '_transient_timeout_doing_cron' -2020-03-02T15:17:16.687756Z 13 Quit -2020-03-02T15:17:16.687769Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_' LIMIT 1 -2020-03-02T15:17:16.692794Z 14 
Query SELECT option_value FROM wp_options WHERE option_name = 'auto_core_update_failed' LIMIT 1 -2020-03-02T15:17:16.694312Z 14 Query SELECT post_status, COUNT( * ) AS num_posts FROM wp_posts WHERE post_type = 'post' GROUP BY post_status -2020-03-02T15:17:16.694818Z 14 Query SELECT post_status, COUNT( * ) AS num_posts FROM wp_posts WHERE post_type = 'page' GROUP BY post_status -2020-03-02T15:17:16.695893Z 14 Query SELECT wp_posts.ID FROM wp_posts WHERE 1=1 AND wp_posts.post_type = 'post' AND ((wp_posts.post_status = 'future')) ORDER BY wp_posts.post_date ASC LIMIT 0, 5 -2020-03-02T15:17:16.696436Z 14 Query SELECT wp_posts.ID FROM wp_posts WHERE 1=1 AND wp_posts.post_type = 'post' AND ((wp_posts.post_status = 'publish')) ORDER BY wp_posts.post_date DESC LIMIT 0, 5 -2020-03-02T15:17:16.696826Z 14 Query SELECT wp_posts.* FROM wp_posts WHERE ID IN (1) -2020-03-02T15:17:16.697295Z 14 Query SELECT t.*, tt.*, tr.object_id FROM wp_terms AS t INNER JOIN wp_term_taxonomy AS tt ON t.term_id = tt.term_id INNER JOIN wp_term_relationships AS tr ON tr.term_taxonomy_id = tt.term_taxonomy_id WHERE tt.taxonomy IN ('category', 'post_tag', 'post_format') AND tr.object_id IN (1) ORDER BY t.name ASC -2020-03-02T15:17:16.697942Z 14 Query SELECT post_id, meta_key, meta_value FROM wp_postmeta WHERE post_id IN (1) ORDER BY meta_id ASC -2020-03-02T15:17:16.699112Z 14 Query SELECT wp_comments.comment_ID FROM wp_comments WHERE ( ( comment_approved = '0' OR comment_approved = '1' ) ) ORDER BY wp_comments.comment_date_gmt DESC LIMIT 0,25 -2020-03-02T15:17:16.699460Z 14 Query SELECT wp_comments.* FROM wp_comments WHERE comment_ID IN (1) -2020-03-02T15:17:16.699992Z 14 Query SELECT comment_id, meta_key, meta_value FROM wp_commentmeta WHERE comment_id IN (1) ORDER BY meta_id ASC -2020-03-02T15:17:16.700633Z 14 Query SELECT wp_comments.comment_ID FROM wp_comments WHERE ( ( comment_approved = '0' OR comment_approved = '1' ) ) ORDER BY wp_comments.comment_date_gmt DESC LIMIT 25,50 
-2020-03-02T15:17:16.703967Z 14 Query SELECT COUNT(*) FROM wp_comments WHERE ( ( comment_approved = '0' OR comment_approved = '1' ) ) AND user_id = 1 ORDER BY wp_comments.comment_date_gmt DESC -2020-03-02T15:17:16.708236Z 14 Query SHOW FULL COLUMNS FROM `wp_posts` -2020-03-02T15:17:16.709068Z 14 Query INSERT INTO `wp_posts` (`post_author`, `post_date`, `post_date_gmt`, `post_content`, `post_content_filtered`, `post_title`, `post_excerpt`, `post_status`, `post_type`, `comment_status`, `ping_status`, `post_password`, `post_name`, `to_ping`, `pinged`, `post_modified`, `post_modified_gmt`, `post_parent`, `menu_order`, `post_mime_type`, `guid`) VALUES (1, '2020-03-02 15:17:16', '0000-00-00 00:00:00', '', '', 'Auto Draft', '', 'auto-draft', 'post', 'open', 'open', '', '', '', '', '2020-03-02 15:17:16', '0000-00-00 00:00:00', 0, 0, '', '') -2020-03-02T15:17:16.710964Z 14 Query SELECT * FROM wp_posts WHERE ID = 4 LIMIT 1 -2020-03-02T15:17:16.711330Z 14 Query SELECT t.*, tt.* FROM wp_terms AS t INNER JOIN wp_term_taxonomy AS tt ON t.term_id = tt.term_id INNER JOIN wp_term_relationships AS tr ON tr.term_taxonomy_id = tt.term_taxonomy_id WHERE tt.taxonomy IN ('category') AND tr.object_id IN (4) -2020-03-02T15:17:16.711947Z 14 Query UPDATE `wp_posts` SET `guid` = 'http://127.0.0.1/wordpress/?p=4' WHERE `ID` = 4 -2020-03-02T15:17:16.713531Z 14 Query SELECT * FROM wp_posts WHERE ID = 4 LIMIT 1 -2020-03-02T15:17:16.713985Z 14 Query SELECT t.*, tt.* FROM wp_terms AS t INNER JOIN wp_term_taxonomy AS tt ON t.term_id = tt.term_id INNER JOIN wp_term_relationships AS tr ON tr.term_taxonomy_id = tt.term_taxonomy_id WHERE tt.taxonomy IN ('category') AND tr.object_id IN (4) ORDER BY t.name ASC -2020-03-02T15:17:16.714399Z 14 Query SELECT t.*, tt.* FROM wp_terms AS t INNER JOIN wp_term_taxonomy AS tt ON t.term_id = tt.term_id INNER JOIN wp_term_relationships AS tr ON tr.term_taxonomy_id = tt.term_taxonomy_id WHERE tt.taxonomy IN ('post_tag') AND tr.object_id IN (4) ORDER BY t.name ASC 
-2020-03-02T15:17:16.714781Z 14 Query SELECT t.*, tt.* FROM wp_terms AS t INNER JOIN wp_term_taxonomy AS tt ON t.term_id = tt.term_id INNER JOIN wp_term_relationships AS tr ON tr.term_taxonomy_id = tt.term_taxonomy_id WHERE tt.taxonomy IN ('post_format') AND tr.object_id IN (4) ORDER BY t.name ASC -2020-03-02T15:17:16.715095Z 14 Query SELECT autoload FROM wp_options WHERE option_name = '_transient_is_multi_author' -2020-03-02T15:17:16.715452Z 14 Query UPDATE `wp_options` SET `option_value` = 'a:7:{i:1583162235;a:2:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583162236;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' 
-2020-03-02T15:17:16.716560Z 14 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'wp_dashboard_quick_press_last_post_id' AND user_id = 1 -2020-03-02T15:17:16.716836Z 14 Query SHOW FULL COLUMNS FROM `wp_usermeta` -2020-03-02T15:17:16.717261Z 14 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'wp_dashboard_quick_press_last_post_id', '4') -2020-03-02T15:17:16.719013Z 14 Query SELECT wp_posts.ID FROM wp_posts WHERE 1=1 AND wp_posts.post_author IN (1) AND wp_posts.post_type = 'post' AND ((wp_posts.post_status = 'draft')) ORDER BY wp_posts.post_modified DESC LIMIT 0, 4 -2020-03-02T15:17:16.719490Z 14 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:16.720163Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_timeout_dash_v2_88ae138922fe95674369b1cb3d215a2b' LIMIT 1 -2020-03-02T15:17:16.720462Z 14 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_dash_v2_88ae138922fe95674369b1cb3d215a2b' LIMIT 1 -2020-03-02T15:17:16.723149Z 14 Quit -2020-03-02T15:17:16.975855Z 15 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:16.976094Z 15 Query SET NAMES utf8mb4 -2020-03-02T15:17:16.976276Z 15 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:16.976431Z 15 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:16.976598Z 15 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:16.976764Z 15 Init DB wordpressdb -2020-03-02T15:17:16.977218Z 15 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:16.979525Z 15 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:16.979938Z 15 Query SELECT * FROM wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:17:16.980401Z 15 Query SELECT user_id, meta_key, meta_value FROM 
wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:16.982944Z 15 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:16.983590Z 15 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_timeout_doing_cron' LIMIT 1 -2020-03-02T15:17:16.983912Z 15 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:17:16.984214Z 15 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_transient_doing_cron', '1583162236.9834411144256591796875', 'yes') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:16.990926Z 16 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:16.991136Z 16 Query SET NAMES utf8mb4 -2020-03-02T15:17:16.991255Z 15 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:16.991485Z 16 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:16.991610Z 16 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:16.991818Z 16 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:16.991953Z 16 Init DB wordpressdb -2020-03-02T15:17:16.992587Z 16 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:16.995301Z 16 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:16.998866Z 16 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:16.999629Z 16 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:17.000190Z 16 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:17:17.000688Z 16 Query UPDATE `wp_options` SET `option_value` = 
'a:8:{i:1583162235;a:2:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583162236;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248635;a:1:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:17.002679Z 16 Query UPDATE `wp_options` SET `option_value` = 
'a:8:{i:1583162235;a:1:{s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583162236;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248635;a:1:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:17.003540Z 16 Query SELECT post_id FROM wp_postmeta WHERE meta_key = '_wp_trash_meta_time' AND meta_value < 1580570237 -2020-03-02T15:17:17.003934Z 16 Query SELECT comment_id FROM wp_commentmeta WHERE meta_key = '_wp_trash_meta_time' AND meta_value < 1580570237 -2020-03-02T15:17:17.004264Z 16 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:17:17.004585Z 16 Query UPDATE `wp_options` SET `option_value` = 
'a:8:{i:1583162235;a:1:{s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583162236;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248635;a:2:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:17.006163Z 16 Query UPDATE `wp_options` SET `option_value` = 
'a:7:{i:1583162236;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248635;a:2:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:17.006988Z 16 Query DELETE a, b FROM wp_options a, wp_options b - WHERE a.option_name LIKE '\\_transient\\_%' - AND a.option_name NOT LIKE '\\_transient\\_timeout\\_%' - AND b.option_name = CONCAT( '_transient_timeout_', SUBSTRING( a.option_name, 12 ) ) - AND b.option_value < 1583162237 -2020-03-02T15:17:17.007331Z 16 Query DELETE a, b FROM wp_options a, wp_options b - WHERE a.option_name LIKE '\\_site\\_transient\\_%' - AND a.option_name NOT LIKE '\\_site\\_transient\\_timeout\\_%' - 
AND b.option_name = CONCAT( '_site_transient_timeout_', SUBSTRING( a.option_name, 17 ) ) - AND b.option_value < 1583162237 -2020-03-02T15:17:17.007793Z 16 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:17:17.008118Z 16 Query UPDATE `wp_options` SET `option_value` = 'a:8:{i:1583162236;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248635;a:2:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248636;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 
'cron' -2020-03-02T15:17:17.009396Z 16 Query UPDATE `wp_options` SET `option_value` = 'a:7:{i:1583165812;a:1:{s:34:\"wp_privacy_delete_old_export_files\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:6:\"hourly\";s:4:\"args\";a:0:{}s:8:\"interval\";i:3600;}}}i:1583205412;a:2:{s:17:\"wp_update_plugins\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}s:16:\"wp_update_themes\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583205421;a:1:{s:16:\"wp_version_check\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:10:\"twicedaily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:43200;}}}i:1583248612;a:1:{s:32:\"recovery_mode_clean_expired_keys\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248635;a:2:{s:19:\"wp_scheduled_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}s:25:\"delete_expired_transients\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}i:1583248636;a:1:{s:30:\"wp_scheduled_auto_draft_delete\";a:1:{s:32:\"40cd750bba9870f18aada2478b24840a\";a:3:{s:8:\"schedule\";s:5:\"daily\";s:4:\"args\";a:0:{}s:8:\"interval\";i:86400;}}}s:7:\"version\";i:2;}' WHERE `option_name` = 'cron' -2020-03-02T15:17:17.010108Z 16 Query SELECT ID FROM wp_posts WHERE post_status = 'auto-draft' AND DATE_SUB( NOW(), INTERVAL 7 DAY ) > post_date -2020-03-02T15:17:17.010419Z 16 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 -2020-03-02T15:17:17.010605Z 16 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_doing_cron' LIMIT 1 
-2020-03-02T15:17:17.010817Z 16 Query SELECT autoload FROM wp_options WHERE option_name = '_transient_doing_cron' -2020-03-02T15:17:17.011035Z 16 Query DELETE FROM `wp_options` WHERE `option_name` = '_transient_doing_cron' -2020-03-02T15:17:17.012051Z 16 Query SELECT autoload FROM wp_options WHERE option_name = '_transient_timeout_doing_cron' -2020-03-02T15:17:17.012264Z 16 Quit -2020-03-02T15:17:17.012761Z 15 Query SELECT * FROM wp_posts WHERE ID = 3 LIMIT 1 -2020-03-02T15:17:17.013765Z 15 Query SELECT post_id, meta_key, meta_value FROM wp_postmeta WHERE post_id IN (3) ORDER BY meta_id ASC -2020-03-02T15:17:17.014179Z 15 Quit -2020-03-02T15:17:17.065445Z 17 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:17.065669Z 17 Query SET NAMES utf8mb4 -2020-03-02T15:17:17.065879Z 17 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:17.066018Z 17 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:17.066190Z 17 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:17.066335Z 17 Init DB wordpressdb -2020-03-02T15:17:17.066828Z 17 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:17.069409Z 17 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:17.069901Z 17 Query SELECT * FROM wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:17:17.070416Z 17 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:17.073190Z 17 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:17.074008Z 17 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:17.074870Z 17 Query SELECT * FROM wp_posts WHERE ID = 3 LIMIT 1 -2020-03-02T15:17:17.075371Z 17 Query SELECT post_id, meta_key, meta_value FROM wp_postmeta 
WHERE post_id IN (3) ORDER BY meta_id ASC -2020-03-02T15:17:17.075861Z 17 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_timeout_' LIMIT 1 -2020-03-02T15:17:17.076130Z 17 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_' LIMIT 1 -2020-03-02T15:17:17.127094Z 18 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:17.127352Z 18 Query SET NAMES utf8mb4 -2020-03-02T15:17:17.128611Z 18 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:17.128771Z 18 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:17.129009Z 18 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:17.129197Z 18 Init DB wordpressdb -2020-03-02T15:17:17.129758Z 18 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:17.129849Z 19 Connect wp_user@localhost on using TCP/IP -2020-03-02T15:17:17.129961Z 19 Query SET NAMES utf8mb4 -2020-03-02T15:17:17.130083Z 19 Query SET NAMES 'utf8mb4' COLLATE 'utf8mb4_unicode_520_ci' -2020-03-02T15:17:17.130174Z 19 Query SELECT @@SESSION.sql_mode -2020-03-02T15:17:17.130301Z 19 Query SET SESSION sql_mode='NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION' -2020-03-02T15:17:17.130376Z 19 Init DB wordpressdb -2020-03-02T15:17:17.130767Z 19 Query SELECT option_name, option_value FROM wp_options WHERE autoload = 'yes' -2020-03-02T15:17:17.132315Z 18 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:17.132673Z 18 Query SELECT * FROM wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:17:17.132873Z 19 Query SELECT option_value FROM wp_options WHERE option_name = 'WPLANG' LIMIT 1 -2020-03-02T15:17:17.133055Z 18 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:17.133105Z 19 Query SELECT * FROM 
wp_users WHERE user_login = 'wordpress-user' LIMIT 1 -2020-03-02T15:17:17.133437Z 19 Query SELECT user_id, meta_key, meta_value FROM wp_usermeta WHERE user_id IN (1) ORDER BY umeta_id ASC -2020-03-02T15:17:17.136165Z 18 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:17.136288Z 19 Query SELECT option_value FROM wp_options WHERE option_name = 'can_compress_scripts' LIMIT 1 -2020-03-02T15:17:17.137160Z 19 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:17.137889Z 19 Query SELECT * FROM wp_posts WHERE ID = 3 LIMIT 1 -2020-03-02T15:17:17.138244Z 19 Query SELECT post_id, meta_key, meta_value FROM wp_postmeta WHERE post_id IN (3) ORDER BY meta_id ASC -2020-03-02T15:17:17.138578Z 19 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_timeout_dash_v2_88ae138922fe95674369b1cb3d215a2b' LIMIT 1 -2020-03-02T15:17:17.138733Z 19 Query SELECT option_value FROM wp_options WHERE option_name = '_transient_dash_v2_88ae138922fe95674369b1cb3d215a2b' LIMIT 1 -2020-03-02T15:17:17.140723Z 18 Query SELECT option_value FROM wp_options WHERE option_name = 'theme_switched' LIMIT 1 -2020-03-02T15:17:17.141490Z 18 Query SELECT * FROM wp_posts WHERE ID = 3 LIMIT 1 -2020-03-02T15:17:17.141871Z 18 Query SELECT post_id, meta_key, meta_value FROM wp_postmeta WHERE post_id IN (3) ORDER BY meta_id ASC -2020-03-02T15:17:17.142231Z 18 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('can_compress_scripts', '0', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:17.144550Z 18 Quit -2020-03-02T15:17:17.168484Z 19 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_transient_timeout_dash_v2_88ae138922fe95674369b1cb3d215a2b', '1583205437', 'no') ON DUPLICATE KEY UPDATE `option_name` = 
VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:17.171198Z 19 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_transient_dash_v2_88ae138922fe95674369b1cb3d215a2b', '
  • An error has occurred, which probably means the feed is down. Try again later.
  • An error has occurred, which probably means the feed is down. Try again later.
', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:17.171963Z 19 Quit -2020-03-02T15:17:17.604643Z 17 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_community-events-1aecf33ab8525ff212ebdffbb438372e' LIMIT 1 -2020-03-02T15:17:17.604952Z 17 Query SELECT option_value FROM wp_options WHERE option_name = '_site_transient_timeout_community-events-1aecf33ab8525ff212ebdffbb438372e' LIMIT 1 -2020-03-02T15:17:17.605221Z 17 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_timeout_community-events-1aecf33ab8525ff212ebdffbb438372e', '1583205437', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:17.607075Z 17 Query SHOW FULL COLUMNS FROM `wp_options` -2020-03-02T15:17:17.607684Z 17 Query INSERT INTO `wp_options` (`option_name`, `option_value`, `autoload`) VALUES ('_site_transient_community-events-1aecf33ab8525ff212ebdffbb438372e', 'a:3:{s:9:\"sandboxed\";b:0;s:8:\"location\";a:1:{s:2:\"ip\";s:9:\"127.0.0.0\";}s:6:\"events\";a:2:{i:0;a:8:{s:4:\"type\";s:6:\"meetup\";s:5:\"title\";s:22:\"Marts WordPress Meetup\";s:3:\"url\";s:61:\"https://www.meetup.com/WordPress-Copenhagen/events/268822657/\";s:6:\"meetup\";s:20:\"WordPress Copenhagen\";s:10:\"meetup_url\";s:44:\"https://www.meetup.com/WordPress-Copenhagen/\";s:4:\"date\";s:19:\"2020-03-17 17:00:00\";s:8:\"end_date\";s:19:\"2020-03-17 19:00:00\";s:8:\"location\";a:4:{s:8:\"location\";s:19:\"København, Denmark\";s:7:\"country\";s:2:\"dk\";s:8:\"latitude\";d:55.668365478516;s:9:\"longitude\";d:12.541541099548;}}i:1;a:8:{s:4:\"type\";s:8:\"wordcamp\";s:5:\"title\";s:23:\"WordCamp Retreat Soltau\";s:3:\"url\";s:40:\"https://2020-soltau.retreat.wordcamp.org\";s:6:\"meetup\";N;s:10:\"meetup_url\";N;s:4:\"date\";s:19:\"2020-04-30 
00:00:00\";s:8:\"end_date\";s:19:\"2020-05-03 00:00:00\";s:8:\"location\";a:4:{s:8:\"location\";s:6:\"Soltau\";s:7:\"country\";s:2:\"DE\";s:8:\"latitude\";d:53.0016247;s:9:\"longitude\";d:9.8596896;}}}}', 'no') ON DUPLICATE KEY UPDATE `option_name` = VALUES(`option_name`), `option_value` = VALUES(`option_value`), `autoload` = VALUES(`autoload`) -2020-03-02T15:17:17.609070Z 17 Query SELECT umeta_id FROM wp_usermeta WHERE meta_key = 'community-events-location' AND user_id = 1 -2020-03-02T15:17:17.609425Z 17 Query SHOW FULL COLUMNS FROM `wp_usermeta` -2020-03-02T15:17:17.609906Z 17 Query INSERT INTO `wp_usermeta` (`user_id`, `meta_key`, `meta_value`) VALUES (1, 'community-events-location', 'a:1:{s:2:\"ip\";s:9:\"127.0.0.0\";}') -2020-03-02T15:17:17.610664Z 17 Quit \ No newline at end of file diff --git a/go/test/endtoend/apps/wordpress/wordpress.cnf b/go/test/endtoend/apps/wordpress/wordpress.cnf deleted file mode 100644 index d7a0576f1fb..00000000000 --- a/go/test/endtoend/apps/wordpress/wordpress.cnf +++ /dev/null @@ -1 +0,0 @@ -sql_mode = NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES \ No newline at end of file diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 0a128f47a50..bfd00fce818 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -296,9 +296,10 @@ func tearDown(t *testing.T, initMysql bool) { //Tear down Tablet //err := tablet.VttabletProcess.TearDown() //require.Nil(t, err) - err := localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "-allow_master", tablet.Alias) - require.Nil(t, err) resetTabletDirectory(t, tablet, initMysql) + // DeleteTablet on a primary will cause tablet to shutdown, so should only call it after tablet is already shut down + err := localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "-allow_master", 
tablet.Alias) + require.Nil(t, err) } } diff --git a/go/test/endtoend/cluster/vtctlclient_process.go b/go/test/endtoend/cluster/vtctlclient_process.go index 1fd5d9ef00b..99b68b7cba4 100644 --- a/go/test/endtoend/cluster/vtctlclient_process.go +++ b/go/test/endtoend/cluster/vtctlclient_process.go @@ -21,6 +21,8 @@ import ( "os/exec" "strings" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/log" ) @@ -61,9 +63,10 @@ func (vtctlclient *VtctlClientProcess) ApplySchemaWithOutput(Keyspace string, SQ } // ApplySchema applies SQL schema to the keyspace -func (vtctlclient *VtctlClientProcess) ApplySchema(Keyspace string, SQL string) (err error) { - _, err = vtctlclient.ApplySchemaWithOutput(Keyspace, SQL, "direct") - return err +func (vtctlclient *VtctlClientProcess) ApplySchema(Keyspace string, SQL string) error { + message, err := vtctlclient.ApplySchemaWithOutput(Keyspace, SQL, "direct") + + return vterrors.Wrap(err, message) } // ApplyVSchema applies vitess schema (JSON format) to the keyspace @@ -119,6 +122,25 @@ func (vtctlclient *VtctlClientProcess) OnlineDDLRetryMigration(Keyspace, uuid st ) } +// OnlineDDLRevertMigration reverts a given migration uuid +func (vtctlclient *VtctlClientProcess) OnlineDDLRevertMigration(Keyspace, uuid string) (result string, err error) { + return vtctlclient.ExecuteCommandWithOutput( + "OnlineDDL", + Keyspace, + "revert", + uuid, + ) +} + +// VExec runs a VExec query +func (vtctlclient *VtctlClientProcess) VExec(Keyspace, workflow, query string) (result string, err error) { + return vtctlclient.ExecuteCommandWithOutput( + "VExec", + fmt.Sprintf("%s.%s", Keyspace, workflow), + query, + ) +} + // ExecuteCommand executes any vtctlclient command func (vtctlclient *VtctlClientProcess) ExecuteCommand(args ...string) (err error) { output, err := vtctlclient.ExecuteCommandWithOutput(args...) 
diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go index f886951f101..8ddba286775 100644 --- a/go/test/endtoend/clustertest/vttablet_test.go +++ b/go/test/endtoend/clustertest/vttablet_test.go @@ -24,6 +24,8 @@ import ( "net/http" "testing" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/cluster" ) @@ -42,3 +44,11 @@ func TestVttabletProcess(t *testing.T) { t.Errorf("select:\n%v want\n%v for %s", got, want, "Keyspace of tablet should match") } } + +func TestDeleteTablet(t *testing.T) { + defer cluster.PanicHandler(t) + primary := clusterInstance.Keyspaces[0].Shards[0].MasterTablet() + require.NotNil(t, primary) + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteTablet", "-allow_master", primary.Alias) + require.Nil(t, err, "Error: %v", err) +} diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go index 958bebcfd3b..2c8172c36c6 100644 --- a/go/test/endtoend/mysqlserver/mysql_server_test.go +++ b/go/test/endtoend/mysqlserver/mysql_server_test.go @@ -80,7 +80,7 @@ func TestLargeComment(t *testing.T) { qr, err := conn.ExecuteFetch("select * from vt_insert_test where id = 1", 1, false) require.Nilf(t, err, "select error: %v", err) - assert.Equal(t, uint64(1), qr.RowsAffected) + assert.Equal(t, 1, len(qr.Rows)) assert.Equal(t, "BLOB(\"LLL\")", qr.Rows[0][3].String()) } @@ -148,11 +148,11 @@ func TestWarnings(t *testing.T) { // validate warning with invalid_field error as warning qr, err := conn.ExecuteFetch("SELECT /*vt+ SCATTER_ERRORS_AS_WARNINGS */ invalid_field from vt_insert_test;", 1, false) require.Nilf(t, err, "select error : %v", err) - assert.Equalf(t, uint64(0), qr.RowsAffected, "query should return 0 rows, got %v", qr.RowsAffected) + assert.Empty(t, qr.Rows, "number of rows") qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) require.Nilf(t, err, "SHOW WARNINGS; execution 
failed: %v", err) - assert.Equalf(t, uint64(1), qr.RowsAffected, "1 warning expected, got %v ", qr.RowsAffected) + assert.EqualValues(t, 1, len(qr.Rows), "number of rows") assert.Contains(t, qr.Rows[0][0].String(), "VARCHAR(\"Warning\")", qr.Rows) assert.Contains(t, qr.Rows[0][1].String(), "UINT16(1054)", qr.Rows) assert.Contains(t, qr.Rows[0][2].String(), "Unknown column", qr.Rows) @@ -160,11 +160,11 @@ func TestWarnings(t *testing.T) { // validate warning with query_timeout error as warning qr, err = conn.ExecuteFetch("SELECT /*vt+ SCATTER_ERRORS_AS_WARNINGS QUERY_TIMEOUT_MS=1 */ sleep(1) from vt_insert_test;", 1, false) require.Nilf(t, err, "insertion error : %v", err) - assert.Equalf(t, uint64(0), qr.RowsAffected, "should return 0 rows, got %v", qr.RowsAffected) + assert.Empty(t, qr.Rows, "number of rows") qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) require.Nilf(t, err, "SHOW WARNINGS; execution failed: %v", err) - assert.Equalf(t, uint64(1), qr.RowsAffected, "1 warning expected, got %v ", qr.RowsAffected) + assert.EqualValues(t, 1, len(qr.Rows), "number of rows") assert.Contains(t, qr.Rows[0][0].String(), "VARCHAR(\"Warning\")", qr.Rows) assert.Contains(t, qr.Rows[0][1].String(), "UINT16(1317)", qr.Rows) assert.Contains(t, qr.Rows[0][2].String(), "context deadline exceeded", qr.Rows) @@ -175,7 +175,7 @@ func TestWarnings(t *testing.T) { qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) require.Nilf(t, err, "SHOW WARNINGS; execution failed: %v", err) - assert.Equalf(t, uint64(0), qr.RowsAffected, "0 warning expected, got %v ", qr.RowsAffected) + assert.Empty(t, len(qr.Rows), "number of rows") } // TestSelectWithUnauthorizedUser verifies that an unauthorized user diff --git a/go/test/endtoend/onlineddl/declarative/onlineddl_declarative_test.go b/go/test/endtoend/onlineddl/declarative/onlineddl_declarative_test.go new file mode 100644 index 00000000000..dd37ba18cee --- /dev/null +++ 
b/go/test/endtoend/onlineddl/declarative/onlineddl_declarative_test.go @@ -0,0 +1,619 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revert + +import ( + "context" + "flag" + "fmt" + "math/rand" + "os" + "path" + "strings" + "sync" + "testing" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type WriteMetrics struct { + mu sync.Mutex + insertsAttempts, insertsFailures, insertsNoops, inserts int64 + updatesAttempts, updatesFailures, updatesNoops, updates int64 + deletesAttempts, deletesFailures, deletesNoops, deletes int64 +} + +func (w *WriteMetrics) Clear() { + w.mu.Lock() + defer w.mu.Unlock() + + w.inserts = 0 + w.updates = 0 + w.deletes = 0 + + w.insertsAttempts = 0 + w.insertsFailures = 0 + w.insertsNoops = 0 + + w.updatesAttempts = 0 + w.updatesFailures = 0 + w.updatesNoops = 0 + + w.deletesAttempts = 0 + w.deletesFailures = 0 + w.deletesNoops = 0 +} + +func (w *WriteMetrics) String() string { + return fmt.Sprintf(`WriteMetrics: inserts-deletes=%d, updates-deletes=%d, +insertsAttempts=%d, insertsFailures=%d, insertsNoops=%d, inserts=%d, +updatesAttempts=%d, updatesFailures=%d, updatesNoops=%d, updates=%d, +deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, 
+`, + w.inserts-w.deletes, w.updates-w.deletes, + w.insertsAttempts, w.insertsFailures, w.insertsNoops, w.inserts, + w.updatesAttempts, w.updatesFailures, w.updatesNoops, w.updates, + w.deletesAttempts, w.deletesFailures, w.deletesNoops, w.deletes, + ) +} + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + tableName = `stress_test` + createStatement1 = ` + CREATE TABLE stress_test ( + id bigint(20) not null, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default 'create1', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key created_idx(created_timestamp), + key updates_idx(updates) + ) ENGINE=InnoDB + ` + createStatement2 = ` + CREATE TABLE stress_test ( + id bigint(20) not null, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default 'create2', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key created_idx(created_timestamp), + key updates_idx(updates) + ) ENGINE=InnoDB + ` + createIfNotExistsStatement = ` + CREATE TABLE IF NOT EXISTS stress_test ( + id bigint(20) not null, + PRIMARY KEY (id) + ) ENGINE=InnoDB + ` + dropStatement = ` + DROP TABLE stress_test + ` + alterStatement = ` + ALTER TABLE stress_test modify hint_col varchar(64) not null default 'this-should-fail' + ` + insertRowStatement = ` + INSERT IGNORE INTO stress_test (id, rand_val) VALUES (%d, left(md5(rand()), 8)) + ` + updateRowStatement = ` + UPDATE stress_test SET updates=updates+1 WHERE id=%d + ` + deleteRowStatement = ` + DELETE FROM stress_test WHERE id=%d AND updates=1 + ` + // We use CAST(SUM(updates) AS SIGNED) because SUM() returns a DECIMAL datatype, and we want to read a SIGNED INTEGER type + selectCountRowsStatement = ` + SELECT COUNT(*) AS 
num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM stress_test + ` + truncateStatement = ` + TRUNCATE TABLE stress_test + ` + writeMetrics WriteMetrics +) + +const ( + maxTableRows = 4096 +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "-schema_change_dir", schemaChangeDirectory, + "-schema_change_controller", "local", + "-schema_change_check_interval", "1"} + + clusterInstance.VtTabletExtraArgs = []string{ + "-enable-lag-throttler", + "-throttle_threshold", "1s", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + "-migration_check_interval", "5s", + } + clusterInstance.VtGateExtraArgs = []string{ + "-ddl_strategy", "online", + } + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + } + + // No need for replicas in this stress test + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // set the gateway we want to use + vtgateInstance.GatewayImplementation = "tabletgateway" + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) 
+ os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func TestSchemaChange(t *testing.T) { + defer cluster.PanicHandler(t) + shards := clusterInstance.Keyspaces[0].Shards + require.Equal(t, 1, len(shards)) + + declarativeStrategy := "online -declarative" + var uuids []string + + // CREATE1 + t.Run("declarative CREATE TABLE where table does not exist", func(t *testing.T) { + // The table does not exist + uuid := testOnlineDDLStatement(t, createStatement1, declarativeStrategy, "vtgate", "create1") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) + checkTable(t, tableName, true) + initTable(t) + testSelectTableMetrics(t) + }) + // CREATE1 again, noop + t.Run("declarative CREATE TABLE with no changes where table exists", func(t *testing.T) { + // The exists with exact same schema + uuid := testOnlineDDLStatement(t, createStatement1, declarativeStrategy, "vtgate", "create1") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, false) + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("revert CREATE TABLE expecting noop", func(t *testing.T) { + // Reverting a noop changes nothing + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkMigratedTable(t, tableName, "create1") + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("declarative DROP TABLE", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, dropStatement, declarativeStrategy, "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, 
shards, uuid, true) + checkTable(t, tableName, false) + }) + // Table dropped. Let's start afresh. + + // CREATE1 + t.Run("declarative CREATE TABLE where table does not exist", func(t *testing.T) { + // The table does not exist + uuid := testOnlineDDLStatement(t, createStatement1, declarativeStrategy, "vtgate", "create1") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) + checkTable(t, tableName, true) + initTable(t) + testSelectTableMetrics(t) + }) + // CREATE2: Change schema + t.Run("declarative CREATE TABLE with changes where table exists", func(t *testing.T) { + // The table exists with different schema + uuid := testOnlineDDLStatement(t, createStatement2, declarativeStrategy, "vtgate", "create2") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("revert CREATE TABLE expecting previous schema", func(t *testing.T) { + // Reverting back to 1st version + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkMigratedTable(t, tableName, "create1") + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("declarative DROP TABLE", func(t *testing.T) { + // Table exists + uuid := testOnlineDDLStatement(t, dropStatement, declarativeStrategy, "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) + checkTable(t, tableName, false) + }) + t.Run("revert DROP TABLE", func(t *testing.T) { + // This will recreate the table (well, 
actually, rename it back into place) + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + checkMigratedTable(t, tableName, "create1") + testSelectTableMetrics(t) + }) + t.Run("revert revert DROP TABLE", func(t *testing.T) { + // This will reapply DROP TABLE + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("declarative DROP TABLE where table does not exist", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, dropStatement, declarativeStrategy, "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, false) + checkTable(t, tableName, false) + }) + t.Run("revert DROP TABLE where table did not exist", func(t *testing.T) { + // Table will not be recreated because it didn't exist during the previous DROP TABLE + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + // Table dropped. Let's start afresh. 
+ + // CREATE1 + t.Run("declarative CREATE TABLE where table does not exist", func(t *testing.T) { + // The table does not exist + uuid := testOnlineDDLStatement(t, createStatement1, declarativeStrategy, "vtgate", "create1") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) + checkTable(t, tableName, true) + initTable(t) + testSelectTableMetrics(t) + }) + // CREATE2 + t.Run("declarative CREATE TABLE with changes where table exists", func(t *testing.T) { + // The exists but with different schema + uuid := testOnlineDDLStatement(t, createStatement2, declarativeStrategy, "vtgate", "create2") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + // CREATE1 again + t.Run("declarative CREATE TABLE again with changes where table exists", func(t *testing.T) { + // The exists but with different schema + uuid := testOnlineDDLStatement(t, createStatement1, declarativeStrategy, "vtgate", "create1") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("revert CREATE TABLE expecting previous schema", func(t *testing.T) { + // Reverting back to previous version + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkMigratedTable(t, tableName, "create2") + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("ALTER TABLE expecting failure", func(t *testing.T) { + // ALTER is not 
supported in -declarative + uuid := testOnlineDDLStatement(t, alterStatement, declarativeStrategy, "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + checkMigratedTable(t, tableName, "create2") + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("CREATE TABLE IF NOT EXISTS expecting failure", func(t *testing.T) { + // IF NOT EXISTS is not supported in -declarative + uuid := testOnlineDDLStatement(t, createIfNotExistsStatement, declarativeStrategy, "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + checkMigratedTable(t, tableName, "create2") + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("CREATE TABLE IF NOT EXISTS non-declarative is successful", func(t *testing.T) { + // IF NOT EXISTS is supported in non-declarative mode. Just verifying that the statement itself is good, + // so that the failure we tested for, above, actually tests the "declarative" logic, rather than some + // unrelated error. 
+ uuid := testOnlineDDLStatement(t, createIfNotExistsStatement, "online", "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + // the table existed, so we expect no changes in this non-declarative DDL + checkMigratedTable(t, tableName, "create2") + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string) (uuid string) { + if executeStrategy == "vtgate" { + row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, "").Named().Row() + if row != nil { + uuid = row.AsString("uuid", "") + } + } else { + var err error + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, ddlStrategy) + assert.NoError(t, err) + } + uuid = strings.TrimSpace(uuid) + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + assert.NoError(t, err) + + if !strategy.IsDirect() { + time.Sleep(time.Second * 20) + } + + if expectHint != "" { + checkMigratedTable(t, tableName, expectHint) + } + return uuid +} + +// testRevertMigration reverts a given migration +func testRevertMigration(t *testing.T, revertUUID string) (uuid string) { + revertQuery := fmt.Sprintf("revert vitess_migration '%s'", revertUUID) + r := onlineddl.VtgateExecQuery(t, &vtParams, revertQuery, "") + + row := r.Named().Row() + require.NotNil(t, row) + + uuid = row["uuid"].ToString() + + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + time.Sleep(time.Second * 20) + return uuid +} + +// checkTable checks the number of tables in the first two shards. 
+func checkTable(t *testing.T, showTableName string, expectExists bool) bool { + expectCount := 0 + if expectExists { + expectCount = 1 + } + for i := range clusterInstance.Keyspaces[0].Shards { + if !checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) { + return false + } + } + return true +} + +// checkTablesCount checks the number of tables in the given tablet +func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) bool { + query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) + queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.Nil(t, err) + return assert.Equal(t, expectCount, len(queryResult.Rows)) +} + +// checkMigratedTables checks the CREATE STATEMENT of a table after migration +func checkMigratedTable(t *testing.T, tableName, expectHint string) { + for i := range clusterInstance.Keyspaces[0].Shards { + createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) + assert.Contains(t, createStatement, expectHint) + } +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { + queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) + require.Nil(t, err) + + assert.Equal(t, len(queryResult.Rows), 1) + assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement + statement = queryResult.Rows[0][1].ToString() + return statement +} + +func generateInsert(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(insertRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.insertsAttempts++ + if err != nil { + 
writeMetrics.insertsFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.insertsNoops++ + return + } + writeMetrics.inserts++ + }() + return err +} + +func generateUpdate(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(updateRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.updatesAttempts++ + if err != nil { + writeMetrics.updatesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.updatesNoops++ + return + } + writeMetrics.updates++ + }() + return err +} + +func generateDelete(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(deleteRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.deletesAttempts++ + if err != nil { + writeMetrics.deletesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.deletesNoops++ + return + } + writeMetrics.deletes++ + }() + return err +} + +func initTable(t *testing.T) { + log.Infof("initTable begin") + defer log.Infof("initTable complete") + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + writeMetrics.Clear() + _, err = conn.ExecuteFetch(truncateStatement, 1000, true) + require.Nil(t, err) + + for i := 0; i < maxTableRows/2; i++ { + generateInsert(t, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateUpdate(t, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateDelete(t, conn) + } +} + +func testSelectTableMetrics(t *testing.T) { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + log.Infof("%s", writeMetrics.String()) + + ctx := 
context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + rs, err := conn.ExecuteFetch(selectCountRowsStatement, 1000, true) + require.Nil(t, err) + + row := rs.Named().Row() + require.NotNil(t, row) + log.Infof("testSelectTableMetrics, row: %v", row) + numRows := row.AsInt64("num_rows", 0) + sumUpdates := row.AsInt64("sum_updates", 0) + + assert.NotZero(t, numRows) + assert.NotZero(t, sumUpdates) + assert.NotZero(t, writeMetrics.inserts) + assert.NotZero(t, writeMetrics.deletes) + assert.NotZero(t, writeMetrics.updates) + assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows) + assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1 +} diff --git a/go/test/endtoend/onlineddl/onlineddl_test.go b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go similarity index 60% rename from go/test/endtoend/onlineddl/onlineddl_test.go rename to go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go index 55dcfecef58..67fd294d700 100644 --- a/go/test/endtoend/onlineddl/onlineddl_test.go +++ b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go @@ -14,25 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package onlineddl +package ghost import ( - "context" "flag" "fmt" "os" "path" - "regexp" "strings" "sync" "testing" "time" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -85,14 +83,52 @@ var ( DROP TABLE %s` onlineDDLDropTableIfExistsStatement = ` DROP TABLE IF EXISTS %s` -) -func fullWordUUIDRegexp(uuid, searchWord string) *regexp.Regexp { - return regexp.MustCompile(uuid + `.*?\b` + searchWord + `\b`) -} -func fullWordRegexp(searchWord string) *regexp.Regexp { - return regexp.MustCompile(`.*?\b` + searchWord + `\b`) -} + vSchema = ` + { + "sharded": true, + "vindexes": { + "hash_index": { + "type": "hash" + } + }, + "tables": { + "vt_onlineddl_test_00": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + }, + "vt_onlineddl_test_01": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + }, + "vt_onlineddl_test_02": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + }, + "vt_onlineddl_test_03": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + } + } + } + ` +) func TestMain(m *testing.M) { defer cluster.PanicHandler(nil) @@ -127,13 +163,11 @@ func TestMain(m *testing.M) { // Start keyspace keyspace := &cluster.Keyspace{ - Name: keyspaceName, + Name: keyspaceName, + VSchema: vSchema, } - if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 2, true); err != nil { - return 1, err - } - if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 1, false); err != nil { + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1, err } @@ -164,41 +198,42 @@ func TestMain(m *testing.M) { func TestSchemaChange(t *testing.T) { defer cluster.PanicHandler(t) - 
assert.Equal(t, 2, len(clusterInstance.Keyspaces[0].Shards)) + shards := clusterInstance.Keyspaces[0].Shards + assert.Equal(t, 2, len(shards)) testWithInitialSchema(t) t.Run("create non_online", func(t *testing.T) { _ = testOnlineDDLStatement(t, alterTableNormalStatement, string(schema.DDLStrategyDirect), "vtctl", "non_online") }) t.Run("successful online alter, vtgate", func(t *testing.T) { uuid := testOnlineDDLStatement(t, alterTableSuccessfulStatement, "gh-ost", "vtgate", "ghost_col") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) t.Run("successful online alter, vtctl", func(t *testing.T) { uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost", "vtctl", "ghost_col") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) t.Run("throttled migration", func(t *testing.T) { uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusRunning) - checkCancelMigration(t, uuid, true) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) time.Sleep(2 * time.Second) - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusFailed) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, 
schema.OnlineDDLStatusFailed) }) t.Run("failed migration", func(t *testing.T) { uuid := testOnlineDDLStatement(t, alterTableFailedStatement, "gh-ost", "vtgate", "ghost_col") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusFailed) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, true) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) // migration will fail again }) t.Run("cancel all migrations: nothing to cancel", func(t *testing.T) { // no migrations pending at this time time.Sleep(10 * time.Second) - checkCancelAllMigrations(t, 0) + onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) }) t.Run("cancel all migrations: some migrations to cancel", func(t *testing.T) { // spawn n migrations; cancel them via cancel-all @@ -212,41 +247,41 @@ func TestSchemaChange(t *testing.T) { }() } wg.Wait() - checkCancelAllMigrations(t, count) + onlineddl.CheckCancelAllMigrations(t, &vtParams, len(shards)*count) }) t.Run("Online DROP, vtctl", func(t *testing.T) { uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "gh-ost", "vtctl", "") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) t.Run("Online CREATE, vtctl", func(t *testing.T) { uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "gh-ost", "vtctl", "online_ddl_create_col") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, 
schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) t.Run("Online DROP TABLE IF EXISTS, vtgate", func(t *testing.T) { uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "gh-ost", "vtgate", "") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) // this table existed checkTables(t, schema.OnlineDDLToGCUUID(uuid), 1) }) t.Run("Online DROP TABLE IF EXISTS for nonexistent table, vtgate", func(t *testing.T) { uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "gh-ost", "vtgate", "") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusComplete) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) // this table did not exist checkTables(t, schema.OnlineDDLToGCUUID(uuid), 0) }) t.Run("Online DROP TABLE for nonexistent table, expect error, vtgate", func(t *testing.T) { uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "gh-ost", "vtgate", "") - checkRecentMigrations(t, uuid, schema.OnlineDDLStatusFailed) - checkCancelMigration(t, uuid, false) - checkRetryMigration(t, uuid, true) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) }) } @@ -268,7 +303,7 @@ func 
testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) sqlQuery := fmt.Sprintf(alterStatement, tableName) if executeStrategy == "vtgate" { - row := vtgateExec(t, ddlStrategy, sqlQuery, "").Named().Row() + row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, sqlQuery, "").Named().Row() if row != nil { uuid = row.AsString("uuid", "") } @@ -308,72 +343,6 @@ func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName stri assert.Equal(t, expectCount, len(queryResult.Rows)) } -// checkRecentMigrations checks 'OnlineDDL show recent' output. Example to such output: -// +------------------+-------+--------------+----------------------+--------------------------------------+----------+---------------------+---------------------+------------------+ -// | Tablet | shard | mysql_schema | mysql_table | migration_uuid | strategy | started_timestamp | completed_timestamp | migration_status | -// +------------------+-------+--------------+----------------------+--------------------------------------+----------+---------------------+---------------------+------------------+ -// | zone1-0000003880 | 0 | vt_ks | vt_onlineddl_test_03 | a0638f6b_ec7b_11ea_9bf8_000d3a9b8a9a | gh-ost | 2020-09-01 17:50:40 | 2020-09-01 17:50:41 | complete | -// | zone1-0000003884 | 1 | vt_ks | vt_onlineddl_test_03 | a0638f6b_ec7b_11ea_9bf8_000d3a9b8a9a | gh-ost | 2020-09-01 17:50:40 | 2020-09-01 17:50:41 | complete | -// +------------------+-------+--------------+----------------------+--------------------------------------+----------+---------------------+---------------------+------------------+ - -func checkRecentMigrations(t *testing.T, uuid string, expectStatus schema.OnlineDDLStatus) { - result, err := clusterInstance.VtctlclientProcess.OnlineDDLShowRecent(keyspaceName) - assert.NoError(t, err) - fmt.Println("# 'vtctlclient OnlineDDL show recent' output (for debug purposes):") - fmt.Println(result) - 
assert.Equal(t, len(clusterInstance.Keyspaces[0].Shards), strings.Count(result, uuid)) - // We ensure "full word" regexp becuase some column names may conflict - expectStatusRegexp := fullWordUUIDRegexp(uuid, string(expectStatus)) - m := expectStatusRegexp.FindAllString(result, -1) - assert.Equal(t, len(clusterInstance.Keyspaces[0].Shards), len(m)) -} - -// checkCancelMigration attempts to cancel a migration, and expects rejection -func checkCancelMigration(t *testing.T, uuid string, expectCancelPossible bool) { - result, err := clusterInstance.VtctlclientProcess.OnlineDDLCancelMigration(keyspaceName, uuid) - fmt.Println("# 'vtctlclient OnlineDDL cancel ' output (for debug purposes):") - fmt.Println(result) - assert.NoError(t, err) - - var r *regexp.Regexp - if expectCancelPossible { - r = fullWordRegexp("1") - } else { - r = fullWordRegexp("0") - } - m := r.FindAllString(result, -1) - assert.Equal(t, len(clusterInstance.Keyspaces[0].Shards), len(m)) -} - -// checkCancelAllMigrations all pending migrations -func checkCancelAllMigrations(t *testing.T, expectCount int) { - result, err := clusterInstance.VtctlclientProcess.OnlineDDLCancelAllMigrations(keyspaceName) - fmt.Println("# 'vtctlclient OnlineDDL cancel-all' output (for debug purposes):") - fmt.Println(result) - assert.NoError(t, err) - - r := fullWordRegexp(fmt.Sprintf("%d", expectCount)) - m := r.FindAllString(result, -1) - assert.Equal(t, len(clusterInstance.Keyspaces[0].Shards), len(m)) -} - -// checkRetryMigration attempts to retry a migration, and expects rejection -func checkRetryMigration(t *testing.T, uuid string, expectRetryPossible bool) { - result, err := clusterInstance.VtctlclientProcess.OnlineDDLRetryMigration(keyspaceName, uuid) - fmt.Println("# 'vtctlclient OnlineDDL retry ' output (for debug purposes):") - fmt.Println(result) - assert.NoError(t, err) - - var r *regexp.Regexp - if expectRetryPossible { - r = fullWordRegexp("1") - } else { - r = fullWordRegexp("0") - } - m := 
r.FindAllString(result, -1) - assert.Equal(t, len(clusterInstance.Keyspaces[0].Shards), len(m)) -} - // checkMigratedTables checks the CREATE STATEMENT of a table after migration func checkMigratedTable(t *testing.T, tableName, expectColumn string) { for i := range clusterInstance.Keyspaces[0].Shards { @@ -392,25 +361,3 @@ func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName s statement = queryResult.Rows[0][1].ToString() return statement } - -func vtgateExec(t *testing.T, ddlStrategy string, query string, expectError string) *sqltypes.Result { - t.Helper() - - ctx := context.Background() - conn, err := mysql.Connect(ctx, &vtParams) - require.Nil(t, err) - defer conn.Close() - - setSession := fmt.Sprintf("set @@ddl_strategy='%s'", ddlStrategy) - _, err = conn.ExecuteFetch(setSession, 1000, true) - assert.NoError(t, err) - - qr, err := conn.ExecuteFetch(query, 1000, true) - if expectError == "" { - require.NoError(t, err) - } else { - require.Error(t, err, "error should not be nil") - assert.Contains(t, err.Error(), expectError, "Unexpected error") - } - return qr -} diff --git a/go/test/endtoend/onlineddl/query_util.go b/go/test/endtoend/onlineddl/query_util.go new file mode 100644 index 00000000000..dc764d820db --- /dev/null +++ b/go/test/endtoend/onlineddl/query_util.go @@ -0,0 +1,61 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package onlineddl + +import ( + "io" + "strings" + + "vitess.io/vitess/go/sqltypes" + + "github.com/olekukonko/tablewriter" +) + +// PrintQueryResult will pretty-print a QueryResult to the logger. +func PrintQueryResult(writer io.Writer, qr *sqltypes.Result) { + if qr == nil { + return + } + if len(qr.Rows) == 0 { + return + } + + table := tablewriter.NewWriter(writer) + table.SetAutoFormatHeaders(false) + + // Make header. + header := make([]string, 0, len(qr.Fields)) + for _, field := range qr.Fields { + header = append(header, field.Name) + } + table.SetHeader(header) + + // Add rows. + for _, row := range qr.Rows { + vals := make([]string, 0, len(row)) + for _, val := range row { + v := val.ToString() + v = strings.ReplaceAll(v, "\r", " ") + v = strings.ReplaceAll(v, "\n", " ") + vals = append(vals, v) + } + table.Append(vals) + } + + // Print table. + table.Render() +} diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go new file mode 100644 index 00000000000..d5c0ddaa98c --- /dev/null +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -0,0 +1,727 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revert + +import ( + "context" + "flag" + "fmt" + "math/rand" + "os" + "path" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type WriteMetrics struct { + mu sync.Mutex + insertsAttempts, insertsFailures, insertsNoops, inserts int64 + updatesAttempts, updatesFailures, updatesNoops, updates int64 + deletesAttempts, deletesFailures, deletesNoops, deletes int64 +} + +func (w *WriteMetrics) Clear() { + w.mu.Lock() + defer w.mu.Unlock() + + w.inserts = 0 + w.updates = 0 + w.deletes = 0 + + w.insertsAttempts = 0 + w.insertsFailures = 0 + w.insertsNoops = 0 + + w.updatesAttempts = 0 + w.updatesFailures = 0 + w.updatesNoops = 0 + + w.deletesAttempts = 0 + w.deletesFailures = 0 + w.deletesNoops = 0 +} + +func (w *WriteMetrics) String() string { + return fmt.Sprintf(`WriteMetrics: inserts-deletes=%d, updates-deletes=%d, +insertsAttempts=%d, insertsFailures=%d, insertsNoops=%d, inserts=%d, +updatesAttempts=%d, updatesFailures=%d, updatesNoops=%d, updates=%d, +deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, +`, + w.inserts-w.deletes, w.updates-w.deletes, + w.insertsAttempts, w.insertsFailures, w.insertsNoops, w.inserts, + w.updatesAttempts, w.updatesFailures, w.updatesNoops, w.updates, + w.deletesAttempts, w.deletesFailures, w.deletesNoops, w.deletes, + ) +} + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + tableName = `stress_test` + createStatement = ` + CREATE TABLE stress_test ( + id bigint(20) not null, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default 'just-created', + 
created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key created_idx(created_timestamp), + key updates_idx(updates) + ) ENGINE=InnoDB + ` + createIfNotExistsStatement = ` + CREATE TABLE IF NOT EXISTS stress_test ( + id bigint(20) not null, + PRIMARY KEY (id) + ) ENGINE=InnoDB + ` + dropStatement = ` + DROP TABLE stress_test + ` + dropIfExistsStatement = ` + DROP TABLE IF EXISTS stress_test + ` + alterHintStatement = ` + ALTER TABLE stress_test modify hint_col varchar(64) not null default '%s' + ` + insertRowStatement = ` + INSERT IGNORE INTO stress_test (id, rand_val) VALUES (%d, left(md5(rand()), 8)) + ` + updateRowStatement = ` + UPDATE stress_test SET updates=updates+1 WHERE id=%d + ` + deleteRowStatement = ` + DELETE FROM stress_test WHERE id=%d AND updates=1 + ` + // We use CAST(SUM(updates) AS SIGNED) because SUM() returns a DECIMAL datatype, and we want to read a SIGNED INTEGER type + selectCountRowsStatement = ` + SELECT COUNT(*) AS num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM stress_test + ` + truncateStatement = ` + TRUNCATE TABLE stress_test + ` + writeMetrics WriteMetrics +) + +const ( + maxTableRows = 4096 + maxConcurrency = 5 +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "-schema_change_dir", schemaChangeDirectory, + "-schema_change_controller", "local", + "-schema_change_check_interval", "1"} + + clusterInstance.VtTabletExtraArgs = []string{ + 
"-enable-lag-throttler", + "-throttle_threshold", "1s", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + "-migration_check_interval", "5s", + } + clusterInstance.VtGateExtraArgs = []string{ + "-ddl_strategy", "online", + } + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + } + + // No need for replicas in this stress test + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // set the gateway we want to use + vtgateInstance.GatewayImplementation = "tabletgateway" + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func TestSchemaChange(t *testing.T) { + defer cluster.PanicHandler(t) + shards := clusterInstance.Keyspaces[0].Shards + require.Equal(t, 1, len(shards)) + + var uuids []string + // CREATE + t.Run("CREATE TABLE IF NOT EXISTS where table does not exist", func(t *testing.T) { + // The table does not exist + uuid := testOnlineDDLStatement(t, createIfNotExistsStatement, "online", "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + t.Run("revert CREATE TABLE IF NOT EXISTS where did not exist", func(t *testing.T) { + // The table existed, so it will now be dropped (renamed) + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, 
schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert revert CREATE TABLE IF NOT EXISTS where did not exist", func(t *testing.T) { + // Table was dropped (renamed) so it will now be restored + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + t.Run("revert revert revert CREATE TABLE IF NOT EXISTS where did not exist", func(t *testing.T) { + // Table was restored, so it will now be dropped (renamed) + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("online CREATE TABLE", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, createStatement, "online", "vtgate", "just-created") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + initTable(t) + testSelectTableMetrics(t) + }) + t.Run("revert CREATE TABLE", func(t *testing.T) { + // This will drop the table (well, actually, rename it away) + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert revert CREATE TABLE", func(t *testing.T) { + // Restore the table. Data should still be in the table! + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("fail revert older change", func(t *testing.T) { + // We shouldn't be able to revert one-before-last succcessful migration. 
+ uuid := testRevertMigration(t, uuids[len(uuids)-2]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + }) + t.Run("CREATE TABLE IF NOT EXISTS where table exists", func(t *testing.T) { + // The table exists. A noop. + uuid := testOnlineDDLStatement(t, createIfNotExistsStatement, "online", "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + t.Run("revert CREATE TABLE IF NOT EXISTS where table existed", func(t *testing.T) { + // Since the table already existed, thus not created by the reverts migration, + // we expect to _not_ drop it in this revert. A noop. + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + t.Run("revert revert CREATE TABLE IF NOT EXISTS where table existed", func(t *testing.T) { + // Table was not dropped, thus isn't re-created, and it just still exists. A noop. + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + t.Run("fail online CREATE TABLE", func(t *testing.T) { + // Table already exists + uuid := testOnlineDDLStatement(t, createStatement, "online", "vtgate", "just-created") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + checkTable(t, tableName, true) + }) + + // ALTER + + // Run two ALTER TABLE statements. + // These tests are similar to `onlineddl_vrepl_stress` endtond tests. + // If they fail, it has nothing to do with revert. + // We run these tests because we expect their functionality to work in the next step. 
+ var alterHints []string + for i := 0; i < 2; i++ { + testName := fmt.Sprintf("online ALTER TABLE %d", i) + hint := fmt.Sprintf("hint-alter-%d", i) + alterHints = append(alterHints, hint) + t.Run(testName, func(t *testing.T) { + // One alter. We're not going to revert it. + // This specific test is similar to `onlineddl_vrepl_stress` endtoend tests. + // If it fails, it has nothing to do with revert. + // We run this test because we expect its functionality to work in the next step. + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runMultipleConnections(ctx, t) + }() + uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), "online", "vtgate", hint) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + testSelectTableMetrics(t) + }) + } + t.Run("revert ALTER TABLE", func(t *testing.T) { + // This reverts the last ALTER TABLE. + // And we run traffic on the table during the revert + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runMultipleConnections(ctx, t) + }() + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + checkMigratedTable(t, tableName, alterHints[0]) + testSelectTableMetrics(t) + }) + t.Run("revert revert ALTER TABLE", func(t *testing.T) { + // This reverts the last revert (reapplying the last ALTER TABLE). 
+ // And we run traffic on the table during the revert + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runMultipleConnections(ctx, t) + }() + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + checkMigratedTable(t, tableName, alterHints[1]) + testSelectTableMetrics(t) + }) + t.Run("revert revert revert ALTER TABLE", func(t *testing.T) { + // For good measure, let's verify that revert-revert-revert works... + // So this again pulls us back to first ALTER + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runMultipleConnections(ctx, t) + }() + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + checkMigratedTable(t, tableName, alterHints[0]) + testSelectTableMetrics(t) + }) + + // DROP + t.Run("online DROP TABLE", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, dropStatement, "online", "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert DROP TABLE", func(t *testing.T) { + // This will recreate the table (well, actually, rename it back into place) + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + testSelectTableMetrics(t) + }) + t.Run("revert revert DROP TABLE", func(t *testing.T) { + // This will reapply DROP TABLE 
+ uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + + // DROP IF EXISTS + t.Run("online DROP TABLE IF EXISTS", func(t *testing.T) { + // The table doesn't actually exist right now + uuid := testOnlineDDLStatement(t, dropIfExistsStatement, "online", "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert DROP TABLE IF EXISTS", func(t *testing.T) { + // Table will not be recreated because it didn't exist during the DROP TABLE IF EXISTS + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert revert DROP TABLE IF EXISTS", func(t *testing.T) { + // Table still does not exist + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert revert revert DROP TABLE IF EXISTS", func(t *testing.T) { + // Table still does not exist + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + + // FAILURES + t.Run("fail online DROP TABLE", func(t *testing.T) { + // The table does not exist now + uuid := testOnlineDDLStatement(t, dropStatement, "online", "vtgate", "") + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + checkTable(t, tableName, false) + }) + t.Run("fail revert failed online DROP TABLE", func(t *testing.T) 
{ + // Cannot revert a failed migration + uuid := testRevertMigration(t, uuids[len(uuids)-1]) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + checkTable(t, tableName, false) + }) +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string) (uuid string) { + if executeStrategy == "vtgate" { + row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, "").Named().Row() + if row != nil { + uuid = row.AsString("uuid", "") + } + } else { + var err error + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, ddlStrategy) + assert.NoError(t, err) + } + uuid = strings.TrimSpace(uuid) + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + assert.NoError(t, err) + + if !strategy.IsDirect() { + time.Sleep(time.Second * 20) + } + + if expectHint != "" { + checkMigratedTable(t, tableName, expectHint) + } + return uuid +} + +// testRevertMigration reverts a given migration +func testRevertMigration(t *testing.T, revertUUID string) (uuid string) { + revertQuery := fmt.Sprintf("revert vitess_migration '%s'", revertUUID) + r := onlineddl.VtgateExecQuery(t, &vtParams, revertQuery, "") + + row := r.Named().Row() + require.NotNil(t, row) + + uuid = row["uuid"].ToString() + + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + time.Sleep(time.Second * 20) + return uuid +} + +// checkTable checks the number of tables in the first two shards. 
+func checkTable(t *testing.T, showTableName string, expectExists bool) bool { + expectCount := 0 + if expectExists { + expectCount = 1 + } + for i := range clusterInstance.Keyspaces[0].Shards { + if !checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) { + return false + } + } + return true +} + +// checkTablesCount checks the number of tables in the given tablet +func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) bool { + query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) + queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.Nil(t, err) + return assert.Equal(t, expectCount, len(queryResult.Rows)) +} + +// checkMigratedTables checks the CREATE STATEMENT of a table after migration +func checkMigratedTable(t *testing.T, tableName, expectHint string) { + for i := range clusterInstance.Keyspaces[0].Shards { + createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) + assert.Contains(t, createStatement, expectHint) + } +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { + queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) + require.Nil(t, err) + + assert.Equal(t, len(queryResult.Rows), 1) + assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement + statement = queryResult.Rows[0][1].ToString() + return statement +} + +func generateInsert(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(insertRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.insertsAttempts++ + if err != nil { + 
writeMetrics.insertsFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.insertsNoops++ + return + } + writeMetrics.inserts++ + }() + return err +} + +func generateUpdate(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(updateRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.updatesAttempts++ + if err != nil { + writeMetrics.updatesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.updatesNoops++ + return + } + writeMetrics.updates++ + }() + return err +} + +func generateDelete(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(deleteRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.deletesAttempts++ + if err != nil { + writeMetrics.deletesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.deletesNoops++ + return + } + writeMetrics.deletes++ + }() + return err +} + +func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { + log.Infof("Running single connection") + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("set autocommit=1", 1000, true) + require.Nil(t, err) + _, err = conn.ExecuteFetch("set transaction isolation level read committed", 1000, true) + require.Nil(t, err) + + for { + if atomic.LoadInt64(done) == 1 { + log.Infof("Terminating single connection") + return + } + switch rand.Int31n(3) { + case 0: + err = generateInsert(t, conn) + case 1: + err = generateUpdate(t, conn) + case 2: + err = generateDelete(t, conn) + } + if err != nil { + if strings.Contains(err.Error(), 
"disallowed due to rule: enforce blacklisted tables") { + err = nil + } + } + assert.Nil(t, err) + time.Sleep(10 * time.Millisecond) + } +} + +func runMultipleConnections(ctx context.Context, t *testing.T) { + log.Infof("Running multiple connections") + + require.True(t, checkTable(t, tableName, true)) + var done int64 + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + runSingleConnection(ctx, t, &done) + }() + } + <-ctx.Done() + atomic.StoreInt64(&done, 1) + log.Infof("Running multiple connections: done") + wg.Wait() + log.Infof("All connections cancelled") +} + +func initTable(t *testing.T) { + log.Infof("initTable begin") + defer log.Infof("initTable complete") + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + writeMetrics.Clear() + _, err = conn.ExecuteFetch(truncateStatement, 1000, true) + require.Nil(t, err) + + for i := 0; i < maxTableRows/2; i++ { + generateInsert(t, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateUpdate(t, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateDelete(t, conn) + } +} + +func testSelectTableMetrics(t *testing.T) { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + log.Infof("%s", writeMetrics.String()) + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + rs, err := conn.ExecuteFetch(selectCountRowsStatement, 1000, true) + require.Nil(t, err) + + row := rs.Named().Row() + require.NotNil(t, row) + log.Infof("testSelectTableMetrics, row: %v", row) + numRows := row.AsInt64("num_rows", 0) + sumUpdates := row.AsInt64("sum_updates", 0) + + assert.NotZero(t, numRows) + assert.NotZero(t, sumUpdates) + assert.NotZero(t, writeMetrics.inserts) + assert.NotZero(t, writeMetrics.deletes) + assert.NotZero(t, writeMetrics.updates) + assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows) + 
assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1 +} diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go new file mode 100644 index 00000000000..7971a128b75 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -0,0 +1,449 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vrepl + +import ( + "flag" + "fmt" + "io/ioutil" + "net/http" + "os" + "path" + "strings" + "sync" + "testing" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/schema" + throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + httpClient = throttlebase.SetupHTTPClient(time.Second) + throttlerAppName = "vreplication" + + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + totalTableCount = 4 + createTable = ` + CREATE TABLE %s ( + id bigint(20) NOT NULL, + test_val bigint unsigned NOT NULL DEFAULT 0, + msg varchar(64), + PRIMARY KEY (id) + ) ENGINE=InnoDB;` + // To verify non online-DDL behavior + alterTableNormalStatement = ` + ALTER TABLE %s + ADD COLUMN 
non_online int UNSIGNED NOT NULL DEFAULT 0` + // A trivial statement which must succeed and does not change the schema + alterTableTrivialStatement = ` + ALTER TABLE %s + ENGINE=InnoDB` + // The following statement is valid + alterTableSuccessfulStatement = ` + ALTER TABLE %s + MODIFY id bigint UNSIGNED NOT NULL, + ADD COLUMN vrepl_col int NOT NULL DEFAULT 0, + ADD INDEX idx_msg(msg)` + // The following statement will fail because vreplication requires shared PRIMARY KEY columns + alterTableFailedStatement = ` + ALTER TABLE %s + DROP PRIMARY KEY, + DROP COLUMN vrepl_col` + // We will run this query while throttling vreplication + alterTableThrottlingStatement = ` + ALTER TABLE %s + DROP COLUMN vrepl_col` + onlineDDLCreateTableStatement = ` + CREATE TABLE %s ( + id bigint NOT NULL, + test_val bigint unsigned NOT NULL DEFAULT 0, + online_ddl_create_col INT NOT NULL, + PRIMARY KEY (id) + ) ENGINE=InnoDB;` + onlineDDLDropTableStatement = ` + DROP TABLE %s` + onlineDDLDropTableIfExistsStatement = ` + DROP TABLE IF EXISTS %s` + insertRowStatement = ` + INSERT INTO %s (id, test_val) VALUES (%d, 1) + ` + selectCountRowsStatement = ` + SELECT COUNT(*) AS c FROM %s + ` + countInserts int64 + insertMutex sync.Mutex + + vSchema = ` + { + "sharded": true, + "vindexes": { + "hash_index": { + "type": "hash" + } + }, + "tables": { + "vt_onlineddl_test_00": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + }, + "vt_onlineddl_test_01": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + }, + "vt_onlineddl_test_02": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + }, + "vt_onlineddl_test_03": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + } + ] + } + } + } + ` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = 
path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "-schema_change_dir", schemaChangeDirectory, + "-schema_change_controller", "local", + "-schema_change_check_interval", "1"} + + clusterInstance.VtTabletExtraArgs = []string{ + "-enable-lag-throttler", + "-throttle_threshold", "1s", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + "-migration_check_interval", "5s", + } + clusterInstance.VtGateExtraArgs = []string{ + "-ddl_strategy", "online", + } + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + VSchema: vSchema, + } + + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // set the gateway we want to use + vtgateInstance.GatewayImplementation = "tabletgateway" + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func throttleResponse(tablet *cluster.Vttablet, path string) (resp *http.Response, respBody string, err error) { + apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.VttabletProcess.TabletHostname, tablet.HTTPPort, path) + resp, err = httpClient.Get(apiURL) + if err != nil { + return resp, respBody, err + } + b, err := ioutil.ReadAll(resp.Body) + respBody = string(b) + return resp, 
respBody, err +} + +func throttleApp(tablet *cluster.Vttablet, app string) (*http.Response, string, error) { + return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app)) +} + +func unthrottleApp(tablet *cluster.Vttablet, app string) (*http.Response, string, error) { + return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app)) +} + +func TestSchemaChange(t *testing.T) { + defer cluster.PanicHandler(t) + shards := clusterInstance.Keyspaces[0].Shards + assert.Equal(t, 2, len(shards)) + testWithInitialSchema(t) + t.Run("alter non_online", func(t *testing.T) { + _ = testOnlineDDLStatement(t, alterTableNormalStatement, string(schema.DDLStrategyDirect), "vtctl", "non_online") + insertRows(t, 2) + testRows(t) + }) + t.Run("successful online alter, vtgate", func(t *testing.T) { + insertRows(t, 2) + uuid := testOnlineDDLStatement(t, alterTableSuccessfulStatement, "online", "vtgate", "vrepl_col") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + testRows(t) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + }) + t.Run("successful online alter, vtctl", func(t *testing.T) { + insertRows(t, 2) + uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "online", "vtctl", "vrepl_col") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + testRows(t) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + }) + t.Run("throttled migration", func(t *testing.T) { + insertRows(t, 2) + for i := range clusterInstance.Keyspaces[0].Shards { + throttleApp(clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], throttlerAppName) + defer unthrottleApp(clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], throttlerAppName) + } + uuid := testOnlineDDLStatement(t, 
alterTableThrottlingStatement, "online", "vtgate", "vrepl_col") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) + testRows(t) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) + time.Sleep(2 * time.Second) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + }) + t.Run("failed migration", func(t *testing.T) { + insertRows(t, 2) + uuid := testOnlineDDLStatement(t, alterTableFailedStatement, "online", "vtgate", "vrepl_col") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + testRows(t) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) + // migration will fail again + }) + t.Run("cancel all migrations: nothing to cancel", func(t *testing.T) { + // no migrations pending at this time + time.Sleep(10 * time.Second) + onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) + }) + t.Run("cancel all migrations: some migrations to cancel", func(t *testing.T) { + for i := range shards { + throttleApp(shards[i].Vttablets[0], throttlerAppName) + defer unthrottleApp(shards[i].Vttablets[0], throttlerAppName) + } + // spawn n migrations; cancel them via cancel-all + var wg sync.WaitGroup + count := 4 + for i := 0; i < count; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _ = testOnlineDDLStatement(t, alterTableThrottlingStatement, "online", "vtgate", "vrepl_col") + }() + } + wg.Wait() + onlineddl.CheckCancelAllMigrations(t, &vtParams, len(shards)*count) + }) + t.Run("Online DROP, vtctl", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "online", "vtctl", "") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + }) + t.Run("Online CREATE, vtctl", 
func(t *testing.T) { + uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "online", "vtctl", "online_ddl_create_col") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + }) + t.Run("Online DROP TABLE IF EXISTS, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "online", "vtgate", "") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + // this table existed + checkTables(t, schema.OnlineDDLToGCUUID(uuid), 1) + }) + t.Run("Online DROP TABLE IF EXISTS for nonexistent table, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "online", "vtgate", "") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + // this table did not exist + checkTables(t, schema.OnlineDDLToGCUUID(uuid), 0) + }) + t.Run("Online DROP TABLE for nonexistent table, expect error, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "online", "vtgate", "") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) + }) +} + +func insertRow(t *testing.T) { + insertMutex.Lock() + defer insertMutex.Unlock() + + tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) + sqlQuery := fmt.Sprintf(insertRowStatement, tableName, countInserts) + r := 
onlineddl.VtgateExecQuery(t, &vtParams, sqlQuery, "") + require.NotNil(t, r) + countInserts++ +} + +func insertRows(t *testing.T, count int) { + for i := 0; i < count; i++ { + insertRow(t) + } +} + +func testRows(t *testing.T) { + insertMutex.Lock() + defer insertMutex.Unlock() + + tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) + sqlQuery := fmt.Sprintf(selectCountRowsStatement, tableName) + r := onlineddl.VtgateExecQuery(t, &vtParams, sqlQuery, "") + require.NotNil(t, r) + row := r.Named().Row() + require.NotNil(t, row) + require.Equal(t, countInserts, row.AsInt64("c", 0)) +} + +func testWithInitialSchema(t *testing.T) { + // Create 4 tables + var sqlQuery = "" //nolint + for i := 0; i < totalTableCount; i++ { + sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_onlineddl_test_%02d", i)) + err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + require.Nil(t, err) + } + + // Check if 4 tables are created + checkTables(t, "", totalTableCount) +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectColumn string) (uuid string) { + tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) + sqlQuery := fmt.Sprintf(alterStatement, tableName) + if executeStrategy == "vtgate" { + row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, sqlQuery, "").Named().Row() + if row != nil { + uuid = row.AsString("uuid", "") + } + } else { + var err error + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, ddlStrategy) + assert.NoError(t, err) + } + uuid = strings.TrimSpace(uuid) + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + assert.NoError(t, err) + + if !strategy.IsDirect() { + time.Sleep(time.Second * 20) + } + + if expectColumn != "" { + checkMigratedTable(t, tableName, 
expectColumn) + } + return uuid +} + +// checkTables checks the number of tables in the first two shards. +func checkTables(t *testing.T, showTableName string, expectCount int) { + for i := range clusterInstance.Keyspaces[0].Shards { + checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) + } +} + +// checkTablesCount checks the number of tables in the given tablet +func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) { + query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) + queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.Nil(t, err) + assert.Equal(t, expectCount, len(queryResult.Rows)) +} + +// checkMigratedTables checks the CREATE STATEMENT of a table after migration +func checkMigratedTable(t *testing.T, tableName, expectColumn string) { + for i := range clusterInstance.Keyspaces[0].Shards { + createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) + assert.Contains(t, createStatement, expectColumn) + } +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { + queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) + require.Nil(t, err) + + assert.Equal(t, len(queryResult.Rows), 1) + assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement + statement = queryResult.Rows[0][1].ToString() + return statement +} diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go new file mode 100644 index 00000000000..1189619e2d4 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -0,0 +1,525 @@ 
+/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplstress + +import ( + "context" + "flag" + "fmt" + "math/rand" + "os" + "path" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type WriteMetrics struct { + mu sync.Mutex + insertsAttempts, insertsFailures, insertsNoops, inserts int64 + updatesAttempts, updatesFailures, updatesNoops, updates int64 + deletesAttempts, deletesFailures, deletesNoops, deletes int64 +} + +func (w *WriteMetrics) Clear() { + w.mu.Lock() + defer w.mu.Unlock() + + w.inserts = 0 + w.updates = 0 + w.deletes = 0 + + w.insertsAttempts = 0 + w.insertsFailures = 0 + w.insertsNoops = 0 + + w.updatesAttempts = 0 + w.updatesFailures = 0 + w.updatesNoops = 0 + + w.deletesAttempts = 0 + w.deletesFailures = 0 + w.deletesNoops = 0 +} + +func (w *WriteMetrics) String() string { + return fmt.Sprintf(`WriteMetrics: inserts-deletes=%d, updates-deletes=%d, +insertsAttempts=%d, insertsFailures=%d, insertsNoops=%d, inserts=%d, +updatesAttempts=%d, updatesFailures=%d, updatesNoops=%d, updates=%d, +deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, +`, + w.inserts-w.deletes, w.updates-w.deletes, + 
w.insertsAttempts, w.insertsFailures, w.insertsNoops, w.inserts, + w.updatesAttempts, w.updatesFailures, w.updatesNoops, w.updates, + w.deletesAttempts, w.deletesFailures, w.deletesNoops, w.deletes, + ) +} + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + tableName = `stress_test` + createStatement = ` + CREATE TABLE stress_test ( + id bigint(20) not null, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default '', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key created_idx(created_timestamp), + key updates_idx(updates) + ) ENGINE=InnoDB + ` + alterHintStatement = ` + ALTER TABLE stress_test modify hint_col varchar(64) not null default '%s' + ` + insertRowStatement = ` + INSERT IGNORE INTO stress_test (id, rand_val) VALUES (%d, left(md5(rand()), 8)) + ` + updateRowStatement = ` + UPDATE stress_test SET updates=updates+1 WHERE id=%d + ` + deleteRowStatement = ` + DELETE FROM stress_test WHERE id=%d AND updates=1 + ` + // We use CAST(SUM(updates) AS SIGNED) because SUM() returns a DECIMAL datatype, and we want to read a SIGNED INTEGER type + selectCountRowsStatement = ` + SELECT COUNT(*) AS num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM stress_test + ` + truncateStatement = ` + TRUNCATE TABLE stress_test + ` + writeMetrics WriteMetrics +) + +const ( + maxTableRows = 4096 + maxConcurrency = 5 + countIterations = 5 +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := 
os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "-schema_change_dir", schemaChangeDirectory, + "-schema_change_controller", "local", + "-schema_change_check_interval", "1"} + + clusterInstance.VtTabletExtraArgs = []string{ + "-enable-lag-throttler", + "-throttle_threshold", "1s", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + "-migration_check_interval", "5s", + } + clusterInstance.VtGateExtraArgs = []string{ + "-ddl_strategy", "online", + } + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + } + + // No need for replicas in this stress test + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // set the gateway we want to use + vtgateInstance.GatewayImplementation = "tabletgateway" + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func TestSchemaChange(t *testing.T) { + defer cluster.PanicHandler(t) + + shards := clusterInstance.Keyspaces[0].Shards + require.Equal(t, 1, len(shards)) + + t.Run("create schema", func(t *testing.T) { + assert.Equal(t, 1, len(clusterInstance.Keyspaces[0].Shards)) + testWithInitialSchema(t) + }) + for i := 0; i < countIterations; i++ { + // This first tests the general functionality of initializing the table with data, + // no concurrency involved. Just counting. 
+ testName := fmt.Sprintf("init table %d/%d", (i + 1), countIterations) + t.Run(testName, func(t *testing.T) { + initTable(t) + testSelectTableMetrics(t) + }) + } + for i := 0; i < countIterations; i++ { + // This tests running a workload on the table, then comparing expected metrics with + // actual table metrics. All this without any ALTER TABLE: this is to validate + // that our testing/metrics logic is sound in the first place. + testName := fmt.Sprintf("workload without ALTER TABLE %d/%d", (i + 1), countIterations) + t.Run(testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + initTable(t) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runMultipleConnections(ctx, t) + }() + time.Sleep(5 * time.Second) + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + testSelectTableMetrics(t) + }) + } + t.Run("ALTER TABLE without workload", func(t *testing.T) { + // A single ALTER TABLE. Generally this is covered in endtoend/onlineddl_vrepl, + // but we wish to verify the ALTER statement used in these tests is sound + initTable(t) + hint := "hint-alter-without-workload" + uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), "online", "vtgate", hint) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + testSelectTableMetrics(t) + }) + + for i := 0; i < countIterations; i++ { + // Finally, this is the real test: + // We populate a table, and begin a concurrent workload (this is the "mini stress") + // We then ALTER TABLE via vreplication. + // Once convinced ALTER TABLE is complete, we stop the workload. + // We then compare expected metrics with table metrics. If they agree, then + // the vreplication/ALTER TABLE did not corrupt our data and we are happy. 
+ testName := fmt.Sprintf("ALTER TABLE with workload %d/%d", (i + 1), countIterations) + t.Run(testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + initTable(t) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runMultipleConnections(ctx, t) + }() + hint := fmt.Sprintf("hint-alter-with-workload-%d", i) + uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), "online", "vtgate", hint) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + testSelectTableMetrics(t) + }) + } +} + +func testWithInitialSchema(t *testing.T) { + // Create the stress table + err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) + require.Nil(t, err) + + // Check if table is created + checkTable(t, tableName) +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string) (uuid string) { + if executeStrategy == "vtgate" { + row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, "").Named().Row() + if row != nil { + uuid = row.AsString("uuid", "") + } + } else { + var err error + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, ddlStrategy) + assert.NoError(t, err) + } + uuid = strings.TrimSpace(uuid) + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + assert.NoError(t, err) + + if !strategy.IsDirect() { + time.Sleep(time.Second * 20) + } + + if expectHint != "" { + checkMigratedTable(t, tableName, expectHint) + } + return uuid +} + +// checkTable checks the number of tables in the first two shards. 
+func checkTable(t *testing.T, showTableName string) { + for i := range clusterInstance.Keyspaces[0].Shards { + checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, 1) + } +} + +// checkTablesCount checks the number of tables in the given tablet +func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) { + query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) + queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.Nil(t, err) + assert.Equal(t, expectCount, len(queryResult.Rows)) +} + +// checkMigratedTables checks the CREATE STATEMENT of a table after migration +func checkMigratedTable(t *testing.T, tableName, expectHint string) { + for i := range clusterInstance.Keyspaces[0].Shards { + createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) + assert.Contains(t, createStatement, expectHint) + } +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { + queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) + require.Nil(t, err) + + assert.Equal(t, len(queryResult.Rows), 1) + assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement + statement = queryResult.Rows[0][1].ToString() + return statement +} + +func generateInsert(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(insertRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.insertsAttempts++ + if err != nil { + writeMetrics.insertsFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + 
writeMetrics.insertsNoops++ + return + } + writeMetrics.inserts++ + }() + return err +} + +func generateUpdate(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(updateRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.updatesAttempts++ + if err != nil { + writeMetrics.updatesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.updatesNoops++ + return + } + writeMetrics.updates++ + }() + return err +} + +func generateDelete(t *testing.T, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(deleteRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.deletesAttempts++ + if err != nil { + writeMetrics.deletesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.deletesNoops++ + return + } + writeMetrics.deletes++ + }() + return err +} + +func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { + log.Infof("Running single connection") + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("set autocommit=1", 1000, true) + require.Nil(t, err) + _, err = conn.ExecuteFetch("set transaction isolation level read committed", 1000, true) + require.Nil(t, err) + + for { + if atomic.LoadInt64(done) == 1 { + log.Infof("Terminating single connection") + return + } + switch rand.Int31n(3) { + case 0: + err = generateInsert(t, conn) + case 1: + err = generateUpdate(t, conn) + case 2: + err = generateDelete(t, conn) + } + if err != nil { + if strings.Contains(err.Error(), "disallowed due to rule: enforce blacklisted tables") { + err = nil + } + } + assert.Nil(t, err) + time.Sleep(10 * 
time.Millisecond) + } +} + +func runMultipleConnections(ctx context.Context, t *testing.T) { + log.Infof("Running multiple connections") + var done int64 + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + runSingleConnection(ctx, t, &done) + }() + } + <-ctx.Done() + atomic.StoreInt64(&done, 1) + log.Infof("Running multiple connections: done") + wg.Wait() + log.Infof("All connections cancelled") +} + +func initTable(t *testing.T) { + log.Infof("initTable begin") + defer log.Infof("initTable complete") + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + writeMetrics.Clear() + _, err = conn.ExecuteFetch(truncateStatement, 1000, true) + require.Nil(t, err) + + for i := 0; i < maxTableRows/2; i++ { + generateInsert(t, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateUpdate(t, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateDelete(t, conn) + } +} + +func testSelectTableMetrics(t *testing.T) { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + log.Infof("%s", writeMetrics.String()) + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + rs, err := conn.ExecuteFetch(selectCountRowsStatement, 1000, true) + require.Nil(t, err) + + row := rs.Named().Row() + require.NotNil(t, row) + log.Infof("testSelectTableMetrics, row: %v", row) + numRows := row.AsInt64("num_rows", 0) + sumUpdates := row.AsInt64("sum_updates", 0) + + assert.NotZero(t, numRows) + assert.NotZero(t, sumUpdates) + assert.NotZero(t, writeMetrics.inserts) + assert.NotZero(t, writeMetrics.deletes) + assert.NotZero(t, writeMetrics.updates) + assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows) + assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1 +} diff --git a/go/test/endtoend/onlineddl/vtgate_util.go 
b/go/test/endtoend/onlineddl/vtgate_util.go new file mode 100644 index 00000000000..833f155ec8f --- /dev/null +++ b/go/test/endtoend/onlineddl/vtgate_util.go @@ -0,0 +1,135 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package onlineddl + +import ( + "context" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/schema" + + "vitess.io/vitess/go/test/endtoend/cluster" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// VtgateExecQuery runs a query on VTGate using given query params +func VtgateExecQuery(t *testing.T, vtParams *mysql.ConnParams, query string, expectError string) *sqltypes.Result { + t.Helper() + + ctx := context.Background() + conn, err := mysql.Connect(ctx, vtParams) + require.Nil(t, err) + defer conn.Close() + + qr, err := conn.ExecuteFetch(query, 1000, true) + if expectError == "" { + require.NoError(t, err) + } else { + require.Error(t, err, "error should not be nil") + assert.Contains(t, err.Error(), expectError, "Unexpected error") + } + return qr +} + +// VtgateExecDDL executes a DDL query with given strategy +func VtgateExecDDL(t *testing.T, vtParams *mysql.ConnParams, ddlStrategy string, query string, expectError string) *sqltypes.Result { + t.Helper() + + ctx := context.Background() + conn, err := mysql.Connect(ctx, vtParams) + require.Nil(t, err) + defer conn.Close() + + setSession := 
fmt.Sprintf("set @@ddl_strategy='%s'", ddlStrategy) + _, err = conn.ExecuteFetch(setSession, 1000, true) + assert.NoError(t, err) + + qr, err := conn.ExecuteFetch(query, 1000, true) + if expectError == "" { + require.NoError(t, err) + } else { + require.Error(t, err, "error should not be nil") + assert.Contains(t, err.Error(), expectError, "Unexpected error") + } + return qr +} + +// CheckRetryMigration attempts to retry a migration, and expects success/failure by counting affected rows +func CheckRetryMigration(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectRetryPossible bool) { + retryQuery := fmt.Sprintf("alter vitess_migration '%s' retry", uuid) + r := VtgateExecQuery(t, vtParams, retryQuery, "") + + if expectRetryPossible { + assert.Equal(t, len(shards), int(r.RowsAffected)) + } else { + assert.Equal(t, int(0), int(r.RowsAffected)) + } +} + +// CheckCancelMigration attempts to cancel a migration, and expects success/failure by counting affected rows +func CheckCancelMigration(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectCancelPossible bool) { + cancelQuery := fmt.Sprintf("alter vitess_migration '%s' cancel", uuid) + r := VtgateExecQuery(t, vtParams, cancelQuery, "") + + if expectCancelPossible { + assert.Equal(t, len(shards), int(r.RowsAffected)) + } else { + assert.Equal(t, int(0), int(r.RowsAffected)) + } +} + +// CheckCancelAllMigrations cancels all pending migrations and expect number of affected rows +func CheckCancelAllMigrations(t *testing.T, vtParams *mysql.ConnParams, expectCount int) { + cancelQuery := "alter vitess_migration cancel all" + r := VtgateExecQuery(t, vtParams, cancelQuery, "") + + assert.Equal(t, expectCount, int(r.RowsAffected)) +} + +// CheckMigrationStatus verifies that the migration indicated by given UUID has the given expected status +func CheckMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectStatus 
schema.OnlineDDLStatus) { + showQuery := fmt.Sprintf("show vitess_migrations like '%s'", uuid) + r := VtgateExecQuery(t, vtParams, showQuery, "") + fmt.Printf("# output for `%s`:\n", showQuery) + PrintQueryResult(os.Stdout, r) + + count := 0 + for _, row := range r.Named().Rows { + if row["migration_uuid"].ToString() == uuid && row["migration_status"].ToString() == string(expectStatus) { + count++ + } + } + assert.Equal(t, len(shards), count) +} + +// CheckMigrationArtifacts verifies given migration exists, and checks if it has artifacts +func CheckMigrationArtifacts(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectArtifacts bool) { + showQuery := fmt.Sprintf("show vitess_migrations like '%s'", uuid) + r := VtgateExecQuery(t, vtParams, showQuery, "") + + assert.Equal(t, len(shards), len(r.Named().Rows)) + for _, row := range r.Named().Rows { + hasArtifacts := (row["artifacts"].ToString() != "") + assert.Equal(t, expectArtifacts, hasArtifacts) + } +} diff --git a/go/test/endtoend/preparestmt/main_test.go b/go/test/endtoend/preparestmt/main_test.go index 15d27f19482..7d69b7dd801 100644 --- a/go/test/endtoend/preparestmt/main_test.go +++ b/go/test/endtoend/preparestmt/main_test.go @@ -190,9 +190,13 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: sqlSchema, } + uks := &cluster.Keyspace{Name: "uks"} if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { return 1, err } + if err := clusterInstance.StartUnshardedKeyspace(*uks, 0, false); err != nil { + return 1, err + } vtgateInstance := clusterInstance.NewVtgateInstance() // set the gateway and other params we want to use diff --git a/go/test/endtoend/preparestmt/stmt_methods_test.go b/go/test/endtoend/preparestmt/stmt_methods_test.go index 14114a903b1..a2e079b579b 100644 --- a/go/test/endtoend/preparestmt/stmt_methods_test.go +++ b/go/test/endtoend/preparestmt/stmt_methods_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package preparestmt import ( + "bytes" "database/sql" "fmt" "strconv" @@ -38,6 +39,22 @@ func TestSelect(t *testing.T) { selectWhere(t, dbo, "") } +func TestSelectDatabase(t *testing.T) { + defer cluster.PanicHandler(t) + dbo := Connect(t) + defer dbo.Close() + prepare, err := dbo.Prepare("select database()") + require.NoError(t, err) + rows, err := prepare.Query() + require.NoError(t, err) + defer rows.Close() + var resultBytes sql.RawBytes + require.True(t, rows.Next(), "no rows found") + err = rows.Scan(&resultBytes) + require.NoError(t, err) + assert.Equal(t, string(resultBytes), "test_keyspace") +} + // TestInsertUpdateDelete validates all insert, update and // delete method on prepared statements. func TestInsertUpdateDelete(t *testing.T) { @@ -106,7 +123,7 @@ func TestInsertUpdateDelete(t *testing.T) { func testReplica(t *testing.T) { replicaConn := Connect(t, "") require.NotNil(t, replicaConn, "unable to connect") - _, err := replicaConn.Exec("use @replica") + _, err := replicaConn.Exec(fmt.Sprintf("use %s@replica", dbInfo.KeyspaceName)) require.NoError(t, err) tx, err := replicaConn.Begin() require.NoError(t, err, "error creating replica transaction") @@ -255,3 +272,110 @@ func TestWrongTableName(t *testing.T) { defer dbo.Close() execWithError(t, dbo, []uint16{1146}, "select * from teseting_table;") } + +type columns struct { + columnName string + dataType string + fullDataType string + characterMaximumLength sql.NullInt64 + numericPrecision sql.NullInt64 + numericScale sql.NullInt64 + datetimePrecision sql.NullInt64 + columnDefault sql.NullString + isNullable string + extra string + tableName string +} + +func (c *columns) ToString() string { + buf := bytes.Buffer{} + buf.WriteString(fmt.Sprintf("|%s| \t |%s| \t |%s| \t |%s| \t |%s| \t |%s| \t |%s| \t |%s| \t |%s| \t |%s| \t |%s|", + c.columnName, + c.dataType, + c.fullDataType, + getIntToString(c.characterMaximumLength), + getIntToString(c.numericPrecision), + getIntToString(c.numericScale), + 
getIntToString(c.datetimePrecision), + getStringToString(c.columnDefault), + c.isNullable, + c.extra, + c.tableName)) + return buf.String() +} + +func getIntToString(x sql.NullInt64) string { + if x.Valid { + return fmt.Sprintf("%d", x.Int64) + } + return "NULL" +} + +func getStringToString(x sql.NullString) string { + if x.Valid { + return x.String + } + return "NULL" +} + +func TestSelectDBA(t *testing.T) { + defer cluster.PanicHandler(t) + dbo := Connect(t) + defer dbo.Close() + + _, err := dbo.Exec("use uks") + require.NoError(t, err) + + _, err = dbo.Exec("CREATE TABLE `a` (`one` int NOT NULL,`two` int NOT NULL,PRIMARY KEY(`one`, `two`)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4") + require.NoError(t, err) + + prepare, err := dbo.Prepare(`SELECT + column_name column_name, + data_type data_type, + column_type full_data_type, + character_maximum_length character_maximum_length, + numeric_precision numeric_precision, + numeric_scale numeric_scale, + datetime_precision datetime_precision, + column_default column_default, + is_nullable is_nullable, + extra extra, + table_name table_name + FROM information_schema.columns + WHERE table_schema = ? 
+ ORDER BY ordinal_position`) + require.NoError(t, err) + rows, err := prepare.Query("uks") + require.NoError(t, err) + defer rows.Close() + var rec columns + rowCount := 0 + for rows.Next() { + err := rows.Scan( + &rec.columnName, + &rec.dataType, + &rec.fullDataType, + &rec.characterMaximumLength, + &rec.numericPrecision, + &rec.numericScale, + &rec.datetimePrecision, + &rec.columnDefault, + &rec.isNullable, + &rec.extra, + &rec.tableName) + require.NoError(t, err) + assert.True(t, rec.columnName == "one" || rec.columnName == "two") + assert.Equal(t, "int", rec.dataType) + assert.True(t, rec.fullDataType == "int" || rec.fullDataType == "int(11)") + assert.False(t, rec.characterMaximumLength.Valid) + assert.EqualValues(t, 10, rec.numericPrecision.Int64) + assert.EqualValues(t, 0, rec.numericScale.Int64) + assert.False(t, rec.datetimePrecision.Valid) + assert.False(t, rec.columnDefault.Valid) + assert.Equal(t, "NO", rec.isNullable) + assert.Equal(t, "", rec.extra) + assert.Equal(t, "a", rec.tableName) + rowCount++ + } + require.Equal(t, 2, rowCount) +} diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go index 336b46812a7..370e2b7fb75 100644 --- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go +++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go @@ -100,6 +100,11 @@ func TestMainImpl(m *testing.M) { sql := string(initDb) newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql") sql = sql + initialsharding.GetPasswordUpdateSQL(localCluster) + // https://github.com/vitessio/vitess/issues/8315 + oldAlterTableMode := ` +SET GLOBAL old_alter_table = ON; +` + sql = sql + oldAlterTableMode ioutil.WriteFile(newInitDBFile, []byte(sql), 0666) extraArgs := []string{"-db-credentials-file", dbCredentialFile} diff --git a/go/test/endtoend/reparent/reparent_test.go b/go/test/endtoend/reparent/reparent_test.go index c8f0719dbb9..8363d5eb1f1 100644 --- 
a/go/test/endtoend/reparent/reparent_test.go +++ b/go/test/endtoend/reparent/reparent_test.go @@ -117,6 +117,23 @@ func TestReparentNoChoiceDownMaster(t *testing.T) { resurrectTablet(ctx, t, tab1) } +func TestTrivialERS(t *testing.T) { + defer cluster.PanicHandler(t) + setupReparentCluster(t) + defer teardownCluster() + + confirmReplication(t, tab1, []*cluster.Vttablet{tab2, tab3, tab4}) + + // We should be able to do a series of ERS-es, even if nothing + // is down, without issue + for i := 1; i <= 4; i++ { + out, err := ers(t, nil, "30s") + log.Infof("ERS loop %d. EmergencyReparentShard Output: %v", i, out) + require.NoError(t, err) + time.Sleep(5 * time.Second) + } +} + func TestReparentIgnoreReplicas(t *testing.T) { defer cluster.PanicHandler(t) setupReparentCluster(t) @@ -204,7 +221,7 @@ func TestReparentReplicaOffline(t *testing.T) { // Perform a graceful reparent operation. out, err := prsWithTimeout(t, tab2, false, "", "31s") require.Error(t, err) - assert.Contains(t, out, fmt.Sprintf("tablet %s SetMaster failed", tab4.Alias)) + assert.Contains(t, out, fmt.Sprintf("tablet %s failed to SetMaster", tab4.Alias)) checkMasterTablet(t, tab2) } @@ -345,7 +362,7 @@ func TestReparentWithDownReplica(t *testing.T) { // Perform a graceful reparent operation. It will fail as one tablet is down. out, err := prs(t, tab2) require.Error(t, err) - assert.Contains(t, out, fmt.Sprintf("tablet %s SetMaster failed", tab3.Alias)) + assert.Contains(t, out, fmt.Sprintf("tablet %s failed to SetMaster", tab3.Alias)) // insert data into the new master, check the connected replica work confirmReplication(t, tab2, []*cluster.Vttablet{tab1, tab4}) @@ -441,5 +458,5 @@ func TestReparentDoesntHangIfMasterFails(t *testing.T) { // insert. The replicas should then abort right away. 
out, err := prs(t, tab2) require.Error(t, err) - assert.Contains(t, out, "master failed to PopulateReparentJournal") + assert.Contains(t, out, "primary failed to PopulateReparentJournal") } diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index 105a127b6c5..92aa0dc3ea4 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -120,9 +120,8 @@ func assertExcludeFields(t *testing.T, qr string) { err := json.Unmarshal([]byte(qr), &resultMap) require.Nil(t, err) - rowsAffected := resultMap["rows_affected"] - want := float64(2) - assert.Equal(t, want, rowsAffected) + rows := resultMap["rows"].([]interface{}) + assert.Equal(t, 2, len(rows)) fields := resultMap["fields"] assert.NotContainsf(t, fields, "name", "name should not be in field list") diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index 0ff2dced4db..bc8daacf052 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -83,9 +83,8 @@ func TestTopoCustomRule(t *testing.T) { err = json.Unmarshal([]byte(result), &resultMap) require.NoError(t, err) - rowsAffected := resultMap["rows_affected"] - want := float64(2) - assert.Equal(t, want, rowsAffected) + rowsAffected := resultMap["rows"].([]interface{}) + assert.EqualValues(t, 2, len(rowsAffected)) // Now update the topocustomrule file. 
data = []byte(`[{ diff --git a/go/test/endtoend/tabletmanager/throttler/throttler_test.go b/go/test/endtoend/tabletmanager/throttler/throttler_test.go index cc4cfb7612d..5a5697e615b 100644 --- a/go/test/endtoend/tabletmanager/throttler/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler/throttler_test.go @@ -32,8 +32,8 @@ import ( var ( clusterInstance *cluster.LocalProcessCluster - masterTablet cluster.Vttablet - replicaTablet cluster.Vttablet + primaryTablet *cluster.Vttablet + replicaTablet *cluster.Vttablet hostname = "localhost" keyspaceName = "ks" cell = "zone1" @@ -65,8 +65,16 @@ var ( } }` - httpClient = base.SetupHTTPClient(time.Second) - checkAPIPath = "throttler/check" + httpClient = base.SetupHTTPClient(time.Second) + checkAPIPath = "throttler/check" + checkSelfAPIPath = "throttler/check-self" +) + +const ( + throttlerInitWait = 10 * time.Second + accumulateLagWait = 2 * time.Second + throttlerRefreshIntervalWait = 12 * time.Second + replicationCatchUpWait = 5 * time.Second ) func TestMain(m *testing.M) { @@ -89,6 +97,7 @@ func TestMain(m *testing.M) { "-watch_replication_stream", "-enable_replication_reporter", "-enable-lag-throttler", + "-throttle_threshold", "1s", "-heartbeat_enable", "-heartbeat_interval", "250ms", } @@ -110,9 +119,9 @@ func TestMain(m *testing.M) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets for _, tablet := range tablets { if tablet.Type == "master" { - masterTablet = *tablet + primaryTablet = tablet } else if tablet.Type != "rdonly" { - replicaTablet = *tablet + replicaTablet = tablet } } @@ -121,8 +130,12 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func throttleCheck() (*http.Response, error) { - return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", masterTablet.HTTPPort, checkAPIPath)) +func throttleCheck(tablet *cluster.Vttablet) (*http.Response, error) { + return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkAPIPath)) +} + +func 
throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { + return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath)) } func TestThrottlerBeforeMetricsCollected(t *testing.T) { @@ -130,20 +143,34 @@ func TestThrottlerBeforeMetricsCollected(t *testing.T) { // Immediately after startup, we expect this response: // {"StatusCode":404,"Value":0,"Threshold":0,"Message":"No such metric"} - resp, err := throttleCheck() - assert.NoError(t, err) - assert.Equal(t, http.StatusNotFound, resp.StatusCode) + { + resp, err := throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusNotFound, resp.StatusCode) + } } func TestThrottlerAfterMetricsCollected(t *testing.T) { defer cluster.PanicHandler(t) - time.Sleep(10 * time.Second) + time.Sleep(throttlerInitWait) // By this time metrics will have been collected. We expect no lag, and something like: // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""} - resp, err := throttleCheck() - assert.NoError(t, err) - assert.Equal(t, http.StatusOK, resp.StatusCode) + { + resp, err := throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(replicaTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } } func TestLag(t *testing.T) { @@ -153,22 +180,47 @@ func TestLag(t *testing.T) { err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) assert.NoError(t, err) - time.Sleep(2 * time.Second) + time.Sleep(accumulateLagWait) // Lag will have accumulated // {"StatusCode":429,"Value":4.864921,"Threshold":1,"Message":"Threshold exceeded"} - resp, err := throttleCheck() - assert.NoError(t, err) - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + { + resp, err := 
throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(primaryTablet) + assert.NoError(t, err) + // self (on primary) is unaffected by replication lag + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(replicaTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + } } { err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) assert.NoError(t, err) - time.Sleep(5 * time.Second) + time.Sleep(replicationCatchUpWait) // Restore - resp, err := throttleCheck() - assert.NoError(t, err) - assert.Equal(t, http.StatusOK, resp.StatusCode) + { + resp, err := throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(replicaTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } } } @@ -178,10 +230,10 @@ func TestNoReplicas(t *testing.T) { err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) - time.Sleep(10 * time.Second) + time.Sleep(throttlerRefreshIntervalWait) // This makes no REPLICA servers available. 
We expect something like: // {"StatusCode":200,"Value":0,"Threshold":1,"Message":""} - resp, err := throttleCheck() + resp, err := throttleCheck(primaryTablet) assert.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) } @@ -189,9 +241,9 @@ func TestNoReplicas(t *testing.T) { err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") assert.NoError(t, err) - time.Sleep(10 * time.Second) + time.Sleep(throttlerRefreshIntervalWait) // Restore valid replica - resp, err := throttleCheck() + resp, err := throttleCheck(primaryTablet) assert.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) } diff --git a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go new file mode 100644 index 00000000000..0f03bed0e62 --- /dev/null +++ b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package master + +import ( + "context" + "flag" + "fmt" + "net/http" + "os" + "testing" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" + + "vitess.io/vitess/go/test/endtoend/cluster" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + primaryTablet *cluster.Vttablet + replicaTablet *cluster.Vttablet + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + sqlSchema = ` + create table t1( + id bigint, + value varchar(16), + primary key(id) + ) Engine=InnoDB; +` + + vSchema = ` + { + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + } + } + }` + + httpClient = base.SetupHTTPClient(time.Second) + checkAPIPath = "throttler/check" + checkSelfAPIPath = "throttler/check-self" + vtParams mysql.ConnParams +) + +const ( + testThreshold = 5 + throttlerInitWait = 10 * time.Second +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Set extra tablet args for lock timeout + clusterInstance.VtTabletExtraArgs = []string{ + "-lock_tables_timeout", "5s", + "-watch_replication_stream", + "-enable_replication_reporter", + "-enable-lag-throttler", + "-throttle_metrics_query", "show global status like 'threads_running'", + "-throttle_metrics_threshold", fmt.Sprintf("%d", testThreshold), + "-throttle_check_as_check_self", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + } + // We do not need semiSync for this test case. 
+ clusterInstance.EnableSemiSync = false + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: sqlSchema, + VSchema: vSchema, + } + + if err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false); err != nil { + return 1 + } + + // Collect table paths and ports + tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets + for _, tablet := range tablets { + if tablet.Type == "master" { + primaryTablet = tablet + } else if tablet.Type != "rdonly" { + replicaTablet = tablet + } + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // set the gateway we want to use + vtgateInstance.GatewayImplementation = "tabletgateway" + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1 + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run() + }() + os.Exit(exitCode) +} + +func throttleCheck(tablet *cluster.Vttablet) (*http.Response, error) { + return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkAPIPath)) +} + +func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { + return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath)) +} + +func TestThrottlerThresholdOK(t *testing.T) { + defer cluster.PanicHandler(t) + + { + resp, err := throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } +} + +func TestThrottlerAfterMetricsCollected(t *testing.T) { + defer cluster.PanicHandler(t) + + time.Sleep(throttlerInitWait) + // By this time metrics will have been collected. 
We expect no lag, and something like: + // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""} + { + resp, err := throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } +} + +func TestThreadsRunning(t *testing.T) { + defer cluster.PanicHandler(t) + + sleepSeconds := 6 + for i := 0; i < testThreshold; i++ { + go vtgateExec(t, fmt.Sprintf("select sleep(%d)", sleepSeconds), "") + } + t.Run("exceeds threshold", func(t *testing.T) { + time.Sleep(3 * time.Second) + // by this time we will have +1 threads_running, and we should hit the threshold + // {"StatusCode":429,"Value":2,"Threshold":2,"Message":"Threshold exceeded"} + { + resp, err := throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + } + }) + t.Run("restored below threshold", func(t *testing.T) { + time.Sleep(time.Duration(sleepSeconds) * time.Second) + // Restore + { + resp, err := throttleCheck(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + { + resp, err := throttleCheckSelf(primaryTablet) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + } + }) +} + +func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result { + t.Helper() + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + qr, err := conn.ExecuteFetch(query, 1000, true) + if expectError == "" { + require.NoError(t, err) + } else { + require.Error(t, err, "error should not be nil") + assert.Contains(t, err.Error(), expectError, "Unexpected error") + } + return qr +} diff --git 
a/go/test/endtoend/vreplication/cluster.go b/go/test/endtoend/vreplication/cluster.go index e1e2b8820bd..0d26fac77dd 100644 --- a/go/test/endtoend/vreplication/cluster.go +++ b/go/test/endtoend/vreplication/cluster.go @@ -23,37 +23,38 @@ import ( var ( debug = false // set to true to always use local env vtdataroot for local debugging - originalVtdataroot string - vtdataroot string + originalVtdataroot string + vtdataroot string + mainClusterConfig *ClusterConfig + externalClusterConfig *ClusterConfig ) -var globalConfig = struct { - hostname string - topoPort int - vtctldPort int - vtctldGrpcPort int - tmpDir string - vtgatePort int - vtgateGrpcPort int - vtgateMySQLPort int - tabletTypes string -}{"localhost", 2379, 15000, 15999, vtdataroot + "/tmp", - 15001, 15991, 15306, "MASTER,REPLICA"} - -var ( - tabletPortBase = 15000 - tabletGrpcPortBase = 20000 - tabletMysqlPortBase = 25000 -) +// ClusterConfig defines the parameters like ports, tmpDir, tablet types which uniquely define a vitess cluster +type ClusterConfig struct { + hostname string + topoPort int + vtctldPort int + vtctldGrpcPort int + vtdataroot string + tmpDir string + vtgatePort int + vtgateGrpcPort int + vtgateMySQLPort int + tabletTypes string + tabletPortBase int + tabletGrpcPortBase int + tabletMysqlPortBase int +} // VitessCluster represents all components within the test cluster type VitessCluster struct { - Name string - Cells map[string]*Cell - Topo *cluster.TopoProcess - Vtctld *cluster.VtctldProcess - Vtctl *cluster.VtctlProcess - VtctlClient *cluster.VtctlClientProcess + ClusterConfig *ClusterConfig + Name string + Cells map[string]*Cell + Topo *cluster.TopoProcess + Vtctld *cluster.VtctldProcess + Vtctl *cluster.VtctlProcess + VtctlClient *cluster.VtctlClientProcess } // Cell represents a Vitess cell within the test cluster @@ -85,37 +86,66 @@ type Tablet struct { DbServer *cluster.MysqlctlProcess } -func init() { - originalVtdataroot = os.Getenv("VTDATAROOT") -} - -func initGlobals() { - 
rand.Seed(time.Now().UTC().UnixNano()) +func setTempVtDataRoot() string { dirSuffix := 100000 + rand.Intn(999999-100000) // 6 digits if debug { vtdataroot = originalVtdataroot } else { vtdataroot = path.Join(originalVtdataroot, fmt.Sprintf("vreple2e_%d", dirSuffix)) } - globalConfig.tmpDir = vtdataroot + "/tmp" if _, err := os.Stat(vtdataroot); os.IsNotExist(err) { os.Mkdir(vtdataroot, 0700) } _ = os.Setenv("VTDATAROOT", vtdataroot) fmt.Printf("VTDATAROOT is %s\n", vtdataroot) + return vtdataroot +} + +func getClusterConfig(idx int, dataRootDir string) *ClusterConfig { + basePort := 15000 + etcdPort := 2379 + + basePort += idx * 10000 + etcdPort += idx * 10000 + if _, err := os.Stat(dataRootDir); os.IsNotExist(err) { + os.Mkdir(dataRootDir, 0700) + } + + return &ClusterConfig{ + hostname: "localhost", + topoPort: etcdPort, + vtctldPort: basePort, + vtctldGrpcPort: basePort + 999, + tmpDir: dataRootDir + "/tmp", + vtgatePort: basePort + 1, + vtgateGrpcPort: basePort + 991, + vtgateMySQLPort: basePort + 306, + tabletTypes: "master", + vtdataroot: dataRootDir, + tabletPortBase: basePort + 1000, + tabletGrpcPortBase: basePort + 1991, + tabletMysqlPortBase: basePort + 1306, + } } -// NewVitessCluster creates an entire VitessCluster for e2e testing -func NewVitessCluster(name string) (cluster *VitessCluster, err error) { - return &VitessCluster{Name: name, Cells: make(map[string]*Cell)}, nil +func init() { + rand.Seed(time.Now().UTC().UnixNano()) + originalVtdataroot = os.Getenv("VTDATAROOT") + var mainVtDataRoot string + if debug { + mainVtDataRoot = originalVtdataroot + } else { + mainVtDataRoot = setTempVtDataRoot() + } + mainClusterConfig = getClusterConfig(0, mainVtDataRoot) + externalClusterConfig = getClusterConfig(1, mainVtDataRoot+"/ext") } -// InitCluster creates the global processes needed for a cluster -func InitCluster(t *testing.T, cellNames []string) *VitessCluster { - initGlobals() - vc, _ := NewVitessCluster("Vdemo") +// NewVitessCluster starts a basic 
cluster with vtgate, vtctld and the topo +func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster { + vc := &VitessCluster{Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} require.NotNil(t, vc) - topo := cluster.TopoProcessInstance(globalConfig.topoPort, globalConfig.topoPort*10, globalConfig.hostname, "etcd2", "global") + topo := cluster.TopoProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.topoPort+1, vc.ClusterConfig.hostname, "etcd2", "global") require.NotNil(t, topo) require.Nil(t, topo.Setup("etcd2", nil)) @@ -125,14 +155,14 @@ func InitCluster(t *testing.T, cellNames []string) *VitessCluster { topo.ManageTopoDir("mkdir", "/vitess/"+cellName) } - vtctld := cluster.VtctldProcessInstance(globalConfig.vtctldPort, globalConfig.vtctldGrpcPort, - globalConfig.topoPort, globalConfig.hostname, globalConfig.tmpDir) + vtctld := cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, + vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname, vc.ClusterConfig.tmpDir) vc.Vtctld = vtctld require.NotNil(t, vc.Vtctld) // use first cell as `-cell` vc.Vtctld.Setup(cellNames[0]) - vc.Vtctl = cluster.VtctlProcessInstance(globalConfig.topoPort, globalConfig.hostname) + vc.Vtctl = cluster.VtctlProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname) require.NotNil(t, vc.Vtctl) for _, cellName := range cellNames { vc.Vtctl.AddCellInfo(cellName) @@ -141,7 +171,7 @@ func InitCluster(t *testing.T, cellNames []string) *VitessCluster { require.NotNil(t, cell) } - vc.VtctlClient = cluster.VtctlClientProcessInstance(globalConfig.hostname, vc.Vtctld.GrpcPort, globalConfig.tmpDir) + vc.VtctlClient = cluster.VtctlClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) require.NotNil(t, vc.VtctlClient) return vc @@ -194,23 +224,29 @@ func (vc *VitessCluster) AddTablet(t *testing.T, cell *Cell, keyspace *Keyspace, tablet 
:= &Tablet{} vttablet := cluster.VttabletProcessInstance( - tabletPortBase+tabletID, - tabletGrpcPortBase+tabletID, + vc.ClusterConfig.tabletPortBase+tabletID, + vc.ClusterConfig.tabletGrpcPortBase+tabletID, tabletID, cell.Name, shard.Name, keyspace.Name, - globalConfig.vtctldPort, + vc.ClusterConfig.vtctldPort, tabletType, vc.Topo.Port, - globalConfig.hostname, - globalConfig.tmpDir, - []string{"-queryserver-config-schema-reload-time", "5"}, //FIXME: for multi-cell initial schema doesn't seem to load without this + vc.ClusterConfig.hostname, + vc.ClusterConfig.tmpDir, + []string{ + "-queryserver-config-schema-reload-time", "5", + "-enable-lag-throttler", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + }, //FIXME: for multi-cell initial schema doesn't seem to load without "-queryserver-config-schema-reload-time" false) + require.NotNil(t, vttablet) vttablet.SupportsBackup = false - tablet.DbServer = cluster.MysqlCtlProcessInstance(tabletID, tabletMysqlPortBase+tabletID, globalConfig.tmpDir) + tablet.DbServer = cluster.MysqlCtlProcessInstance(tabletID, vc.ClusterConfig.tabletMysqlPortBase+tabletID, vc.ClusterConfig.tmpDir) require.NotNil(t, tablet.DbServer) tablet.DbServer.InitMysql = true proc, err := tablet.DbServer.StartProcess() @@ -325,15 +361,15 @@ func (vc *VitessCluster) DeleteShard(t *testing.T, cellName string, ksName strin // StartVtgate starts a vtgate process func (vc *VitessCluster) StartVtgate(t *testing.T, cell *Cell, cellsToWatch string) { vtgate := cluster.VtgateProcessInstance( - globalConfig.vtgatePort, - globalConfig.vtgateGrpcPort, - globalConfig.vtgateMySQLPort, + vc.ClusterConfig.vtgatePort, + vc.ClusterConfig.vtgateGrpcPort, + vc.ClusterConfig.vtgateMySQLPort, cell.Name, cellsToWatch, - globalConfig.hostname, - globalConfig.tabletTypes, - globalConfig.topoPort, - globalConfig.tmpDir, + vc.ClusterConfig.hostname, + vc.ClusterConfig.tabletTypes, + vc.ClusterConfig.topoPort, + vc.ClusterConfig.tmpDir, 
[]string{"-tablet_refresh_interval", "10ms"}) require.NotNil(t, vtgate) if err := vtgate.Setup(); err != nil { diff --git a/go/test/endtoend/vreplication/config.go b/go/test/endtoend/vreplication/config.go index fc8a08e0958..214876a2050 100644 --- a/go/test/endtoend/vreplication/config.go +++ b/go/test/endtoend/vreplication/config.go @@ -3,7 +3,7 @@ package vreplication var ( initialProductSchema = ` create table product(pid int, description varbinary(128), primary key(pid)); -create table customer(cid int, name varbinary(128), typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid)); +create table customer(cid int, name varbinary(128), meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid)) CHARSET=utf8mb4; create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; create table merchant(mname varchar(128), category varchar(128), primary key(mname)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; create table orders(oid int, cid int, pid int, mname varchar(128), price int, primary key(oid)); @@ -299,5 +299,18 @@ create table tenant(tenant_id binary(16), name varbinary(16), primary key (tenan "create_ddl": "create table rollup(rollupname varchar(100), kount int, primary key (rollupname))" }] } +` + initialExternalSchema = ` +create table review(rid int, pid int, review varbinary(128), primary key(rid)); +create table rating(gid int, pid int, rating int, primary key(gid)); +` + + initialExternalVSchema = ` +{ + "tables": { + "review": {}, + "rating": {} + } +} ` ) diff --git a/go/test/endtoend/vreplication/helper.go b/go/test/endtoend/vreplication/helper.go index 6c9a8bdbb13..15726e2dd36 100644 --- a/go/test/endtoend/vreplication/helper.go +++ b/go/test/endtoend/vreplication/helper.go @@ -36,9 +36,9 
@@ func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { return qr } -func getConnection(t *testing.T, port int) *mysql.Conn { +func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { vtParams := mysql.ConnParams{ - Host: globalConfig.hostname, + Host: hostname, Port: port, Uname: "vt_dba", } diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go new file mode 100644 index 00000000000..d327f39788f --- /dev/null +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -0,0 +1,163 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" +) + +func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { + t.Run("insertInitialData", func(t *testing.T) { + fmt.Printf("Inserting initial data\n") + execVtgateQuery(t, conn, "rating:0", "insert into review(rid, pid, review) values(1, 1, 'review1');") + execVtgateQuery(t, conn, "rating:0", "insert into review(rid, pid, review) values(2, 1, 'review2');") + execVtgateQuery(t, conn, "rating:0", "insert into review(rid, pid, review) values(3, 2, 'review3');") + execVtgateQuery(t, conn, "rating:0", "insert into rating(gid, pid, rating) values(1, 1, 4);") + execVtgateQuery(t, conn, "rating:0", "insert into rating(gid, pid, rating) values(2, 2, 5);") + }) +} + +// TestMigrate runs an e2e test for importing from an external cluster using the Mount and Migrate commands. +// We have an anti-pattern in Vitess: vt executables look for an environment variable VTDATAROOT for certain cluster parameters +// like the log directory when they are created. Until this test we just needed a single cluster for e2e tests. +// However now we need to create an external Vitess cluster. For this we need a different VTDATAROOT and +// hence the VTDATAROOT env variable gets overwritten. 
+// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT +func TestMigrate(t *testing.T) { + defaultCellName := "zone1" + cells := []string{"zone1"} + allCellNames = "zone1" + vc = NewVitessCluster(t, "TestMigrate", cells, mainClusterConfig) + + require.NotNil(t, vc) + defaultReplicas = 0 + defaultRdonly = 0 + defer vc.TearDown() + + defaultCell = vc.Cells[defaultCellName] + vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100) + vtgate = defaultCell.Vtgates[0] + require.NotNil(t, vtgate) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + insertInitialData(t) + + // create external cluster + extCell := "extcell1" + extCells := []string{extCell} + extVc := NewVitessCluster(t, "TestMigrateExternal", extCells, externalClusterConfig) + require.NotNil(t, extVc) + defer extVc.TearDown() + + extCell2 := extVc.Cells[extCell] + extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", initialExternalVSchema, initialExternalSchema, 0, 0, 1000) + extVtgate := extCell2.Vtgates[0] + require.NotNil(t, extVtgate) + + extVtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "rating", "0"), 1) + verifyClusterHealth(t, extVc) + extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) + insertInitialDataIntoExternalCluster(t, extVtgateConn) + + var err error + var output, expected string + ksWorkflow := "product.e1" + + t.Run("mount external cluster", func(t *testing.T) { + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-topo_type=etcd2", + fmt.Sprintf("-topo_server=localhost:%d", extVc.ClusterConfig.topoPort), "-topo_root=/vitess/global", "ext1"); err != nil { + t.Fatalf("Mount 
command failed with %+v : %s\n", err, output) + } + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-list"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + expected = "ext1\n" + require.Equal(t, expected, output) + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-show", "ext1"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + expected = `{"ClusterName":"ext1","topo_config":{"topo_type":"etcd2","server":"localhost:12379","root":"/vitess/global"}}` + "\n" + require.Equal(t, expected, output) + }) + + t.Run("migrate from external cluster", func(t *testing.T) { + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "-all", "-cells=extcell1", + "-source=ext1.rating", "create", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + validateCount(t, vtgateConn, "product:0", "rating", 2) + validateCount(t, vtgateConn, "product:0", "review", 3) + execVtgateQuery(t, extVtgateConn, "rating", "insert into review(rid, pid, review) values(4, 1, 'review4');") + execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") + time.Sleep(1 * time.Second) // wait for stream to find row + validateCount(t, vtgateConn, "product:0", "rating", 3) + validateCount(t, vtgateConn, "product:0", "review", 4) + vdiff(t, ksWorkflow, "extcell1") + + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "complete", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + }) + t.Run("cancel migrate workflow", func(t *testing.T) { + execVtgateQuery(t, vtgateConn, "product", "drop table review,rating") + + if output, err = 
vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "-all", "-auto_start=false", "-cells=extcell1", + "-source=ext1.rating", "create", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + validateCount(t, vtgateConn, "product:0", "rating", 0) + validateCount(t, vtgateConn, "product:0", "review", 0) + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "cancel", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + var found bool + found, err = checkIfTableExists(t, vc, "zone1-100", "review") + require.NoError(t, err) + require.False(t, found) + found, err = checkIfTableExists(t, vc, "zone1-100", "rating") + require.NoError(t, err) + require.False(t, found) + }) + t.Run("unmount external cluster", func(t *testing.T) { + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-unmount", "ext1"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-list"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + expected = "\n" + require.Equal(t, expected, output) + + output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-show", "ext1") + require.Errorf(t, err, "there is no vitess cluster named ext1") + }) +} diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 9541c154730..cb189b8867c 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -65,7 +65,7 @@ func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) erro time.Sleep(1 * 
time.Second) catchup(t, targetTab1, workflowName, "Reshard") catchup(t, targetTab2, workflowName, "Reshard") - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") return nil } @@ -79,7 +79,7 @@ func createMoveTablesWorkflow(t *testing.T, tables string) error { catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, "MoveTables") time.Sleep(1 * time.Second) - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") return nil } @@ -233,7 +233,7 @@ func getCurrentState(t *testing.T) string { func TestBasicV2Workflows(t *testing.T) { vc = setupCluster(t) defer vtgateConn.Close() - //defer vc.TearDown() + defer vc.TearDown() testMoveTablesV2Workflow(t) testReshardV2Workflow(t) @@ -387,7 +387,7 @@ func testRestOfWorkflow(t *testing.T) { func setupCluster(t *testing.T) *VitessCluster { cells := []string{"zone1", "zone2"} - vc = InitCluster(t, cells) + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) require.NotNil(t, vc) defaultCellName := "zone1" allCellNames = defaultCellName @@ -403,8 +403,8 @@ func setupCluster(t *testing.T) *VitessCluster { vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) - verifyClusterHealth(t) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + verifyClusterHealth(t, vc) insertInitialData(t) sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-101"].Vttablet @@ -463,7 +463,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias moveTables(t, sourceCellOrAlias, workflow, sourceKs, targetKs, tables) catchup(t, targetTab1, workflow, "MoveTables") catchup(t, targetTab2, workflow, "MoveTables") - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") } var switchReadsFollowedBySwitchWrites = func() { diff --git 
a/go/test/endtoend/vreplication/unsharded_init_data.sql b/go/test/endtoend/vreplication/unsharded_init_data.sql index 06eb2e18628..1b58404cfb7 100644 --- a/go/test/endtoend/vreplication/unsharded_init_data.sql +++ b/go/test/endtoend/vreplication/unsharded_init_data.sql @@ -1,5 +1,5 @@ -insert into customer(cid, name, typ, sport) values(1, 'john',1,'football,baseball'); -insert into customer(cid, name, typ, sport) values(2, 'paul','soho','cricket'); +insert into customer(cid, name, typ, sport, meta) values(1, 'john',1,'football,baseball','{}'); +insert into customer(cid, name, typ, sport, meta) values(2, 'paul','soho','cricket',convert(x'7b7d' using utf8mb4)); insert into customer(cid, name, typ, sport) values(3, 'ringo','enterprise',''); insert into merchant(mname, category) values('monoprice', 'electronics'); insert into merchant(mname, category) values('newegg', 'electronics'); diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index c4ea174354a..4c06d5ec864 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net/http" "strings" "testing" "time" @@ -30,17 +31,21 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" "vitess.io/vitess/go/vt/wrangler" ) var ( - vc *VitessCluster - vtgate *cluster.VtgateProcess - defaultCell *Cell - vtgateConn *mysql.Conn - defaultRdonly int - defaultReplicas int - allCellNames string + vc *VitessCluster + vtgate *cluster.VtgateProcess + defaultCell *Cell + vtgateConn *mysql.Conn + defaultRdonly int + defaultReplicas int + allCellNames string + httpClient = throttlebase.SetupHTTPClient(time.Second) + sourceThrottlerAppName = "vstreamer" + targetThrottlerAppName = "vreplication" ) func init() { @@ -48,11 +53,42 @@ func init() { 
defaultReplicas = 1 } +func throttleResponse(tablet *cluster.VttabletProcess, path string) (resp *http.Response, respBody string, err error) { + apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.TabletHostname, tablet.Port, path) + resp, err = httpClient.Get(apiURL) + if err != nil { + return resp, respBody, err + } + b, err := ioutil.ReadAll(resp.Body) + respBody = string(b) + return resp, respBody, err +} + +func throttleApp(tablet *cluster.VttabletProcess, app string) (*http.Response, string, error) { + return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app)) +} + +func unthrottleApp(tablet *cluster.VttabletProcess, app string) (*http.Response, string, error) { + return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app)) +} + +func throttlerCheckSelf(tablet *cluster.VttabletProcess, app string) (resp *http.Response, respBody string, err error) { + apiURL := fmt.Sprintf("http://%s:%d/throttler/check-self?app=%s", tablet.TabletHostname, tablet.Port, app) + resp, err = httpClient.Get(apiURL) + if err != nil { + return resp, respBody, err + } + b, err := ioutil.ReadAll(resp.Body) + respBody = string(b) + return resp, respBody, err +} + func TestBasicVreplicationWorkflow(t *testing.T) { defaultCellName := "zone1" allCells := []string{"zone1"} allCellNames = "zone1" - vc = InitCluster(t, allCells) + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", allCells, mainClusterConfig) + require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run this test with master tablets defer func() { defaultReplicas = 1 }() @@ -65,9 +101,9 @@ func TestBasicVreplicationWorkflow(t *testing.T) { require.NotNil(t, vtgate) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - 
verifyClusterHealth(t) + verifyClusterHealth(t, vc) insertInitialData(t) materializeRollup(t) @@ -101,7 +137,7 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} allCellNames = "zone1,zone2" - vc = InitCluster(t, cells) + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) require.NotNil(t, vc) defaultCellName := "zone1" defaultCell = vc.Cells[defaultCellName] @@ -117,9 +153,9 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - verifyClusterHealth(t) + verifyClusterHealth(t, vc) insertInitialData(t) shardCustomer(t, true, []*Cell{cell1, cell2}, cell2.Name) } @@ -127,7 +163,7 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { func TestCellAliasVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} - vc = InitCluster(t, cells) + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) require.NotNil(t, vc) allCellNames = "zone1,zone2" defaultCellName := "zone1" @@ -148,25 +184,27 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - verifyClusterHealth(t) + verifyClusterHealth(t, vc) insertInitialData(t) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias") } func insertInitialData(t *testing.T) { - fmt.Printf("Inserting initial data\n") - 
lines, _ := ioutil.ReadFile("unsharded_init_data.sql") - execMultipleQueries(t, vtgateConn, "product:0", string(lines)) - execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, "product:0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") - fmt.Printf("Done inserting initial data\n") + t.Run("insertInitialData", func(t *testing.T) { + fmt.Printf("Inserting initial data\n") + lines, _ := ioutil.ReadFile("unsharded_init_data.sql") + execMultipleQueries(t, vtgateConn, "product:0", string(lines)) + execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, "product:0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") + fmt.Printf("Done inserting initial data\n") - validateCount(t, vtgateConn, "product:0", "product", 2) - validateCount(t, vtgateConn, "product:0", "customer", 3) - validateQuery(t, vtgateConn, "product:0", "select * from merchant", - `[[VARCHAR("monoprice") VARCHAR("electronics")] [VARCHAR("newegg") VARCHAR("electronics")]]`) + validateCount(t, vtgateConn, "product:0", "product", 2) + validateCount(t, vtgateConn, "product:0", "customer", 3) + validateQuery(t, vtgateConn, "product:0", "select * from merchant", + `[[VARCHAR("monoprice") VARCHAR("electronics")] [VARCHAR("newegg") VARCHAR("electronics")]]`) + }) } func insertMoreCustomers(t *testing.T, numCustomers int) { @@ -187,395 +225,544 @@ func insertMoreProducts(t *testing.T) { execVtgateQuery(t, vtgateConn, "product", sql) } -func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAlias string) { - workflow := "p2c" - sourceKs := "product" - targetKs := "customer" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200); err != nil { - 
t.Fatal(err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "customer", "-80"), 1); err != nil { - t.Fatal(err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "customer", "80-"), 1); err != nil { - t.Fatal(err) - } - tables := "customer,tenant" - moveTables(t, sourceCellOrAlias, workflow, sourceKs, targetKs, tables) - - // Assume we are operating on first cell - defaultCell := cells[0] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] - customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet - customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - - catchup(t, customerTab1, workflow, "MoveTables") - catchup(t, customerTab2, workflow, "MoveTables") - - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet - query := "select * from customer" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query)) - insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" - matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1, :vtg2)" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) - execVtgateQuery(t, vtgateConn, "product", "update tenant set name='xyz'") - vdiff(t, ksWorkflow) - switchReadsDryRun(t, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard) - switchReads(t, allCellNames, ksWorkflow) - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query)) - switchWritesDryRun(t, ksWorkflow, dryRunResultsSwitchWritesCustomerShard) - switchWrites(t, ksWorkflow, false) - ksShards := []string{"product/0", "customer/-80", "customer/80-"} - printShardPositions(vc, ksShards) - insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)" - matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1, 
:_cid0)" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2)) - - insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" //ID 101, hence due to reverse_bits in shard 80- - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2)) - - insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" //ID 102, hence due to reverse_bits in shard -80 - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) - reverseKsWorkflow := "product.p2c_reverse" - if testReverse { - //Reverse Replicate - switchReads(t, allCellNames, reverseKsWorkflow) - printShardPositions(vc, ksShards) - switchWrites(t, reverseKsWorkflow, false) - - insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) - // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch - insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1)) - insertQuery1 = "insert into customer(cid, name) values(1004, 'tempCustomer7')" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1)) - - //Go forward again - switchReads(t, allCellNames, ksWorkflow) - switchWrites(t, ksWorkflow, false) - dropSourcesDryRun(t, ksWorkflow, false, dryRunResultsDropSourcesDropCustomerShard) - dropSourcesDryRun(t, ksWorkflow, true, dryRunResultsDropSourcesRenameCustomerShard) - - var exists bool - exists, err := checkIfBlacklistExists(t, vc, "product:0", "customer") - 
require.NoError(t, err, "Error getting blacklist for customer:0") - require.True(t, exists) - dropSources(t, ksWorkflow) +func insertMoreProductsForSourceThrottler(t *testing.T) { + sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" + execVtgateQuery(t, vtgateConn, "product", sql) +} - exists, err = checkIfBlacklistExists(t, vc, "product:0", "customer") - require.NoError(t, err, "Error getting blacklist for customer:0") - require.False(t, exists) +func insertMoreProductsForTargetThrottler(t *testing.T) { + sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" + execVtgateQuery(t, vtgateConn, "product", sql) +} - for _, shard := range strings.Split("-80,80-", ",") { - expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", "customer:"+shard, 0) +func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAlias string) { + t.Run("shardCustomer", func(t *testing.T) { + workflow := "p2c" + sourceKs := "product" + targetKs := "customer" + ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200); err != nil { + t.Fatal(err) + } + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "customer", "-80"), 1); err != nil { + t.Fatal(err) + } + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "customer", "80-"), 1); err != nil { + t.Fatal(err) } - expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", "product:0", 0) + tables := "customer,tenant" + moveTables(t, sourceCellOrAlias, workflow, sourceKs, targetKs, tables) - var found bool - found, err = checkIfTableExists(t, vc, "zone1-100", "customer") - assert.NoError(t, err, "Customer table not deleted from zone1-100") - require.False(t, found) + // Assume we are operating on 
first cell + defaultCell := cells[0] + custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet + customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - found, err = checkIfTableExists(t, vc, "zone1-200", "customer") - assert.NoError(t, err, "Customer table not deleted from zone1-200") - require.True(t, found) + catchup(t, customerTab1, workflow, "MoveTables") + catchup(t, customerTab2, workflow, "MoveTables") - insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" //ID 103, hence due to reverse_bits in shard 80- + productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + query := "select * from customer" + require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query)) + insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" + matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1, :vtg2)" + require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) + execVtgateQuery(t, vtgateConn, "product", "update tenant set name='xyz'") + vdiff(t, ksWorkflow, "") + switchReadsDryRun(t, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard) + switchReads(t, allCellNames, ksWorkflow) + require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query)) + switchWritesDryRun(t, ksWorkflow, dryRunResultsSwitchWritesCustomerShard) + switchWrites(t, ksWorkflow, false) + ksShards := []string{"product/0", "customer/-80", "customer/80-"} + printShardPositions(vc, ksShards) + insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)" + matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1, :_cid0)" require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2)) 
- insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" //ID 105, hence due to reverse_bits in shard -80 - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) - insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" //ID 104, hence due to reverse_bits in shard 80- + + insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" //ID 101, hence due to reverse_bits in shard 80- require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2)) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where name like 'tempCustomer%'") - validateCountInTablet(t, customerTab1, "customer", "customer", 1) - validateCountInTablet(t, customerTab2, "customer", "customer", 2) - validateCount(t, vtgateConn, "customer", "customer.customer", 3) + insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" //ID 102, hence due to reverse_bits in shard -80 + require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) + + execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") + reverseKsWorkflow := "product.p2c_reverse" + if testReverse { + //Reverse Replicate + switchReads(t, allCellNames, reverseKsWorkflow) + printShardPositions(vc, ksShards) + switchWrites(t, reverseKsWorkflow, false) + + insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" + require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) + // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch + insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')" + require.False(t, 
validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1)) + insertQuery1 = "insert into customer(cid, name) values(1004, 'tempCustomer7')" + require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1)) + + //Go forward again + switchReads(t, allCellNames, ksWorkflow) + switchWrites(t, ksWorkflow, false) + dropSourcesDryRun(t, ksWorkflow, false, dryRunResultsDropSourcesDropCustomerShard) + dropSourcesDryRun(t, ksWorkflow, true, dryRunResultsDropSourcesRenameCustomerShard) + + var exists bool + exists, err := checkIfBlacklistExists(t, vc, "product:0", "customer") + require.NoError(t, err, "Error getting blacklist for customer:0") + require.True(t, exists) + dropSources(t, ksWorkflow) + + exists, err = checkIfBlacklistExists(t, vc, "product:0", "customer") + require.NoError(t, err, "Error getting blacklist for customer:0") + require.False(t, exists) + + for _, shard := range strings.Split("-80,80-", ",") { + expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", "customer:"+shard, 0) + } - query = "insert into customer (name, cid) values('george', 5)" - execVtgateQuery(t, vtgateConn, "customer", query) - validateCountInTablet(t, customerTab1, "customer", "customer", 1) - validateCountInTablet(t, customerTab2, "customer", "customer", 3) - validateCount(t, vtgateConn, "customer", "customer.customer", 4) - } + expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", "product:0", 0) + + var found bool + found, err = checkIfTableExists(t, vc, "zone1-100", "customer") + assert.NoError(t, err, "Customer table not deleted from zone1-100") + require.False(t, found) + + found, err = checkIfTableExists(t, vc, "zone1-200", "customer") + assert.NoError(t, err, "Customer table not deleted from zone1-200") + require.True(t, found) + + insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" //ID 103, 
hence due to reverse_bits in shard 80- + require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2)) + insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" //ID 105, hence due to reverse_bits in shard -80 + require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) + insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" //ID 104, hence due to reverse_bits in shard 80- + require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2)) + + execVtgateQuery(t, vtgateConn, "customer", "delete from customer where name like 'tempCustomer%'") + validateCountInTablet(t, customerTab1, "customer", "customer", 1) + validateCountInTablet(t, customerTab2, "customer", "customer", 2) + validateCount(t, vtgateConn, "customer", "customer.customer", 3) + + query = "insert into customer (name, cid) values('george', 5)" + execVtgateQuery(t, vtgateConn, "customer", query) + validateCountInTablet(t, customerTab1, "customer", "customer", 1) + validateCountInTablet(t, customerTab2, "customer", "customer", 3) + validateCount(t, vtgateConn, "customer", "customer.customer", 4) + } + }) } func validateRollupReplicates(t *testing.T) { - insertMoreProducts(t) - time.Sleep(1 * time.Second) - validateCount(t, vtgateConn, "product", "rollup", 1) - validateQuery(t, vtgateConn, "product:0", "select rollupname, kount from rollup", - `[[VARCHAR("total") INT32(5)]]`) + t.Run("validateRollupReplicates", func(t *testing.T) { + insertMoreProducts(t) + time.Sleep(1 * time.Second) + validateCount(t, vtgateConn, "product", "rollup", 1) + validateQuery(t, vtgateConn, "product:0", "select rollupname, kount from rollup", + `[[VARCHAR("total") INT32(5)]]`) + }) } func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias string) { - ksName := "customer" - 
counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} - reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", 600, counts, nil, cells, sourceCellOrAlias) - validateCount(t, vtgateConn, ksName, "customer", 20) - query := "insert into customer (name) values('yoko')" - execVtgateQuery(t, vtgateConn, ksName, query) - validateCount(t, vtgateConn, ksName, "customer", 21) + t.Run("reshardCustomer2to4Split", func(t *testing.T) { + ksName := "customer" + counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} + reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", 600, counts, nil, cells, sourceCellOrAlias) + validateCount(t, vtgateConn, ksName, "customer", 20) + query := "insert into customer (name) values('yoko')" + execVtgateQuery(t, vtgateConn, ksName, query) + validateCount(t, vtgateConn, ksName, "customer", 21) + }) } func reshardMerchant2to3SplitMerge(t *testing.T) { - ksName := "merchant" - counts := map[string]int{"zone1-1600": 0, "zone1-1700": 2, "zone1-1800": 0} - reshard(t, ksName, "merchant", "m2m3", "-80,80-", "-40,40-c0,c0-", 1600, counts, dryRunResultsSwitchWritesM2m3, nil, "") - validateCount(t, vtgateConn, ksName, "merchant", 2) - query := "insert into merchant (mname, category) values('amazon', 'electronics')" - execVtgateQuery(t, vtgateConn, ksName, query) - validateCount(t, vtgateConn, ksName, "merchant", 3) + t.Run("reshardMerchant2to3SplitMerge", func(t *testing.T) { + ksName := "merchant" + counts := map[string]int{"zone1-1600": 0, "zone1-1700": 2, "zone1-1800": 0} + reshard(t, ksName, "merchant", "m2m3", "-80,80-", "-40,40-c0,c0-", 1600, counts, dryRunResultsSwitchWritesM2m3, nil, "") + validateCount(t, vtgateConn, ksName, "merchant", 2) + query := "insert into merchant (mname, category) values('amazon', 'electronics')" + execVtgateQuery(t, vtgateConn, ksName, query) + validateCount(t, vtgateConn, ksName, "merchant", 3) + + var output string + 
var err error - var output string - var err error - - for _, shard := range strings.Split("-80,80-", ",") { - output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetShard", "merchant:"+shard) - if err == nil { - t.Fatal("GetShard merchant:-80 failed") + for _, shard := range strings.Split("-80,80-", ",") { + output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetShard", "merchant:"+shard) + if err == nil { + t.Fatal("GetShard merchant:-80 failed") + } + assert.Contains(t, output, "node doesn't exist", "GetShard succeeded for dropped shard merchant:"+shard) } - assert.Contains(t, output, "node doesn't exist", "GetShard succeeded for dropped shard merchant:"+shard) - } - for _, shard := range strings.Split("-40,40-c0,c0-", ",") { - output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetShard", "merchant:"+shard) - if err != nil { - t.Fatalf("GetShard merchant failed for: %s: %v", shard, err) + for _, shard := range strings.Split("-40,40-c0,c0-", ",") { + output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetShard", "merchant:"+shard) + if err != nil { + t.Fatalf("GetShard merchant failed for: %s: %v", shard, err) + } + assert.NotContains(t, output, "node doesn't exist", "GetShard failed for valid shard merchant:"+shard) + assert.Contains(t, output, "master_alias", "GetShard failed for valid shard merchant:"+shard) } - assert.NotContains(t, output, "node doesn't exist", "GetShard failed for valid shard merchant:"+shard) - assert.Contains(t, output, "master_alias", "GetShard failed for valid shard merchant:"+shard) - } - for _, shard := range strings.Split("-40,40-c0,c0-", ",") { - expectNumberOfStreams(t, vtgateConn, "reshardMerchant2to3SplitMerge", "m2m3", "merchant:"+shard, 0) - } - - var found bool - found, err = checkIfTableExists(t, vc, "zone1-1600", "customer") - assert.NoError(t, err, "Customer table found incorrectly in zone1-1600") - require.False(t, found) - found, err = checkIfTableExists(t, vc, "zone1-1600", "merchant") - assert.NoError(t, 
err, "Merchant table not found in zone1-1600") - require.True(t, found) + for _, shard := range strings.Split("-40,40-c0,c0-", ",") { + expectNumberOfStreams(t, vtgateConn, "reshardMerchant2to3SplitMerge", "m2m3", "merchant:"+shard, 0) + } + var found bool + found, err = checkIfTableExists(t, vc, "zone1-1600", "customer") + assert.NoError(t, err, "Customer table found incorrectly in zone1-1600") + require.False(t, found) + found, err = checkIfTableExists(t, vc, "zone1-1600", "merchant") + assert.NoError(t, err, "Merchant table not found in zone1-1600") + require.True(t, found) + }) } func reshardMerchant3to1Merge(t *testing.T) { - ksName := "merchant" - counts := map[string]int{"zone1-2000": 3} - reshard(t, ksName, "merchant", "m3m1", "-40,40-c0,c0-", "0", 2000, counts, nil, nil, "") - validateCount(t, vtgateConn, ksName, "merchant", 3) - query := "insert into merchant (mname, category) values('flipkart', 'electronics')" - execVtgateQuery(t, vtgateConn, ksName, query) - validateCount(t, vtgateConn, ksName, "merchant", 4) + t.Run("reshardMerchant3to1Merge", func(t *testing.T) { + ksName := "merchant" + counts := map[string]int{"zone1-2000": 3} + reshard(t, ksName, "merchant", "m3m1", "-40,40-c0,c0-", "0", 2000, counts, nil, nil, "") + validateCount(t, vtgateConn, ksName, "merchant", 3) + query := "insert into merchant (mname, category) values('flipkart', 'electronics')" + execVtgateQuery(t, vtgateConn, ksName, query) + validateCount(t, vtgateConn, ksName, "merchant", 4) + }) } func reshardCustomer3to2SplitMerge(t *testing.T) { //-40,40-80,80-c0 => merge/split, c0- stays the same ending up with 3 - ksName := "customer" - counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5} - reshard(t, ksName, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", 1000, counts, nil, nil, "") + t.Run("reshardCustomer3to2SplitMerge", func(t *testing.T) { + ksName := "customer" + counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5} + reshard(t, 
ksName, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", 1000, counts, nil, nil, "") + }) } func reshardCustomer3to1Merge(t *testing.T) { //to unsharded - ksName := "customer" - counts := map[string]int{"zone1-1500": 21} - reshard(t, ksName, "customer", "c3c1", "-60,60-c0,c0-", "0", 1500, counts, nil, nil, "") + t.Run("reshardCustomer3to1Merge", func(t *testing.T) { + ksName := "customer" + counts := map[string]int{"zone1-1500": 21} + reshard(t, ksName, "customer", "c3c1", "-60,60-c0,c0-", "0", 1500, counts, nil, nil, "") + }) } func reshard(t *testing.T, ksName string, tableName string, workflow string, sourceShards string, targetShards string, tabletIDBase int, counts map[string]int, dryRunResultSwitchWrites []string, cells []*Cell, sourceCellOrAlias string) { - if cells == nil { - cells = []*Cell{defaultCell} - } - if sourceCellOrAlias == "" { - sourceCellOrAlias = defaultCell.Name - } - ksWorkflow := ksName + "." + workflow - keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] - require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase)) - arrTargetShardNames := strings.Split(targetShards, ",") - - for _, shardName := range arrTargetShardNames { - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", ksName, shardName), 1); err != nil { - t.Fatal(err) + t.Run("reshard", func(t *testing.T) { + if cells == nil { + cells = []*Cell{defaultCell} } - } - if err := vc.VtctlClient.ExecuteCommand("Reshard", "-cells="+sourceCellOrAlias, "-tablet_types=replica,master", ksWorkflow, sourceShards, targetShards); err != nil { - t.Fatalf("Reshard command failed with %+v\n", err) - } - tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "master") - targetShards = "," + targetShards + "," - for _, tab := range tablets { - if strings.Contains(targetShards, ","+tab.Shard+",") { - fmt.Printf("Waiting for vrepl to catch up on %s since it IS a target shard\n", tab.Shard) - catchup(t, tab, 
workflow, "Reshard") - } else { - fmt.Printf("Not waiting for vrepl to catch up on %s since it is NOT a target shard\n", tab.Shard) - continue + if sourceCellOrAlias == "" { + sourceCellOrAlias = defaultCell.Name } - } - vdiff(t, ksWorkflow) - switchReads(t, allCellNames, ksWorkflow) - if dryRunResultSwitchWrites != nil { - switchWritesDryRun(t, ksWorkflow, dryRunResultSwitchWrites) - } - switchWrites(t, ksWorkflow, false) - dropSources(t, ksWorkflow) + ksWorkflow := ksName + "." + workflow + keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] + require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase)) + arrTargetShardNames := strings.Split(targetShards, ",") + + for _, shardName := range arrTargetShardNames { + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", ksName, shardName), 1); err != nil { + t.Fatal(err) + } + } + if err := vc.VtctlClient.ExecuteCommand("Reshard", "-cells="+sourceCellOrAlias, "-tablet_types=replica,master", ksWorkflow, sourceShards, targetShards); err != nil { + t.Fatalf("Reshard command failed with %+v\n", err) + } + tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "master") + targetShards = "," + targetShards + "," + for _, tab := range tablets { + if strings.Contains(targetShards, ","+tab.Shard+",") { + fmt.Printf("Waiting for vrepl to catch up on %s since it IS a target shard\n", tab.Shard) + catchup(t, tab, workflow, "Reshard") + } else { + fmt.Printf("Not waiting for vrepl to catch up on %s since it is NOT a target shard\n", tab.Shard) + continue + } + } + vdiff(t, ksWorkflow, "") + switchReads(t, allCellNames, ksWorkflow) + if dryRunResultSwitchWrites != nil { + switchWritesDryRun(t, ksWorkflow, dryRunResultSwitchWrites) + } + switchWrites(t, ksWorkflow, false) + dropSources(t, ksWorkflow) - for tabletName, count := range counts { - if tablets[tabletName] == nil { - continue + for tabletName, count := range counts { + if 
tablets[tabletName] == nil { + continue + } + validateCountInTablet(t, tablets[tabletName], ksName, tableName, count) } - validateCountInTablet(t, tablets[tabletName], ksName, tableName, count) - } + }) } func shardOrders(t *testing.T) { - workflow := "o2c" - cell := defaultCell.Name - sourceKs := "product" - targetKs := "customer" - tables := "orders" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - applyVSchema(t, ordersVSchema, targetKs) - moveTables(t, cell, workflow, sourceKs, targetKs, tables) - - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] - customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet - customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - catchup(t, customerTab1, workflow, "MoveTables") - catchup(t, customerTab2, workflow, "MoveTables") - vdiff(t, ksWorkflow) - switchReads(t, allCellNames, ksWorkflow) - switchWrites(t, ksWorkflow, false) - dropSources(t, ksWorkflow) - validateCountInTablet(t, customerTab1, "customer", "orders", 1) - validateCountInTablet(t, customerTab2, "customer", "orders", 2) - validateCount(t, vtgateConn, "customer", "orders", 3) + t.Run("shardOrders", func(t *testing.T) { + workflow := "o2c" + cell := defaultCell.Name + sourceKs := "product" + targetKs := "customer" + tables := "orders" + ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + applyVSchema(t, ordersVSchema, targetKs) + moveTables(t, cell, workflow, sourceKs, targetKs, tables) + + custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet + customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet + catchup(t, customerTab1, workflow, "MoveTables") + catchup(t, customerTab2, workflow, "MoveTables") + vdiff(t, ksWorkflow, "") + switchReads(t, allCellNames, ksWorkflow) + switchWrites(t, ksWorkflow, false) + dropSources(t, ksWorkflow) + validateCountInTablet(t, customerTab1, "customer", "orders", 1) + validateCountInTablet(t, 
customerTab2, "customer", "orders", 2) + validateCount(t, vtgateConn, "customer", "orders", 3) + }) } func shardMerchant(t *testing.T) { - workflow := "p2m" - cell := defaultCell.Name - sourceKs := "product" - targetKs := "merchant" - tables := "merchant" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "merchant", "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400); err != nil { - t.Fatal(err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "merchant", "-80"), 1); err != nil { - t.Fatal(err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "merchant", "80-"), 1); err != nil { - t.Fatal(err) - } - moveTables(t, cell, workflow, sourceKs, targetKs, tables) - merchantKs := vc.Cells[defaultCell.Name].Keyspaces["merchant"] - merchantTab1 := merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet - merchantTab2 := merchantKs.Shards["80-"].Tablets["zone1-500"].Vttablet - catchup(t, merchantTab1, workflow, "MoveTables") - catchup(t, merchantTab2, workflow, "MoveTables") - - vdiff(t, "merchant.p2m") - switchReads(t, allCellNames, ksWorkflow) - switchWrites(t, ksWorkflow, false) - dropSources(t, ksWorkflow) - - validateCountInTablet(t, merchantTab1, "merchant", "merchant", 1) - validateCountInTablet(t, merchantTab2, "merchant", "merchant", 1) - validateCount(t, vtgateConn, "merchant", "merchant", 2) - -} - -func vdiff(t *testing.T, workflow string) { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "-format", "json", workflow) - fmt.Printf("vdiff err: %+v, output: %+v\n", err, output) - require.Nil(t, err) - require.NotNil(t, output) - diffReports := make([]*wrangler.DiffReport, 0) - err = json.Unmarshal([]byte(output), &diffReports) - require.Nil(t, err) - if len(diffReports) < 1 { - t.Fatal("VDiff did not return a valid json response " + output + "\n") - } - require.True(t, len(diffReports) > 0) - for key, diffReport 
:= range diffReports { - if diffReport.ProcessedRows != diffReport.MatchingRows { - t.Errorf("vdiff error for %d : %#v\n", key, diffReport) + t.Run("shardMerchant", func(t *testing.T) { + workflow := "p2m" + cell := defaultCell.Name + sourceKs := "product" + targetKs := "merchant" + tables := "merchant" + ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "merchant", "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400); err != nil { + t.Fatal(err) } - } + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "merchant", "-80"), 1); err != nil { + t.Fatal(err) + } + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "merchant", "80-"), 1); err != nil { + t.Fatal(err) + } + moveTables(t, cell, workflow, sourceKs, targetKs, tables) + merchantKs := vc.Cells[defaultCell.Name].Keyspaces["merchant"] + merchantTab1 := merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet + merchantTab2 := merchantKs.Shards["80-"].Tablets["zone1-500"].Vttablet + catchup(t, merchantTab1, workflow, "MoveTables") + catchup(t, merchantTab2, workflow, "MoveTables") + + vdiff(t, "merchant.p2m", "") + switchReads(t, allCellNames, ksWorkflow) + switchWrites(t, ksWorkflow, false) + dropSources(t, ksWorkflow) + + validateCountInTablet(t, merchantTab1, "merchant", "merchant", 1) + validateCountInTablet(t, merchantTab2, "merchant", "merchant", 1) + validateCount(t, vtgateConn, "merchant", "merchant", 2) + }) +} + +func vdiff(t *testing.T, workflow, cells string) { + t.Run("vdiff", func(t *testing.T) { + output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "-tablet_types=master", "-source_cell="+cells, "-format", "json", workflow) + fmt.Printf("vdiff err: %+v, output: %+v\n", err, output) + require.Nil(t, err) + require.NotNil(t, output) + diffReports := make([]*wrangler.DiffReport, 0) + err = json.Unmarshal([]byte(output), &diffReports) + require.Nil(t, err) + if 
len(diffReports) < 1 { + t.Fatal("VDiff did not return a valid json response " + output + "\n") + } + require.True(t, len(diffReports) > 0) + for key, diffReport := range diffReports { + if diffReport.ProcessedRows != diffReport.MatchingRows { + require.Failf(t, "vdiff failed", "Table %d : %#v\n", key, diffReport) + } + } + }) } func materialize(t *testing.T, spec string) { - err := vc.VtctlClient.ExecuteCommand("Materialize", spec) - require.NoError(t, err, "Materialize") + t.Run("materialize", func(t *testing.T) { + err := vc.VtctlClient.ExecuteCommand("Materialize", spec) + require.NoError(t, err, "Materialize") + }) } func materializeProduct(t *testing.T) { - workflow := "cproduct" - keyspace := "customer" - applyVSchema(t, materializeProductVSchema, keyspace) - materialize(t, materializeProductSpec) - customerTablets := vc.getVttabletsInKeyspace(t, defaultCell, keyspace, "master") - for _, tab := range customerTablets { - catchup(t, tab, workflow, "Materialize") - } - for _, tab := range customerTablets { - validateCountInTablet(t, tab, keyspace, workflow, 5) - } + t.Run("materializeProduct", func(t *testing.T) { + // materializing from "product" keyspace to "customer" keyspace + workflow := "cproduct" + keyspace := "customer" + applyVSchema(t, materializeProductVSchema, keyspace) + materialize(t, materializeProductSpec) + customerTablets := vc.getVttabletsInKeyspace(t, defaultCell, keyspace, "master") + { + for _, tab := range customerTablets { + catchup(t, tab, workflow, "Materialize") + } + for _, tab := range customerTablets { + validateCountInTablet(t, tab, keyspace, workflow, 5) + } + } + + productTablets := vc.getVttabletsInKeyspace(t, defaultCell, "product", "master") + t.Run("throttle-app-product", func(t *testing.T) { + // Now, throttle the streamer on source tablets, insert some rows + for _, tab := range productTablets { + _, body, err := throttleApp(tab, sourceThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, 
sourceThrottlerAppName) + } + // Wait for throttling to take effect (caching will expire by this time): + time.Sleep(1 * time.Second) + for _, tab := range productTablets { + { + _, body, err := throttlerCheckSelf(tab, sourceThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, "417") + } + { + _, body, err := throttlerCheckSelf(tab, targetThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, "200") + } + } + insertMoreProductsForSourceThrottler(t) + // To be fair to the test, we give the target time to apply the new changes. We expect it to NOT get them in the first place, + time.Sleep(1 * time.Second) + // we expect the additional rows to **not appear** in the materialized view + for _, tab := range customerTablets { + validateCountInTablet(t, tab, keyspace, workflow, 5) + } + }) + t.Run("unthrottle-app-product", func(t *testing.T) { + // unthrottle on source tablets, and expect the rows to show up + for _, tab := range productTablets { + _, body, err := unthrottleApp(tab, sourceThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, sourceThrottlerAppName) + } + // give time for unthrottling to take effect and for target to fetch data + time.Sleep(3 * time.Second) + for _, tab := range productTablets { + { + _, body, err := throttlerCheckSelf(tab, sourceThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, "200") + } + } + for _, tab := range customerTablets { + validateCountInTablet(t, tab, keyspace, workflow, 8) + } + }) + + t.Run("throttle-app-customer", func(t *testing.T) { + // Now, throttle the streamer on source tablets, insert some rows + for _, tab := range customerTablets { + _, body, err := throttleApp(tab, targetThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, targetThrottlerAppName) + } + // Wait for throttling to take effect (caching will expire by this time): + time.Sleep(1 * time.Second) + for _, tab := range customerTablets { + { + _, body, err := 
throttlerCheckSelf(tab, targetThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, "417") + } + { + _, body, err := throttlerCheckSelf(tab, sourceThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, "200") + } + } + insertMoreProductsForTargetThrottler(t) + // To be fair to the test, we give the target time to apply the new changes. We expect it to NOT get them in the first place, + time.Sleep(1 * time.Second) + // we expect the additional rows to **not appear** in the materialized view + for _, tab := range customerTablets { + validateCountInTablet(t, tab, keyspace, workflow, 8) + } + }) + t.Run("unthrottle-app-customer", func(t *testing.T) { + // unthrottle on source tablets, and expect the rows to show up + for _, tab := range customerTablets { + _, body, err := unthrottleApp(tab, targetThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, targetThrottlerAppName) + } + // give time for unthrottling to take effect and for target to fetch data + time.Sleep(3 * time.Second) + for _, tab := range customerTablets { + { + _, body, err := throttlerCheckSelf(tab, targetThrottlerAppName) + assert.NoError(t, err) + assert.Contains(t, body, "200") + } + } + for _, tab := range customerTablets { + validateCountInTablet(t, tab, keyspace, workflow, 11) + } + }) + }) } func materializeRollup(t *testing.T) { - keyspace := "product" - workflow := "rollup" - applyVSchema(t, materializeSalesVSchema, keyspace) - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet - materialize(t, materializeRollupSpec) - catchup(t, productTab, workflow, "Materialize") - validateCount(t, vtgateConn, "product", "rollup", 1) - validateQuery(t, vtgateConn, "product:0", "select rollupname, kount from rollup", - `[[VARCHAR("total") INT32(2)]]`) + t.Run("materializeRollup", func(t *testing.T) { + keyspace := "product" + workflow := "rollup" + applyVSchema(t, materializeSalesVSchema, keyspace) + productTab 
:= vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + materialize(t, materializeRollupSpec) + catchup(t, productTab, workflow, "Materialize") + validateCount(t, vtgateConn, "product", "rollup", 1) + validateQuery(t, vtgateConn, "product:0", "select rollupname, kount from rollup", + `[[VARCHAR("total") INT32(2)]]`) + }) } func materializeSales(t *testing.T) { - keyspace := "product" - applyVSchema(t, materializeSalesVSchema, keyspace) - materialize(t, materializeSalesSpec) - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet - catchup(t, productTab, "sales", "Materialize") - validateCount(t, vtgateConn, "product", "sales", 2) - validateQuery(t, vtgateConn, "product:0", "select kount, amount from sales", - `[[INT32(1) INT32(10)] [INT32(2) INT32(35)]]`) + t.Run("materializeSales", func(t *testing.T) { + keyspace := "product" + applyVSchema(t, materializeSalesVSchema, keyspace) + materialize(t, materializeSalesSpec) + productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + catchup(t, productTab, "sales", "Materialize") + validateCount(t, vtgateConn, "product", "sales", 2) + validateQuery(t, vtgateConn, "product:0", "select kount, amount from sales", + `[[INT32(1) INT32(10)] [INT32(2) INT32(35)]]`) + }) } func materializeMerchantSales(t *testing.T) { - workflow := "msales" - materialize(t, materializeMerchantSalesSpec) - merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, "merchant", "master") - for _, tab := range merchantTablets { - catchup(t, tab, workflow, "Materialize") - } - validateCountInTablet(t, merchantTablets["zone1-400"], "merchant", "msales", 1) - validateCountInTablet(t, merchantTablets["zone1-500"], "merchant", "msales", 1) - validateCount(t, vtgateConn, "merchant", "msales", 2) + t.Run("materializeMerchantSales", func(t *testing.T) { + workflow := "msales" + materialize(t, materializeMerchantSalesSpec) 
+ merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, "merchant", "master") + for _, tab := range merchantTablets { + catchup(t, tab, workflow, "Materialize") + } + validateCountInTablet(t, merchantTablets["zone1-400"], "merchant", "msales", 1) + validateCountInTablet(t, merchantTablets["zone1-500"], "merchant", "msales", 1) + validateCount(t, vtgateConn, "merchant", "msales", 2) + }) } func materializeMerchantOrders(t *testing.T) { - workflow := "morders" - keyspace := "merchant" - applyVSchema(t, merchantOrdersVSchema, keyspace) - materialize(t, materializeMerchantOrdersSpec) - merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, "merchant", "master") - for _, tab := range merchantTablets { - catchup(t, tab, workflow, "Materialize") - } - validateCountInTablet(t, merchantTablets["zone1-400"], "merchant", "morders", 2) - validateCountInTablet(t, merchantTablets["zone1-500"], "merchant", "morders", 1) - validateCount(t, vtgateConn, "merchant", "morders", 3) + t.Run("materializeMerchantOrders", func(t *testing.T) { + workflow := "morders" + keyspace := "merchant" + applyVSchema(t, merchantOrdersVSchema, keyspace) + materialize(t, materializeMerchantOrdersSpec) + merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, "merchant", "master") + for _, tab := range merchantTablets { + catchup(t, tab, workflow, "Materialize") + } + validateCountInTablet(t, merchantTablets["zone1-400"], "merchant", "morders", 2) + validateCountInTablet(t, merchantTablets["zone1-500"], "merchant", "morders", 1) + validateCount(t, vtgateConn, "merchant", "morders", 3) + }) } func checkVtgateHealth(t *testing.T, cell *Cell) { @@ -594,8 +781,8 @@ func checkTabletHealth(t *testing.T, tablet *Tablet) { } } -func iterateTablets(t *testing.T, f func(t *testing.T, tablet *Tablet)) { - for _, cell := range vc.Cells { +func iterateTablets(t *testing.T, cluster *VitessCluster, f func(t *testing.T, tablet *Tablet)) { + for _, cell := range cluster.Cells { for _, ks := range 
cell.Keyspaces { for _, shard := range ks.Shards { for _, tablet := range shard.Tablets { @@ -606,15 +793,15 @@ func iterateTablets(t *testing.T, f func(t *testing.T, tablet *Tablet)) { } } -func iterateCells(t *testing.T, f func(t *testing.T, cell *Cell)) { - for _, cell := range vc.Cells { +func iterateCells(t *testing.T, cluster *VitessCluster, f func(t *testing.T, cell *Cell)) { + for _, cell := range cluster.Cells { f(t, cell) } } -func verifyClusterHealth(t *testing.T) { - iterateCells(t, checkVtgateHealth) - iterateTablets(t, checkTabletHealth) +func verifyClusterHealth(t *testing.T, cluster *VitessCluster) { + iterateCells(t, cluster, checkVtgateHealth) + iterateTablets(t, cluster, checkTabletHealth) } func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info string) { diff --git a/go/test/endtoend/vtcombo/vttest_sample_test.go b/go/test/endtoend/vtcombo/vttest_sample_test.go index 32c8fb86729..2bce46efb5f 100644 --- a/go/test/endtoend/vtcombo/vttest_sample_test.go +++ b/go/test/endtoend/vtcombo/vttest_sample_test.go @@ -74,6 +74,7 @@ func TestMain(m *testing.M) { cfg.Topology = topology cfg.SchemaDir = os.Getenv("VTROOT") + "/test/vttest_schema" cfg.DefaultSchemaDir = os.Getenv("VTROOT") + "/test/vttest_schema/default" + cfg.PersistentMode = true localCluster = &vttest.LocalCluster{ Config: cfg, @@ -116,27 +117,24 @@ func TestStandalone(t *testing.T) { conn, err := vtgateconn.Dial(ctx, grpcAddress) require.Nil(t, err) defer conn.Close() - cur := conn.Session(ks1+":-80@master", nil) idStart, rowCount := 1000, 500 - query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)" - _, err = cur.Execute(ctx, "begin", nil) - require.Nil(t, err) + insertManyRows(ctx, t, conn, idStart, rowCount) + assertInsertedRowsExist(ctx, t, conn, idStart, rowCount) + assertCanInsertRow(ctx, t, conn) + assertTablesPresent(t) - for i := idStart; i < idStart+rowCount; i++ { - bindVariables := map[string]*querypb.BindVariable{ - "id": 
{Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))}, - "msg": {Type: querypb.Type_VARCHAR, Value: []byte("test" + strconv.FormatInt(int64(i), 10))}, - "keyspace_id": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))}, - } - _, err = cur.Execute(ctx, query, bindVariables) - require.Nil(t, err) - } - - _, err = cur.Execute(ctx, "commit", nil) + err = localCluster.TearDown() require.Nil(t, err) + err = localCluster.Setup() + require.Nil(t, err) + + assertInsertedRowsExist(ctx, t, conn, idStart, rowCount) + assertTablesPresent(t) +} - cur = conn.Session(ks1+":-80@rdonly", nil) +func assertInsertedRowsExist(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn, idStart, rowCount int) { + cur := conn.Session(ks1+":-80@rdonly", nil) bindVariables := map[string]*querypb.BindVariable{ "id_start": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(idStart), 10))}, } @@ -153,23 +151,49 @@ func TestStandalone(t *testing.T) { require.Nil(t, err) require.Equal(t, 1, len(res.Rows)) assert.Equal(t, "VARCHAR(\"test1000\")", res.Rows[0][1].String()) +} - cur = conn.Session(ks1+":80-@master", nil) - _, err = cur.Execute(ctx, "begin", nil) +func assertCanInsertRow(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn) { + cur := conn.Session(ks1+":80-@master", nil) + _, err := cur.Execute(ctx, "begin", nil) require.Nil(t, err) i := 0x810000000000000 - bindVariables = map[string]*querypb.BindVariable{ + bindVariables := map[string]*querypb.BindVariable{ "id": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))}, "msg": {Type: querypb.Type_VARCHAR, Value: []byte("test" + strconv.FormatInt(int64(i), 10))}, "keyspace_id": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))}, } + query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)" _, err = cur.Execute(ctx, query, bindVariables) require.Nil(t, err) _, err = cur.Execute(ctx, 
"commit", nil) require.Nil(t, err) +} + +func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn, idStart, rowCount int) { + cur := conn.Session(ks1+":-80@master", nil) + + query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)" + _, err := cur.Execute(ctx, "begin", nil) + require.Nil(t, err) + + for i := idStart; i < idStart+rowCount; i++ { + bindVariables := map[string]*querypb.BindVariable{ + "id": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))}, + "msg": {Type: querypb.Type_VARCHAR, Value: []byte("test" + strconv.FormatInt(int64(i), 10))}, + "keyspace_id": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))}, + } + _, err = cur.Execute(ctx, query, bindVariables) + require.Nil(t, err) + } + + _, err = cur.Execute(ctx, "commit", nil) + require.Nil(t, err) +} +func assertTablesPresent(t *testing.T) { tmpCmd := exec.Command("vtctlclient", "-vtctl_client_protocol", "grpc", "-server", grpcAddress, "-stderrthreshold", "0", "ListAllTablets", "test") log.Infof("Running vtctlclient with command: %v", tmpCmd.Args) diff --git a/go/test/endtoend/vtgate/aggr_test.go b/go/test/endtoend/vtgate/aggr_test.go index f68a5808c34..adc8c93a84c 100644 --- a/go/test/endtoend/vtgate/aggr_test.go +++ b/go/test/endtoend/vtgate/aggr_test.go @@ -40,3 +40,22 @@ func TestAggregateTypes(t *testing.T) { assertMatches(t, conn, "select val1, count(distinct val2) k, count(*) from aggr_test group by val1 order by k desc, val1 limit 4", `[[VARCHAR("c") INT64(2) INT64(2)] [VARCHAR("a") INT64(1) INT64(2)] [VARCHAR("b") INT64(1) INT64(1)] [VARCHAR("e") INT64(1) INT64(2)]]`) exec(t, conn, "delete from aggr_test") } + +func TestGroupBy(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + exec(t, conn, "insert into t3(id5, id6, id7) values(1,1,2), (2,2,4), (3,2,4), (4,1,2), (5,1,2), 
(6,3,6)") + // test ordering and group by int column + assertMatches(t, conn, "select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) + + defer func() { + exec(t, conn, "set workload = oltp") + exec(t, conn, "delete from t3") + }() + // Test the same queries in streaming mode + exec(t, conn, "set workload = olap") + assertMatches(t, conn, "select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) +} diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go new file mode 100644 index 00000000000..bf1677f7330 --- /dev/null +++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go @@ -0,0 +1,189 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unsharded + +import ( + "context" + "flag" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + keyspaceName = "ks" + cell = "zone1" + hostname = "localhost" +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false); err != nil { + return 1 + } + + // Start vtgate + clusterInstance.VtGateExtraArgs = []string{"-dbddl_plugin", "noop", "-mysql_server_query_timeout", "60s"} + vtgateProcess := clusterInstance.NewVtgateInstance() + vtgateProcess.SysVarSetEnabled = true + if err := vtgateProcess.Setup(); err != nil { + return 1 + } + + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} + +func TestDBDDLPlugin(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + createAndDrop := func(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + qr := exec(t, conn, `create database aaa`) + require.EqualValues(t, 1, qr.RowsAffected) + }() + time.Sleep(300 * time.Millisecond) + start(t, "aaa") + 
+ // wait until the create database query has returned + wg.Wait() + + exec(t, conn, `use aaa`) + exec(t, conn, `create table t (id bigint primary key)`) + exec(t, conn, `insert into t(id) values (1),(2),(3),(4),(5)`) + assertMatches(t, conn, "select count(*) from t", `[[INT64(5)]]`) + + wg.Add(1) + go func() { + defer wg.Done() + _ = exec(t, conn, `drop database aaa`) + }() + time.Sleep(300 * time.Millisecond) + shutdown(t, "aaa") + + // wait until the drop database query has returned + wg.Wait() + + _, err = conn.ExecuteFetch(`select count(*) from t`, 1000, true) + require.Error(t, err) + } + t.Run("first try", func(t *testing.T) { + createAndDrop(t) + }) + if !t.Failed() { + t.Run("second try", func(t *testing.T) { + createAndDrop(t) + }) + } +} + +func start(t *testing.T, ksName string) { + keyspace := &cluster.Keyspace{ + Name: ksName, + } + require.NoError(t, + clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false), + "new database creation failed") +} + +func shutdown(t *testing.T, ksName string) { + for _, ks := range clusterInstance.Keyspaces { + if ks.Name != ksName { + continue + } + for _, shard := range ks.Shards { + for _, tablet := range shard.Vttablets { + if tablet.MysqlctlProcess.TabletUID > 0 { + _, err := tablet.MysqlctlProcess.StopProcess() + assert.NoError(t, err) + } + if tablet.MysqlctldProcess.TabletUID > 0 { + err := tablet.MysqlctldProcess.Stop() + assert.NoError(t, err) + } + _ = tablet.VttabletProcess.TearDown() + } + } + } + + require.NoError(t, + clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "-recursive", ksName)) + + require.NoError(t, + clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph")) +} + +func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + require.NoError(t, err) + return qr +} + +func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := exec(t, conn, 
query) + got := fmt.Sprintf("%v", qr.Rows) + diff := cmp.Diff(expected, got) + if diff != "" { + t.Errorf("Query: %s (-want +got):\n%s", query, diff) + } +} diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go new file mode 100644 index 00000000000..9fc3c9d59ea --- /dev/null +++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Test the vtgate's ability to route while watching a subset of keyspaces. +*/ + +package keyspacewatches + +import ( + "database/sql" + "fmt" + "math/rand" + "os" + "testing" + "time" + + _ "github.com/go-sql-driver/mysql" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + vtParams mysql.ConnParams + keyspaceUnshardedName = "ks1" + cell = "zone1" + hostname = "localhost" + mysqlAuthServerStatic = "mysql_auth_server_static.json" + sqlSchema = ` + create table keyspaces_to_watch_test( + id BIGINT NOT NULL, + msg VARCHAR(64) NOT NULL, + PRIMARY KEY (id) + ) Engine=InnoDB;` +) + +// createConfig creates a config file in TmpDir in vtdataroot and writes the given data. 
+func createConfig(clusterInstance *cluster.LocalProcessCluster, name, data string) error { + // creating new file + f, err := os.Create(clusterInstance.TmpDirectory + "/" + name) + if err != nil { + return err + } + + if data == "" { + return nil + } + + // write the given data + _, err = fmt.Fprint(f, data) + return err +} + +func createCluster() (*cluster.LocalProcessCluster, int) { + clusterInstance := cluster.NewCluster(cell, hostname) + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return nil, 1 + } + + // create auth server config + SQLConfig := `{ + "testuser1": { + "Password": "testpassword1", + "UserData": "vtgate client 1" + } + }` + if err := createConfig(clusterInstance, mysqlAuthServerStatic, SQLConfig); err != nil { + return nil, 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceUnshardedName, + SchemaSQL: sqlSchema, + } + if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { + return nil, 1 + } + + clusterInstance.VtGateExtraArgs = []string{ + "-mysql_auth_server_static_file", clusterInstance.TmpDirectory + "/" + mysqlAuthServerStatic, + "-keyspaces_to_watch", "ks1", + } + + // Start vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return nil, 1 + } + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + rand.Seed(time.Now().UnixNano()) + return clusterInstance, 0 +} + +func TestRoutingWithKeyspacesToWatch(t *testing.T) { + defer cluster.PanicHandler(t) + + clusterInstance, exitCode := createCluster() + defer clusterInstance.Teardown() + + if exitCode != 0 { + os.Exit(exitCode) + } + + dsn := fmt.Sprintf( + "testuser1:testpassword1@tcp(%s:%v)/", + clusterInstance.Hostname, + clusterInstance.VtgateMySQLPort, + ) + db, err := sql.Open("mysql", dsn) + require.Nil(t, err) + defer db.Close() + + // if this returns w/o failing the test we're good to go + _, err = db.Exec("select * from 
keyspaces_to_watch_test") + require.Nil(t, err) +} diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index 09a075556ae..042c5062205 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -77,6 +77,18 @@ func TestShowColumns(t *testing.T) { assertMatches(t, conn, "SHOW columns FROM `t5_null_vindex` in `ks`", expected) } +func TestShowTables(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + query := "show tables;" + qr := exec(t, conn, query) + + assert.Equal(t, "information_schema", qr.Fields[0].Database) + assert.Equal(t, "Tables_in_ks", qr.Fields[0].Name) +} + func TestCastConvert(t *testing.T) { conn, err := mysql.Connect(context.Background(), &vtParams) require.NoError(t, err) @@ -352,6 +364,9 @@ func TestExplainPassthrough(t *testing.T) { got := fmt.Sprintf("%v", result.Rows) require.Contains(t, got, "SIMPLE") // there is a lot more coming from mysql, // but we are trying to make the test less fragile + + result = exec(t, conn, "explain ks.t1") + require.EqualValues(t, 2, len(result.Rows)) } func TestXXHash(t *testing.T) { @@ -404,8 +419,15 @@ func TestSwitchBetweenOlapAndOltp(t *testing.T) { require.NoError(t, err) defer conn.Close() + assertMatches(t, conn, "select @@workload", `[[VARBINARY("OLTP")]]`) + exec(t, conn, "set workload='olap'") + + assertMatches(t, conn, "select @@workload", `[[VARBINARY("OLAP")]]`) + exec(t, conn, "set workload='oltp'") + + assertMatches(t, conn, "select @@workload", `[[VARBINARY("OLTP")]]`) } func TestFoundRowsOnDualQueries(t *testing.T) { @@ -425,7 +447,7 @@ func TestUseStmtInOLAP(t *testing.T) { require.NoError(t, err) defer conn.Close() - queries := []string{"set workload='olap'", "use `ks:80-`"} + queries := []string{"set workload='olap'", "use `ks:80-`", "use `ks:-80`"} for i, q := range queries { t.Run(fmt.Sprintf("%d-%s", i, q), func(t *testing.T) { exec(t, conn, 
q) @@ -496,6 +518,69 @@ func TestCreateView(t *testing.T) { assertMatches(t, conn, "select * from v1", `[[INT64(1) INT64(1)] [INT64(2) INT64(2)] [INT64(3) INT64(3)] [INT64(4) INT64(4)] [INT64(5) INT64(5)]]`) } +func TestVersions(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + qr := exec(t, conn, `select @@version`) + assert.Contains(t, fmt.Sprintf("%v", qr.Rows), "vitess") + + qr = exec(t, conn, `select @@version_comment`) + assert.Contains(t, fmt.Sprintf("%v", qr.Rows), "Git revision") +} + +func TestFlush(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + exec(t, conn, "flush local tables t1, t2") +} + +func TestShowVariables(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + res := exec(t, conn, "show variables like \"%version%\";") + found := false + for _, row := range res.Rows { + if row[0].ToString() == "version" { + assert.Contains(t, row[1].ToString(), "vitess") + found = true + } + } + require.True(t, found, "Expected a row for version in show query") +} + +func TestOrderBy(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + exec(t, conn, "insert into t4(id1, id2) values(1,'a'), (2,'Abc'), (3,'b'), (4,'c'), (5,'test')") + exec(t, conn, "insert into t4(id1, id2) values(6,'d'), (7,'e'), (8,'F')") + // test ordering of varchar column + assertMatches(t, conn, "select id1, id2 from t4 order by id2 desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) 
VARCHAR("a")]]`) + // test ordering of int column + assertMatches(t, conn, "select id1, id2 from t4 order by id1 desc", `[[INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(5) VARCHAR("test")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) + + defer func() { + exec(t, conn, "set workload = oltp") + exec(t, conn, "delete from t4") + }() + // Test the same queries in streaming mode + exec(t, conn, "set workload = olap") + assertMatches(t, conn, "select id1, id2 from t4 order by id2 desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) + assertMatches(t, conn, "select id1, id2 from t4 order by id1 desc", `[[INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(5) VARCHAR("test")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) +} + func TestSubQueryOnTopOfSubQuery(t *testing.T) { defer cluster.PanicHandler(t) ctx := context.Background() @@ -507,7 +592,7 @@ func TestSubQueryOnTopOfSubQuery(t *testing.T) { exec(t, conn, `insert into t1(id1, id2) values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)`) exec(t, conn, `insert into t2(id3, id4) values (1, 3), (2, 4)`) - assertMatches(t, conn, "select id1 from t1 where id1 not in (select id3 from t2) and id2 in (select id4 from t2)", `[[INT64(3)] [INT64(4)]]`) + assertMatches(t, conn, "select id1 from t1 where id1 not in (select id3 from t2) and id2 in (select id4 from t2) order by id1", `[[INT64(3)] [INT64(4)]]`) } func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { diff --git a/go/test/endtoend/vtgate/mysql80/main_test.go b/go/test/endtoend/vtgate/mysql80/main_test.go new file mode 100644 index 00000000000..56661010449 --- /dev/null +++ b/go/test/endtoend/vtgate/mysql80/main_test.go @@ -0,0 +1,70 @@ 
+/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtgate + +import ( + "flag" + "os" + "testing" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + KeyspaceName = "ks" + Cell = "test" +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(Cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: KeyspaceName, + } + err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false) + if err != nil { + return 1 + } + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go new file mode 100644 index 00000000000..72e4eff8c69 --- /dev/null +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtgate + +import ( + "context" + "testing" + + "vitess.io/vitess/go/test/endtoend/cluster" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" +) + +func TestFunctionInDefault(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + exec(t, conn, `create table function_default (x varchar(25) DEFAULT (TRIM(" check ")))`) + exec(t, conn, "drop table function_default") + + exec(t, conn, `create table function_default (x varchar(25) DEFAULT "check")`) + exec(t, conn, "drop table function_default") +} + +func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + require.NoError(t, err, "for query: "+query) + return qr +} diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go index 9bac75fd1e3..ebe133a2c81 100644 --- a/go/test/endtoend/vtgate/reservedconn/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/main_test.go @@ -167,3 +167,13 @@ func assertIsEmpty(t *testing.T, conn *mysql.Conn, query string) { qr := checkedExec(t, conn, query) assert.Empty(t, qr.Rows) } + +func assertResponseMatch(t *testing.T, conn *mysql.Conn, query1, query2 string) { + qr1 := checkedExec(t, conn, query1) + got1 := fmt.Sprintf("%v", qr1.Rows) + + qr2 := checkedExec(t, conn, query2) + got2 := fmt.Sprintf("%v", qr2.Rows) + + assert.Equal(t, got1, got2) +} diff --git 
a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index 19af2cdc4d2..69531781085 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -319,3 +320,78 @@ func TestSetSystemVarInTxWithConnError(t *testing.T) { // subsequent queries on 80- will pass assertMatches(t, conn, "select id, @@sql_safe_updates from test where id = 4", "[[INT64(4) INT64(1)]]") } + +func TestEnableSystemSettings(t *testing.T) { + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + // test set @@enable_system_settings to false and true + checkedExec(t, conn, "set enable_system_settings = false") + assertMatches(t, conn, `select @@enable_system_settings`, `[[INT64(0)]]`) + checkedExec(t, conn, "set enable_system_settings = true") + assertMatches(t, conn, `select @@enable_system_settings`, `[[INT64(1)]]`) + + // prepare the @@sql_mode variable + checkedExec(t, conn, "set sql_mode = 'NO_ZERO_DATE'") + assertMatches(t, conn, "select @@sql_mode", `[[VARCHAR("NO_ZERO_DATE")]]`) + + // check disabling @@enable_system_settings + checkedExec(t, conn, "set enable_system_settings = false") + checkedExec(t, conn, "set sql_mode = ''") // attempting to set @@sql_mode to an empty string + assertMatches(t, conn, "select @@sql_mode", `[[VARCHAR("NO_ZERO_DATE")]]`) // @@sql_mode did not change + + // check enabling @@enable_system_settings + checkedExec(t, conn, "set enable_system_settings = true") + checkedExec(t, conn, "set sql_mode = ''") // changing @@sql_mode to empty string + assertMatches(t, conn, "select @@sql_mode", `[[VARCHAR("")]]`) // @@sql_mode did change 
+} + +// Tests type consistency through multiple queries +func TestSystemVariableType(t *testing.T) { + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + checkedExec(t, conn, "delete from test") + checkedExec(t, conn, "insert into test (id, val1, val2, val3) values (1, null, 0, 0)") + + // regardless of the "from", the select @@autocommit should return the same type + query1 := "select @@autocommit" + query2 := "select @@autocommit from test" + + checkedExec(t, conn, "set autocommit = false") + assertResponseMatch(t, conn, query1, query2) + + checkedExec(t, conn, "set autocommit = true") + assertResponseMatch(t, conn, query1, query2) +} + +func TestSysvarSocket(t *testing.T) { + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + qr := checkedExec(t, conn, "select @@socket") + assert.Contains(t, fmt.Sprintf("%v", qr.Rows), "mysql.sock") + + _, err = exec(t, conn, "set socket = '/any/path'") + require.Error(t, err) + sqlErr, ok := err.(*mysql.SQLError) + require.True(t, ok, "not a mysql error: %T", err) + assert.Equal(t, mysql.ERIncorrectGlobalLocalVar, sqlErr.Number()) + assert.Equal(t, mysql.SSUnknownSQLState, sqlErr.SQLState()) + assert.Equal(t, "Variable 'socket' is a read only variable (errno 1238) (sqlstate HY000) during query: set socket = '/any/path'", sqlErr.Error()) +} diff --git a/go/test/endtoend/vtgate/reservedconn/udv_test.go b/go/test/endtoend/vtgate/reservedconn/udv_test.go index d8ab49f267e..391fc722a4d 100644 --- a/go/test/endtoend/vtgate/reservedconn/udv_test.go +++ b/go/test/endtoend/vtgate/reservedconn/udv_test.go @@ -43,65 +43,64 @@ func TestSetUDV(t *testing.T) { query string expectedRows string rowsAffected int + rowsReturned int }
queries := []queriesWithExpectations{{ query: "select @foo", - expectedRows: "[[NULL]]", rowsAffected: 1, + expectedRows: "[[NULL]]", rowsReturned: 1, }, { - query: "set @foo = 'abc', @bar = 42, @baz = 30.5, @tablet = concat('foo','bar')", - expectedRows: "", rowsAffected: 0, + query: "set @foo = 'abc', @bar = 42, @baz = 30.5, @tablet = concat('foo','bar')", }, { - query: "/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE */", - expectedRows: "", rowsAffected: 0, + query: "/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE */", }, { // This is handled at vtgate. query: "select @foo, @bar, @baz, @tablet", - expectedRows: `[[VARBINARY("abc") INT64(42) FLOAT64(30.5) VARBINARY("foobar")]]`, rowsAffected: 1, + expectedRows: `[[VARBINARY("abc") INT64(42) FLOAT64(30.5) VARBINARY("foobar")]]`, rowsReturned: 1, }, { // Cannot really check a specific value for sql_mode as it will differ based on database selected to run these tests. query: "select @OLD_SQL_MODE = @@SQL_MODE", - expectedRows: `[[INT64(1)]]`, rowsAffected: 1, + expectedRows: `[[INT64(1)]]`, rowsReturned: 1, }, { // This one is sent to tablet. 
query: "select @foo, @bar, @baz, @tablet, @OLD_SQL_MODE = @@SQL_MODE", - expectedRows: `[[VARCHAR("abc") INT64(42) DECIMAL(30.5) VARCHAR("foobar") INT64(1)]]`, rowsAffected: 1, + expectedRows: `[[VARCHAR("abc") INT64(42) DECIMAL(30.5) VARCHAR("foobar") INT64(1)]]`, rowsReturned: 1, }, { query: "insert into test(id, val1, val2, val3) values(1, @foo, null, null), (2, null, @bar, null), (3, null, null, @baz)", expectedRows: ``, rowsAffected: 3, }, { query: "select id, val1, val2, val3 from test order by id", - expectedRows: `[[INT64(1) VARCHAR("abc") NULL NULL] [INT64(2) NULL INT32(42) NULL] [INT64(3) NULL NULL FLOAT32(30.5)]]`, rowsAffected: 3, + expectedRows: `[[INT64(1) VARCHAR("abc") NULL NULL] [INT64(2) NULL INT32(42) NULL] [INT64(3) NULL NULL FLOAT32(30.5)]]`, rowsReturned: 3, }, { query: "select id, val1 from test where val1=@foo", - expectedRows: `[[INT64(1) VARCHAR("abc")]]`, rowsAffected: 1, + expectedRows: `[[INT64(1) VARCHAR("abc")]]`, rowsReturned: 1, }, { query: "select id, val2 from test where val2=@bar", - expectedRows: `[[INT64(2) INT32(42)]]`, rowsAffected: 1, + expectedRows: `[[INT64(2) INT32(42)]]`, rowsReturned: 1, }, { query: "select id, val3 from test where val3=@baz", - expectedRows: `[[INT64(3) FLOAT32(30.5)]]`, rowsAffected: 1, + expectedRows: `[[INT64(3) FLOAT32(30.5)]]`, rowsReturned: 1, }, { query: "delete from test where val2 = @bar", expectedRows: ``, rowsAffected: 1, }, { query: "select id, val2 from test where val2=@bar", - expectedRows: ``, rowsAffected: 0, + expectedRows: ``, }, { query: "update test set val2 = @bar where val1 = @foo", expectedRows: ``, rowsAffected: 1, }, { query: "select id, val1, val2 from test where val1=@foo", - expectedRows: `[[INT64(1) VARCHAR("abc") INT32(42)]]`, rowsAffected: 1, + expectedRows: `[[INT64(1) VARCHAR("abc") INT32(42)]]`, rowsReturned: 1, }, { query: "insert into test(id, val1, val2, val3) values (42, @tablet, null, null)", expectedRows: ``, rowsAffected: 1, }, { query: "select id, val1 from 
test where val1 = @tablet", - expectedRows: `[[INT64(42) VARCHAR("foobar")]]`, rowsAffected: 1, + expectedRows: `[[INT64(42) VARCHAR("foobar")]]`, rowsReturned: 1, }, { query: "set @foo = now(), @bar = now(), @dd = date('2020-10-20'), @tt = time('10:15')", - expectedRows: `[]`, rowsAffected: 0, + expectedRows: `[]`, }, { query: "select @foo = @bar, @dd, @tt", - expectedRows: `[[INT64(1) VARCHAR("2020-10-20") VARCHAR("10:15:00")]]`, rowsAffected: 1, + expectedRows: `[[INT64(1) VARCHAR("2020-10-20") VARCHAR("10:15:00")]]`, rowsReturned: 1, }} conn, err := mysql.Connect(ctx, &vtParams) @@ -114,7 +113,8 @@ func TestSetUDV(t *testing.T) { t.Run(fmt.Sprintf("%d-%s", i, q.query), func(t *testing.T) { qr, err := exec(t, conn, q.query) require.NoError(t, err) - assert.Equal(t, uint64(q.rowsAffected), qr.RowsAffected, "rows affected wrong for query: %s", q.query) + assert.EqualValues(t, q.rowsAffected, qr.RowsAffected, "rows affected wrong for query: %s", q.query) + assert.EqualValues(t, q.rowsReturned, len(qr.Rows), "rows returned wrong for query: %s", q.query) if q.expectedRows != "" { result := fmt.Sprintf("%v", qr.Rows) if diff := cmp.Diff(q.expectedRows, result); diff != "" { @@ -125,6 +125,47 @@ func TestSetUDV(t *testing.T) { } } +func TestMysqlDumpInitialLog(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + queries := []string{ + "/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;", + "/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;", + "/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;", + "/*!50503 SET NAMES utf8mb4 */;", + "/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;", + "/*!40103 SET TIME_ZONE='+00:00' */;", + "/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;", + "/*!40014 
SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;", + "/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;", + "/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;", + "/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;", + "/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;", + "/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;", + "/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;", + "/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;", + "/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;", + "/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;", + "/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;", + } + + for _, query := range queries { + t.Run(query, func(t *testing.T) { + _, more, err := conn.ExecuteFetchMulti(query, 1000, true) + require.NoError(t, err) + require.False(t, more) + }) + } +} + func TestUserDefinedVariableResolvedAtTablet(t *testing.T) { ctx := context.Background() vtParams := mysql.ConnParams{ diff --git a/go/test/endtoend/vtgate/system_schema_test.go b/go/test/endtoend/vtgate/system_schema_test.go index 8e5c29f3b5f..ed1c99b91da 100644 --- a/go/test/endtoend/vtgate/system_schema_test.go +++ b/go/test/endtoend/vtgate/system_schema_test.go @@ -78,8 +78,6 @@ func TestInformationSchemaQuery(t *testing.T) { assertResultIsEmpty(t, conn, "table_schema = 'PERFORMANCE_SCHEMA'") assertSingleRowIsReturned(t, conn, "table_schema = 'performance_schema' and table_name = 'users'", "performance_schema") assertResultIsEmpty(t, conn, "table_schema = 'performance_schema' and table_name = 'foo'") - assertSingleRowIsReturned(t, conn, "table_schema = 'vt_ks' and table_name = 't1'", "vt_ks") - assertSingleRowIsReturned(t, conn, "table_schema = 'ks' and table_name = 't1'", "vt_ks") } func assertResultIsEmpty(t *testing.T, conn *mysql.Conn, pre string) { @@ -141,6 +139,7 @@ func TestConnectWithSystemSchema(t *testing.T) { connParams.DbName = dbname conn, err := mysql.Connect(ctx, 
&connParams) require.NoError(t, err) + exec(t, conn, `select @@max_allowed_packet from dual`) conn.Close() } } @@ -148,12 +147,12 @@ func TestConnectWithSystemSchema(t *testing.T) { func TestUseSystemSchema(t *testing.T) { defer cluster.PanicHandler(t) ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { - conn, err := mysql.Connect(ctx, &vtParams) - require.NoError(t, err) - exec(t, conn, fmt.Sprintf("use %s", dbname)) - conn.Close() + exec(t, conn, `select @@max_allowed_packet from dual`) } } @@ -213,5 +212,5 @@ func TestMultipleSchemaPredicates(t *testing.T) { "where t.table_schema = '%s' and c.table_schema = '%s' and c.table_schema = '%s'", KeyspaceName, KeyspaceName, "a") _, err = conn.ExecuteFetch(query, 1000, true) require.Error(t, err) - require.Contains(t, err.Error(), "specifying two different database in the query is not supported") + require.Contains(t, err.Error(), "two predicates for specifying the database are not supported") } diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index 09089e00e05..547b12ef2af 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -24,6 +24,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -92,6 +95,55 @@ CREATE TABLE allDefaults ( } } } +` + + createProcSQL = `use vt_customer; +CREATE PROCEDURE sp_insert() +BEGIN + insert into allDefaults () values (); +END; + +CREATE PROCEDURE sp_delete() +BEGIN + delete from allDefaults; +END; + +CREATE PROCEDURE sp_multi_dml() +BEGIN + insert into allDefaults () values (); + delete from allDefaults; +END; + +CREATE 
PROCEDURE sp_variable() +BEGIN + insert into allDefaults () values (); + SELECT min(id) INTO @myvar FROM allDefaults; + DELETE FROM allDefaults WHERE id = @myvar; +END; + +CREATE PROCEDURE sp_select() +BEGIN + SELECT * FROM allDefaults; +END; + +CREATE PROCEDURE sp_all() +BEGIN + insert into allDefaults () values (); + select * from allDefaults; + delete from allDefaults; + set autocommit = 0; +END; + +CREATE PROCEDURE in_parameter(IN val int) +BEGIN + insert into allDefaults(id) values(val); +END; + +CREATE PROCEDURE out_parameter(OUT val int) +BEGIN + insert into allDefaults(id) values (128); + select 128 into val from dual; +END; ` ) @@ -116,11 +168,20 @@ func TestMain(m *testing.M) { } clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-transaction-timeout", "3"} if err := clusterInstance.StartUnshardedKeyspace(*Keyspace, 0, false); err != nil { + log.Fatal(err.Error()) return 1 } // Start vtgate + clusterInstance.VtGateExtraArgs = []string{"-warn_sharded_only=true"} if err := clusterInstance.StartVtgate(); err != nil { + log.Fatal(err.Error()) + return 1 + } + + masterProcess := clusterInstance.Keyspaces[0].Shards[0].MasterTablet().VttabletProcess + if _, err := masterProcess.QueryTablet(createProcSQL, KeyspaceName, false); err != nil { + log.Fatal(err.Error()) return 1 } @@ -163,6 +224,7 @@ func TestSelectIntoAndLoadFrom(t *testing.T) { query = `load data infile '` + directory + `x2.txt' replace into table t1 Fields terminated by ';' optionally enclosed by '"' escaped by '\t' lines terminated by '\n'` exec(t, conn, query) assertMatches(t, conn, `select c1,c2,c3 from t1`, `[[INT64(300) INT64(100) INT64(300)]]`) + assertMatches(t, conn, "show warnings", `[[VARCHAR("Warning") UINT16(1235) VARCHAR("use of feature that is only supported in unsharded mode: LOAD")]]`) } func TestEmptyStatement(t *testing.T) { @@ -176,8 +238,9 @@ func TestEmptyStatement(t *testing.T) { require.Nil(t, err) defer conn.Close() defer exec(t, conn, `delete from t1`) - 
execAssertError(t, conn, " \t;", "Query was empty") - execMulti(t, conn, `insert into t1(c1, c2, c3, c4) values (300,100,300,'abc'); ;; insert into t1(c1, c2, c3, c4) values (301,101,301,'abcd');;`) + execAssertError(t, conn, " \t; \n;", "Query was empty") + execMulti(t, conn, `insert into t1(c1, c2, c3, c4) values (300,100,300,'abc'); ;; insert into t1(c1, c2, c3, c4) values (301,101,301,'abcd');;`) + assertMatches(t, conn, `select c1,c2,c3 from t1`, `[[INT64(300) INT64(100) INT64(300)] [INT64(301) INT64(101) INT64(301)]]`) } @@ -237,6 +300,80 @@ func TestDDLUnsharded(t *testing.T) { assertMatches(t, conn, "show tables", `[[VARCHAR("allDefaults")] [VARCHAR("t1")]]`) } +func TestCallProcedure(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + Flags: mysql.CapabilityClientMultiResults, + DbName: "@master", + } + time.Sleep(5 * time.Second) + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + qr := exec(t, conn, `CALL sp_insert()`) + require.EqualValues(t, 1, qr.RowsAffected) + + assertMatches(t, conn, "show warnings", `[[VARCHAR("Warning") UINT16(1235) VARCHAR("'CALL' not supported in sharded mode")]]`) + + _, err = conn.ExecuteFetch(`CALL sp_select()`, 1000, true) + require.Error(t, err) + require.Contains(t, err.Error(), "Multi-Resultset not supported in stored procedure") + + _, err = conn.ExecuteFetch(`CALL sp_all()`, 1000, true) + require.Error(t, err) + require.Contains(t, err.Error(), "Multi-Resultset not supported in stored procedure") + + qr = exec(t, conn, `CALL sp_delete()`) + require.GreaterOrEqual(t, 1, int(qr.RowsAffected)) + + qr = exec(t, conn, `CALL sp_multi_dml()`) + require.EqualValues(t, 1, qr.RowsAffected) + + qr = exec(t, conn, `CALL sp_variable()`) + require.EqualValues(t, 1, qr.RowsAffected) + + qr = exec(t, conn, `CALL in_parameter(42)`) + require.EqualValues(t, 1, 
qr.RowsAffected) + + _ = exec(t, conn, `SET @foo = 123`) + qr = exec(t, conn, `CALL in_parameter(@foo)`) + require.EqualValues(t, 1, qr.RowsAffected) + qr = exec(t, conn, "select * from allDefaults where id = 123") + assert.NotEmpty(t, qr.Rows) + + _, err = conn.ExecuteFetch(`CALL out_parameter(@foo)`, 100, true) + require.Error(t, err) + require.Contains(t, err.Error(), "OUT and INOUT parameters are not supported") +} + +func TestTempTable(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn1, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn1.Close() + + _ = exec(t, conn1, `create temporary table temp_t(id bigint primary key)`) + assertMatches(t, conn1, "show warnings", `[[VARCHAR("Warning") UINT16(1235) VARCHAR("'temporary table' not supported in sharded mode")]]`) + _ = exec(t, conn1, `insert into temp_t(id) values (1),(2),(3)`) + assertMatches(t, conn1, `select id from temp_t order by id`, `[[INT64(1)] [INT64(2)] [INT64(3)]]`) + assertMatches(t, conn1, `select count(table_id) from information_schema.innodb_temp_table_info`, `[[INT64(1)]]`) + + conn2, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn2.Close() + + assertMatches(t, conn2, `select count(table_id) from information_schema.innodb_temp_table_info`, `[[INT64(1)]]`) + execAssertError(t, conn2, `show create table temp_t`, `Table 'vt_customer.temp_t' doesn't exist (errno 1146) (sqlstate 42S02)`) +} + func TestReservedConnDML(t *testing.T) { defer cluster.PanicHandler(t) ctx := context.Background() @@ -260,6 +397,45 @@ func TestReservedConnDML(t *testing.T) { exec(t, conn, `commit`) } +func TestNumericPrecisionScale(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(ctx, 
&vtParams) + require.NoError(t, err) + defer conn.Close() + + _ = exec(t, conn, "CREATE TABLE `a` (`one` bigint NOT NULL PRIMARY KEY) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4") + require.NoError(t, err) + defer exec(t, conn, "drop table `a`") + + qr := exec(t, conn, "select numeric_precision, numeric_scale from information_schema.columns where table_name = 'a'") + require.Equal(t, 1, len(qr.Rows)) + + /* + We expect UINT64 to be returned as type for field and rows from VTGate to client. + + require.Equal(t, querypb.Type_UINT64, qr.Fields[0].Type) + require.Equal(t, querypb.Type_UINT64, qr.Fields[1].Type) + require.Equal(t, sqltypes.Uint64, qr.Rows[0][0].Type()) + require.Equal(t, sqltypes.Uint64, qr.Rows[0][1].Type()) + + But, the field query from mysql returns field at UINT32 and row types as UINT64. + Our conversion on VTGate on receiving data from VTTablet the Rows are converted to Field Types. + So, we see UINT32 for both fields and rows. + + This issue is only with MySQL 8.0. In CI we use 5.7 as well. So asserting with both the values. 
+ */ + + assert.True(t, qr.Fields[0].Type == querypb.Type_UINT64 || qr.Fields[0].Type == querypb.Type_UINT32) + assert.True(t, qr.Fields[1].Type == querypb.Type_UINT64 || qr.Fields[1].Type == querypb.Type_UINT32) + assert.True(t, qr.Rows[0][0].Type() == sqltypes.Uint64 || qr.Rows[0][0].Type() == sqltypes.Uint32) + assert.True(t, qr.Rows[0][1].Type() == sqltypes.Uint64 || qr.Rows[0][1].Type() == sqltypes.Uint32) +} + func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { t.Helper() qr, err := conn.ExecuteFetch(query, 1000, true) diff --git a/go/test/endtoend/vtgate/vtroot_6701/topo_6702/topo-stderr.txt b/go/test/endtoend/vtgate/vtroot_6701/topo_6702/topo-stderr.txt deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/go/test/fuzzing/oss_fuzz_build.sh b/go/test/fuzzing/oss_fuzz_build.sh new file mode 100644 index 00000000000..167f6769f8e --- /dev/null +++ b/go/test/fuzzing/oss_fuzz_build.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright 2021 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +compile_go_fuzzer ./go/test/fuzzing Fuzz vtctl_fuzzer +compile_go_fuzzer ./go/test/fuzzing FuzzIsDML is_dml_fuzzer +compile_go_fuzzer ./go/test/fuzzing FuzzNormalizer normalizer_fuzzer +compile_go_fuzzer ./go/test/fuzzing FuzzParser parser_fuzzer + +#cp ./go/test/fuzzing/mysql/mysql_fuzzer.go ./go/mysql/ +compile_go_fuzzer ./go/mysql FuzzWritePacket write_packet_fuzzer +compile_go_fuzzer ./go/mysql FuzzHandleNextCommand handle_next_command_fuzzer +compile_go_fuzzer ./go/mysql FuzzReadQueryResults read_query_results_fuzzer + +# Build dictionaries +cp $SRC/vitess/go/test/fuzzing/vtctl_fuzzer.dict $OUT/ diff --git a/go/test/fuzzing/parser_fuzzer.go b/go/test/fuzzing/parser_fuzzer.go new file mode 100644 index 00000000000..a1595fcfef5 --- /dev/null +++ b/go/test/fuzzing/parser_fuzzer.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// +build gofuzz + +package fuzzing + +import ( + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" +) + +func FuzzIsDML(data []byte) int { + _ = sqlparser.IsDML(string(data)) + return 1 +} + +func FuzzNormalizer(data []byte) int { + stmt, reservedVars, err := sqlparser.Parse2(string(data)) + if err != nil { + return -1 + } + prefix := "bv" + bv := make(map[string]*querypb.BindVariable) + sqlparser.Normalize(stmt, reservedVars, bv, prefix) + return 1 +} + +func FuzzParser(data []byte) int { + _, err := sqlparser.Parse(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/go/test/fuzzing/vtctl_fuzzer.dict b/go/test/fuzzing/vtctl_fuzzer.dict new file mode 100644 index 00000000000..7a4628c267d --- /dev/null +++ b/go/test/fuzzing/vtctl_fuzzer.dict @@ -0,0 +1,58 @@ + +"insert into" +"(" +")" +"values" +"create" +"table" +"bigint" +"char" +"NOT NULL" +"varbinary" +"primary key" +"constraint" +"references" +"where" +"from" +"as" +"select" +"and" +"group" +"order" +"by" +"$"" +"_" +"'" +"inner" +"left" +"right" +"full" +"outer" +"join" +"on" + + +"=" +":" +";" +"," +"-allow_update" +"-allow_master" +"-allow_different_shard" +"-allow_master_override" +"-parent" +"-db_name_override" +"-dry-run" +"-hostname" +"-mysql_port" +"-port" +"-grpc_port" +"-tags" +"-keyspace" +"-shard" +"-cells" +"-json" +"-use_pool" +"-max_rows" +"-concurrency" +"-disable_binlogs" \ No newline at end of file diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go new file mode 100644 index 00000000000..3e5b3350187 --- /dev/null +++ b/go/test/fuzzing/vtctl_fuzzer.go @@ -0,0 +1,202 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// +build gofuzz + +package fuzzing + +import ( + "context" + "strings" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtctl" + "vitess.io/vitess/go/vt/vttablet/tmclient" + "vitess.io/vitess/go/vt/wrangler" +) + +func init() { + *tmclient.TabletManagerProtocol = "fuzzing" + tmclient.RegisterTabletManagerClientFactory("fuzzing", func() tmclient.TabletManagerClient { + return nil + }) +} + +func IsDivisibleBy(n int, divisibleby int) bool { + return (n % divisibleby) == 0 +} + +func getCommandType(index int) string { + + m := map[int]string{ + 0: "GetTablet", // Tablets + 1: "InitTablet", + 2: "UpdateTabletAddrs", + 3: "DeleteTablet", + 4: "SetReadOnly", + 5: "SetReadWrite", + 6: "StartReplication", + 7: "StopReplication", + 8: "ChangeTabletType", + 9: "Ping", + 10: "RefreshState", + 11: "RefreshStateByShard", + 12: "RunHealthCheck", + 13: "IgnoreHealthCheck", + 14: "IgnoreHealthError", + 15: "ExecuteHook", + 16: "ExecuteFetchAsApp", + 17: "ExecuteFetchAsDba", + 18: "VReplicationExec", + 19: "Backup", + 20: "RestoreFromBackup", + 21: "ReparentTablet", + 22: "CreateShard", // Shards + 23: "GetShard", + 24: "ValidateShard", + 25: "ShardReplicationPositions", + 26: "ListShardTablets", + 27: "SetShardIsMasterServing", + 28: "SetShardTabletControl", + 29: "UpdateSrvKeyspacePartition", + 30: "SourceShardDelete", + 31: "SourceShardAdd", + 32: "ShardReplicationFix", + 33: "WaitForFilteredReplication", + 34: "RemoveShardCell", + 35: "DeleteShard", + 36: "ListBackups", + 37: "BackupShard", 
+ 38: "RemoveBackup", + 39: "InitShardMaster", + 40: "PlannedReparentShard", + 41: "EmergencyReparentShard", + 42: "TabletExternallyReparented", + 43: "CreateKeyspace", // Keyspaces + 44: "DeleteKeyspace", + 45: "RemoveKeyspaceCell", + 46: "GetKeyspace", + 47: "GetKeyspaces", + 48: "SetKeyspaceShardingInfo", + 49: "SetKeyspaceServedFrom", + 50: "RebuildKeyspaceGraph", + 51: "ValidateKeyspace", + 52: "Reshard", + 53: "MoveTables", + 54: "DropSources", + 55: "CreateLookupVindex", + 56: "ExternalizeVindex", + 57: "Materialize", + 58: "SplitClone", + 59: "VerticalSplitClone", + 60: "VDiff", + 61: "MigrateServedTypes", + 62: "MigrateServedFrom", + 63: "SwitchReads", + 64: "SwitchWrites", + 65: "CancelResharding", + 66: "ShowResharding", + 67: "FindAllShardsInKeyspace", + 68: "WaitForDrain", + } + return m[index] + +} + +/* + In this fuzzer we split the input into 3 chunks: + 1: the first byte - Is converted to an int, and + that int determines the number of command-line + calls the fuzzer will make. + 2: The next n bytes where n is equal to the int from + the first byte. These n bytes are converted to + a corresponding command and represent which + commands will be called. + 3: The rest of the data array should have a length + that is divisible by the number of calls. + This part is split up into equally large chunks, + and each chunk is used as parameters for the + corresponding command. +*/ +func Fuzz(data []byte) int { + + // Basic checks + if len(data) == 0 { + return -1 + } + numberOfCalls := int(data[0]) + if numberOfCalls < 3 || numberOfCalls > 10 { + return -1 + } + if len(data) < numberOfCalls+numberOfCalls+1 { + return -1 + } + + // Define part 2 and 3 of the data array + commandPart := data[1 : numberOfCalls+1] + restOfArray := data[numberOfCalls+1:] + + // Just a small check. 
It is necessary + if len(commandPart) != numberOfCalls { + return -1 + } + + // Check if restOfArray is divisible by numberOfCalls + if !IsDivisibleBy(len(restOfArray), numberOfCalls) { + return -1 + } + + // At this point we have a data array that can + // be divided properly. We can now proceed to + // passing it to Vitess + ctx := context.Background() + topo, err := createTopo(ctx) + if err != nil { + return -1 + } + tmc := tmclient.NewTabletManagerClient() + logger := logutil.NewMemoryLogger() + + chunkSize := len(restOfArray) / numberOfCalls + command := 0 + for i := 0; i < len(restOfArray); i = i + chunkSize { + from := i //lower + to := i + chunkSize //upper + + // Index of command in getCommandType(): + commandIndex := int(commandPart[command]) % 68 + vtCommand := getCommandType(commandIndex) + command_slice := []string{vtCommand} + args := strings.Split(string(restOfArray[from:to]), " ") + + // Add params to the command + for i := range args { + command_slice = append(command_slice, args[i]) + } + + _ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc), command_slice) + command++ + } + + return 1 + +} + +func createTopo(ctx context.Context) (*topo.Server, error) { + ts := memorytopo.NewServer("zone1", "zone2", "zone3") + return ts, nil +} diff --git a/go/timer/suspendable_ticker.go b/go/timer/suspendable_ticker.go index 2d971c69eb0..5257626b85f 100644 --- a/go/timer/suspendable_ticker.go +++ b/go/timer/suspendable_ticker.go @@ -61,6 +61,15 @@ func (s *SuspendableTicker) Stop() { s.ticker.Stop() } +// TickNow generates a tick at this point in time. It may block +// if nothing consumes the tick. 
+func (s *SuspendableTicker) TickNow() { + if atomic.LoadInt64(&s.suspended) == 0 { + // not suspended + s.C <- time.Now() + } +} + func (s *SuspendableTicker) loop() { for t := range s.ticker.C { if atomic.LoadInt64(&s.suspended) == 0 { diff --git a/go/tools/astfmtgen/main.go b/go/tools/astfmtgen/main.go new file mode 100644 index 00000000000..839c8a52cc5 --- /dev/null +++ b/go/tools/astfmtgen/main.go @@ -0,0 +1,221 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "go/ast" + "go/printer" + "go/token" + "go/types" + "os" + "path" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/packages" +) + +func main() { + err := load(os.Args[1]) + if err != nil { + panic(err) + } +} + +func load(packageName string) error { + config := &packages.Config{ + Mode: packages.NeedName | + packages.NeedFiles | + packages.NeedCompiledGoFiles | + packages.NeedImports | + packages.NeedTypes | + packages.NeedSyntax | + packages.NeedTypesInfo, + } + pkgs, err := packages.Load(config, packageName) + if err != nil { + return fmt.Errorf("error loading package %s: %w", packageName, err) + } + for _, pkg := range pkgs { + if pkg.Name == "sqlparser" { + rewriter := &Rewriter{pkg: pkg} + err := rewriter.Rewrite() + if err != nil { + return err + } + } + } + return nil +} + +type Rewriter struct { + pkg *packages.Package + astExpr *types.Interface +} + +func (r *Rewriter) Rewrite() error { + scope := 
r.pkg.Types.Scope() + exprT := scope.Lookup("Expr").(*types.TypeName) + exprN := exprT.Type().(*types.Named).Underlying() + r.astExpr = exprN.(*types.Interface) + + for i, file := range r.pkg.GoFiles { + dirname, filename := path.Split(file) + if filename == "ast_format.go" { + syntax := r.pkg.Syntax[i] + astutil.Apply(syntax, r.replaceAstfmtCalls, nil) + + f, err := os.Create(path.Join(dirname, "ast_format_fast.go")) + if err != nil { + return err + } + fmt.Fprintf(f, "// Code generated by ASTFmtGen. DO NOT EDIT.\n") + printer.Fprint(f, r.pkg.Fset, syntax) + f.Close() + } + } + return nil +} + +func (r *Rewriter) replaceAstfmtCalls(cursor *astutil.Cursor) bool { + switch v := cursor.Node().(type) { + case *ast.Comment: + v.Text = strings.ReplaceAll(v.Text, " Format ", " formatFast ") + case *ast.FuncDecl: + if v.Name.Name == "Format" { + v.Name.Name = "formatFast" + } + case *ast.ExprStmt: + if call, ok := v.X.(*ast.CallExpr); ok { + if r.isPrintfCall(call) { + return r.rewriteAstPrintf(cursor, call) + } + } + } + return true +} + +func (r *Rewriter) isPrintfCall(n *ast.CallExpr) bool { + s, ok := n.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + id := s.Sel + if id != nil && !r.pkg.TypesInfo.Types[id].IsType() { + return id.Name == "astPrintf" + } + return false +} + +func (r *Rewriter) rewriteLiteral(rcv ast.Expr, method string, arg ast.Expr) ast.Stmt { + expr := &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: rcv, + Sel: &ast.Ident{Name: method}, + }, + Args: []ast.Expr{arg}, + } + return &ast.ExprStmt{X: expr} +} + +func (r *Rewriter) rewriteAstPrintf(cursor *astutil.Cursor, expr *ast.CallExpr) bool { + callexpr := expr.Fun.(*ast.SelectorExpr) + lit := expr.Args[1].(*ast.BasicLit) + format, _ := strconv.Unquote(lit.Value) + + end := len(format) + fieldnum := 0 + for i := 0; i < end; { + lasti := i + for i < end && format[i] != '%' { + i++ + } + if i > lasti { + var arg ast.Expr + var method string + var lit = format[lasti:i] + + if len(lit) == 1 { + 
method = "WriteByte" + arg = &ast.BasicLit{ + Kind: token.CHAR, + Value: strconv.QuoteRune(rune(lit[0])), + } + } else { + method = "WriteString" + arg = &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(lit), + } + } + + cursor.InsertBefore(r.rewriteLiteral(callexpr.X, method, arg)) + } + if i >= end { + break + } + i++ // '%' + token := format[i] + switch token { + case 'c': + cursor.InsertBefore(r.rewriteLiteral(callexpr.X, "WriteByte", expr.Args[2+fieldnum])) + case 's': + cursor.InsertBefore(r.rewriteLiteral(callexpr.X, "WriteString", expr.Args[2+fieldnum])) + case 'l', 'r', 'v': + leftExpr := expr.Args[0] + leftExprT := r.pkg.TypesInfo.Types[leftExpr].Type + + rightExpr := expr.Args[2+fieldnum] + rightExprT := r.pkg.TypesInfo.Types[rightExpr].Type + + var call ast.Expr + if types.Implements(leftExprT, r.astExpr) && types.Implements(rightExprT, r.astExpr) { + call = &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: callexpr.X, + Sel: &ast.Ident{Name: "printExpr"}, + }, + Args: []ast.Expr{ + leftExpr, + rightExpr, + &ast.Ident{ + Name: strconv.FormatBool(token != 'r'), + }, + }, + } + } else { + call = &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: rightExpr, + Sel: &ast.Ident{Name: "formatFast"}, + }, + Args: []ast.Expr{callexpr.X}, + } + } + cursor.InsertBefore(&ast.ExprStmt{X: call}) + default: + panic(fmt.Sprintf("unsupported escape %q", token)) + } + fieldnum++ + i++ + } + + cursor.Delete() + return true +} diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go new file mode 100644 index 00000000000..c1d7072b7c5 --- /dev/null +++ b/go/tools/asthelpergen/asthelpergen.go @@ -0,0 +1,324 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package asthelpergen + +import ( + "bytes" + "fmt" + "go/types" + "io/ioutil" + "log" + "path" + "strings" + + "github.com/dave/jennifer/jen" + "golang.org/x/tools/go/packages" +) + +const licenseFileHeader = `Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License.` + +type ( + generatorSPI interface { + addType(t types.Type) + scope() *types.Scope + findImplementations(iff *types.Interface, impl func(types.Type) error) error + iface() *types.Interface + } + generator interface { + genFile() (string, *jen.File) + interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error + structMethod(t types.Type, strct *types.Struct, spi generatorSPI) error + ptrToStructMethod(t types.Type, strct *types.Struct, spi generatorSPI) error + ptrToBasicMethod(t types.Type, basic *types.Basic, spi generatorSPI) error + sliceMethod(t types.Type, slice *types.Slice, spi generatorSPI) error + basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) error + } + // astHelperGen finds implementations of the given interface, + // and uses the supplied `generator`s to produce the output code + astHelperGen struct { + DebugTypes bool + mod *packages.Module + sizes types.Sizes + namedIface *types.Named + _iface *types.Interface + gens []generator + + _scope *types.Scope + todo []types.Type + } +) + +func (gen *astHelperGen) iface() *types.Interface { + return gen._iface +} + +func newGenerator(mod *packages.Module, sizes types.Sizes, named *types.Named, generators ...generator) *astHelperGen { + return &astHelperGen{ + DebugTypes: true, + mod: mod, + sizes: sizes, + namedIface: named, + _iface: named.Underlying().(*types.Interface), + gens: generators, + } +} + +func findImplementations(scope *types.Scope, iff *types.Interface, impl func(types.Type) error) error { + for _, name := range scope.Names() { + obj := scope.Lookup(name) + if _, ok := obj.(*types.TypeName); !ok { + continue + } + baseType := obj.Type() + if types.Implements(baseType, iff) { + err := impl(baseType) + if err != nil { + return err + } + continue + } + pointerT := types.NewPointer(baseType) + if types.Implements(pointerT, iff) { + err := impl(pointerT) + if 
err != nil { + return err + } + continue + } + } + return nil +} +func (gen *astHelperGen) findImplementations(iff *types.Interface, impl func(types.Type) error) error { + for _, name := range gen._scope.Names() { + obj := gen._scope.Lookup(name) + if _, ok := obj.(*types.TypeName); !ok { + continue + } + baseType := obj.Type() + if types.Implements(baseType, iff) { + err := impl(baseType) + if err != nil { + return err + } + continue + } + pointerT := types.NewPointer(baseType) + if types.Implements(pointerT, iff) { + err := impl(pointerT) + if err != nil { + return err + } + continue + } + } + return nil +} + +// GenerateCode is the main loop where we build up the code per file. +func (gen *astHelperGen) GenerateCode() (map[string]*jen.File, error) { + pkg := gen.namedIface.Obj().Pkg() + + gen._scope = pkg.Scope() + gen.todo = append(gen.todo, gen.namedIface) + jenFiles := gen.createFiles() + + result := map[string]*jen.File{} + for fName, genFile := range jenFiles { + fullPath := path.Join(gen.mod.Dir, strings.TrimPrefix(pkg.Path(), gen.mod.Path), fName) + result[fullPath] = genFile + } + + return result, nil +} + +// TypePaths are the packages +type TypePaths []string + +func (t *TypePaths) String() string { + return fmt.Sprintf("%v", *t) +} + +// Set adds the package path +func (t *TypePaths) Set(path string) error { + *t = append(*t, path) + return nil +} + +// VerifyFilesOnDisk compares the generated results from the codegen against the files that +// currently exist on disk and returns any mismatches +func VerifyFilesOnDisk(result map[string]*jen.File) (errors []error) { + for fullPath, file := range result { + existing, err := ioutil.ReadFile(fullPath) + if err != nil { + errors = append(errors, fmt.Errorf("missing file on disk: %s (%w)", fullPath, err)) + continue + } + + var buf bytes.Buffer + if err := file.Render(&buf); err != nil { + errors = append(errors, fmt.Errorf("render error for '%s': %w", fullPath, err)) + continue + } + + if 
!bytes.Equal(existing, buf.Bytes()) { + errors = append(errors, fmt.Errorf("'%s' has changed", fullPath)) + continue + } + } + return errors +} + +// GenerateASTHelpers loads the input code, constructs the necessary generators, +// and generates the rewriter and clone methods for the AST +func GenerateASTHelpers(packagePatterns []string, rootIface, exceptCloneType string) (map[string]*jen.File, error) { + loaded, err := packages.Load(&packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | packages.NeedDeps | packages.NeedImports | packages.NeedModule, + }, packagePatterns...) + + if err != nil { + return nil, err + } + + scopes := make(map[string]*types.Scope) + for _, pkg := range loaded { + scopes[pkg.PkgPath] = pkg.Types.Scope() + } + + pos := strings.LastIndexByte(rootIface, '.') + if pos < 0 { + return nil, fmt.Errorf("unexpected input type: %s", rootIface) + } + + pkgname := rootIface[:pos] + typename := rootIface[pos+1:] + + scope := scopes[pkgname] + if scope == nil { + return nil, fmt.Errorf("no scope found for type '%s'", rootIface) + } + + tt := scope.Lookup(typename) + if tt == nil { + return nil, fmt.Errorf("no type called '%s' found in '%s'", typename, pkgname) + } + + nt := tt.Type().(*types.Named) + pName := nt.Obj().Pkg().Name() + generator := newGenerator(loaded[0].Module, loaded[0].TypesSizes, nt, + newEqualsGen(pName), + newCloneGen(pName, exceptCloneType), + newVisitGen(pName), + newRewriterGen(pName, types.TypeString(nt, noQualifier)), + ) + + it, err := generator.GenerateCode() + if err != nil { + return nil, err + } + + return it, nil +} + +var _ generatorSPI = (*astHelperGen)(nil) + +func (gen *astHelperGen) scope() *types.Scope { + return gen._scope +} + +func (gen *astHelperGen) addType(t types.Type) { + gen.todo = append(gen.todo, t) +} + +func (gen *astHelperGen) createFiles() map[string]*jen.File { + alreadyDone := map[string]bool{} + for len(gen.todo) > 0 { + t := 
gen.todo[0] + underlying := t.Underlying() + typeName := printableTypeName(t) + gen.todo = gen.todo[1:] + + if alreadyDone[typeName] { + continue + } + var err error + for _, g := range gen.gens { + switch underlying := underlying.(type) { + case *types.Interface: + err = g.interfaceMethod(t, underlying, gen) + case *types.Slice: + err = g.sliceMethod(t, underlying, gen) + case *types.Struct: + err = g.structMethod(t, underlying, gen) + case *types.Pointer: + ptrToType := underlying.Elem().Underlying() + switch ptrToType := ptrToType.(type) { + case *types.Struct: + err = g.ptrToStructMethod(t, ptrToType, gen) + case *types.Basic: + err = g.ptrToBasicMethod(t, ptrToType, gen) + default: + panic(fmt.Sprintf("%T", ptrToType)) + } + case *types.Basic: + err = g.basicMethod(t, underlying, gen) + default: + log.Fatalf("don't know how to handle %s %T", typeName, underlying) + } + if err != nil { + log.Fatal(err) + } + } + alreadyDone[typeName] = true + } + + result := map[string]*jen.File{} + for _, g := range gen.gens { + fName, jenFile := g.genFile() + result[fName] = jenFile + } + return result +} + +// printableTypeName returns a string that can be used as a valid golang identifier +func printableTypeName(t types.Type) string { + switch t := t.(type) { + case *types.Pointer: + return "RefOf" + printableTypeName(t.Elem()) + case *types.Slice: + return "SliceOf" + printableTypeName(t.Elem()) + case *types.Named: + return t.Obj().Name() + case *types.Basic: + return strings.Title(t.Name()) + case *types.Interface: + return t.String() + default: + panic(fmt.Sprintf("unknown type %T %v", t, t)) + } +} diff --git a/go/tools/asthelpergen/asthelpergen_test.go b/go/tools/asthelpergen/asthelpergen_test.go new file mode 100644 index 00000000000..16372b13d75 --- /dev/null +++ b/go/tools/asthelpergen/asthelpergen_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package asthelpergen + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFullGeneration(t *testing.T) { + result, err := GenerateASTHelpers([]string{"./integration/..."}, "vitess.io/vitess/go/tools/asthelpergen/integration.AST", "*NoCloneType") + require.NoError(t, err) + + verifyErrors := VerifyFilesOnDisk(result) + require.Empty(t, verifyErrors) + + for _, file := range result { + contents := fmt.Sprintf("%#v", file) + require.Contains(t, contents, "http://www.apache.org/licenses/LICENSE-2.0") + applyIdx := strings.Index(contents, "func (a *application) apply(parent, node AST, replacer replacerFunc)") + cloneIdx := strings.Index(contents, "CloneAST(in AST) AST") + if applyIdx == 0 && cloneIdx == 0 { + t.Fatalf("file doesn't contain expected contents") + } + } +} diff --git a/go/tools/asthelpergen/clone_gen.go b/go/tools/asthelpergen/clone_gen.go new file mode 100644 index 00000000000..121f53c18c6 --- /dev/null +++ b/go/tools/asthelpergen/clone_gen.go @@ -0,0 +1,252 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package asthelpergen + +import ( + "fmt" + "go/types" + "log" + + "github.com/dave/jennifer/jen" +) + +// cloneGen creates the deep clone methods for the AST. It works by discovering the types that it needs to support, +// starting from a root interface type. While creating the clone method for this root interface, more types that need +// to be cloned are discovered. This continues type by type until all necessary types have been traversed. +type cloneGen struct { + exceptType string + file *jen.File +} + +var _ generator = (*cloneGen)(nil) + +func newCloneGen(pkgname string, exceptType string) *cloneGen { + file := jen.NewFile(pkgname) + file.HeaderComment(licenseFileHeader) + file.HeaderComment("Code generated by ASTHelperGen. 
DO NOT EDIT.") + + return &cloneGen{ + exceptType: exceptType, + file: file, + } +} + +func (c *cloneGen) addFunc(name string, code *jen.Statement) { + c.file.Add(jen.Comment(fmt.Sprintf("%s creates a deep clone of the input.", name))) + c.file.Add(code) +} + +func (c *cloneGen) genFile() (string, *jen.File) { + return "ast_clone.go", c.file +} + +const cloneName = "Clone" + +// readValueOfType produces code to read the expression of type `t`, and adds the type to the todo-list +func (c *cloneGen) readValueOfType(t types.Type, expr jen.Code, spi generatorSPI) jen.Code { + switch t.Underlying().(type) { + case *types.Basic: + return expr + case *types.Interface: + if types.TypeString(t, noQualifier) == "interface{}" { + // these fields have to be taken care of manually + return expr + } + } + spi.addType(t) + return jen.Id(cloneName + printableTypeName(t)).Call(expr) +} + +func (c *cloneGen) structMethod(t types.Type, _ *types.Struct, spi generatorSPI) error { + typeString := types.TypeString(t, noQualifier) + funcName := cloneName + printableTypeName(t) + c.addFunc(funcName, + jen.Func().Id(funcName).Call(jen.Id("n").Id(typeString)).Id(typeString).Block( + jen.Return(jen.Op("*").Add(c.readValueOfType(types.NewPointer(t), jen.Op("&").Id("n"), spi))), + )) + return nil +} + +func (c *cloneGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorSPI) error { + typeString := types.TypeString(t, noQualifier) + name := printableTypeName(t) + funcName := cloneName + name + + c.addFunc(funcName, + //func (n Bytes) Clone() Bytes { + jen.Func().Id(funcName).Call(jen.Id("n").Id(typeString)).Id(typeString).Block( + // res := make(Bytes, len(n)) + jen.Id("res").Op(":=").Id("make").Call(jen.Id(typeString), jen.Lit(0), jen.Id("len").Call(jen.Id("n"))), + c.copySliceElement(slice.Elem(), spi), + // return res + jen.Return(jen.Id("res")), + )) + return nil +} + +func (c *cloneGen) basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) error { + return nil +} + +func 
(c *cloneGen) copySliceElement(elType types.Type, spi generatorSPI) jen.Code {
+	if isBasic(elType) {
+		// res = append(res, n...)
+		return jen.Id("res").Op("=").Id("append").Call(jen.Id("res"), jen.Id("n").Op("..."))
+	}
+
+	//for _, x := range n {
+	//	res = append(res, CloneAST(x))
+	//}
+	spi.addType(elType)
+
+	return jen.For(jen.List(jen.Op("_"), jen.Id("x"))).Op(":=").Range().Id("n").Block(
+		jen.Id("res").Op("=").Id("append").Call(jen.Id("res"), c.readValueOfType(elType, jen.Id("x"), spi)),
+	)
+}
+
+func (c *cloneGen) interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error {
+
+	//func CloneAST(in AST) AST {
+	//	if in == nil {
+	//	return nil
+	//}
+	//	switch in := in.(type) {
+	//case *RefContainer:
+	//	return in.CloneRefOfRefContainer()
+	//}
+	//	// this should never happen
+	//	return nil
+	//}
+
+	typeString := types.TypeString(t, noQualifier)
+	typeName := printableTypeName(t)
+
+	stmts := []jen.Code{ifNilReturnNil("in")}
+
+	var cases []jen.Code
+	_ = findImplementations(spi.scope(), iface, func(t types.Type) error {
+		typeString := types.TypeString(t, noQualifier)
+
+		// case Type: return CloneType(in)
+		block := jen.Case(jen.Id(typeString)).Block(jen.Return(c.readValueOfType(t, jen.Id("in"), spi)))
+		switch t := t.(type) {
+		case *types.Pointer:
+			_, isIface := t.Elem().(*types.Interface)
+			if !isIface {
+				cases = append(cases, block)
+			}
+
+		case *types.Named:
+			_, isIface := t.Underlying().(*types.Interface)
+			if !isIface {
+				cases = append(cases, block)
+			}
+
+		default:
+			log.Fatalf("unexpected type encountered: %s", typeString)
+		}
+
+		return nil
+	})
+
+	cases = append(cases,
+		jen.Default().Block(
+			jen.Comment("this should never happen"),
+			jen.Return(jen.Nil()),
+		))
+
+	// switch n := node.(type) {
+	stmts = append(stmts, jen.Switch(jen.Id("in").Op(":=").Id("in").Assert(jen.Id("type")).Block(
+		cases...,
+	)))
+
+	funcName := cloneName + typeName
+	funcDecl := jen.Func().Id(funcName).Call(jen.Id("in").Id(typeString)).Id(typeString).Block(stmts...)
+ c.addFunc(funcName, funcDecl) + return nil +} + +func (c *cloneGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generatorSPI) error { + ptr := t.Underlying().(*types.Pointer) + return c.ptrToOtherMethod(t, ptr, spi) +} + +func (c *cloneGen) ptrToOtherMethod(t types.Type, ptr *types.Pointer, spi generatorSPI) error { + receiveType := types.TypeString(t, noQualifier) + + funcName := "Clone" + printableTypeName(t) + c.addFunc(funcName, + jen.Func().Id(funcName).Call(jen.Id("n").Id(receiveType)).Id(receiveType).Block( + ifNilReturnNil("n"), + jen.Id("out").Op(":=").Add(c.readValueOfType(ptr.Elem(), jen.Op("*").Id("n"), spi)), + jen.Return(jen.Op("&").Id("out")), + )) + return nil +} + +func ifNilReturnNil(id string) *jen.Statement { + return jen.If(jen.Id(id).Op("==").Nil()).Block(jen.Return(jen.Nil())) +} + +func isBasic(t types.Type) bool { + _, x := t.Underlying().(*types.Basic) + return x +} + +func (c *cloneGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi generatorSPI) error { + receiveType := types.TypeString(t, noQualifier) + funcName := cloneName + printableTypeName(t) + + //func CloneRefOfType(n *Type) *Type + funcDeclaration := jen.Func().Id(funcName).Call(jen.Id("n").Id(receiveType)).Id(receiveType) + + if receiveType == c.exceptType { + c.addFunc(funcName, funcDeclaration.Block( + jen.Return(jen.Id("n")), + )) + return nil + } + + var fields []jen.Code + for i := 0; i < strct.NumFields(); i++ { + field := strct.Field(i) + if isBasic(field.Type()) || field.Name() == "_" { + continue + } + // out.Field = CloneType(n.Field) + fields = append(fields, + jen.Id("out").Dot(field.Name()).Op("=").Add(c.readValueOfType(field.Type(), jen.Id("n").Dot(field.Name()), spi))) + } + + stmts := []jen.Code{ + // if n == nil { return nil } + ifNilReturnNil("n"), + // out := *n + jen.Id("out").Op(":=").Op("*").Id("n"), + } + + // handle all fields with CloneAble types + stmts = append(stmts, fields...) 
+ + stmts = append(stmts, + // return &out + jen.Return(jen.Op("&").Id("out")), + ) + + c.addFunc(funcName, funcDeclaration.Block(stmts...)) + return nil +} diff --git a/go/tools/asthelpergen/equals_gen.go b/go/tools/asthelpergen/equals_gen.go new file mode 100644 index 00000000000..93125ebcd37 --- /dev/null +++ b/go/tools/asthelpergen/equals_gen.go @@ -0,0 +1,259 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package asthelpergen + +import ( + "fmt" + "go/types" + + "github.com/dave/jennifer/jen" +) + +const equalsName = "Equals" + +type equalsGen struct { + file *jen.File +} + +var _ generator = (*equalsGen)(nil) + +func newEqualsGen(pkgname string) *equalsGen { + file := jen.NewFile(pkgname) + file.HeaderComment(licenseFileHeader) + file.HeaderComment("Code generated by ASTHelperGen. 
DO NOT EDIT.") + + return &equalsGen{ + file: file, + } +} + +func (e *equalsGen) addFunc(name string, code *jen.Statement) { + e.file.Add(jen.Comment(fmt.Sprintf("%s does deep equals between the two objects.", name))) + e.file.Add(code) +} + +func (e *equalsGen) genFile() (string, *jen.File) { + return "ast_equals.go", e.file +} + +func (e *equalsGen) interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error { + /* + func EqualsAST(inA, inB AST) bool { + if inA == inB { + return true + } + if inA == nil || inB8 == nil { + return false + } + switch a := inA.(type) { + case *SubImpl: + b, ok := inB.(*SubImpl) + if !ok { + return false + } + return EqualsSubImpl(a, b) + } + return false + } + */ + stmts := []jen.Code{ + jen.If(jen.Id("inA == nil").Op("&&").Id("inB == nil")).Block(jen.Return(jen.True())), + jen.If(jen.Id("inA == nil").Op("||").Id("inB == nil")).Block(jen.Return(jen.False())), + } + + var cases []jen.Code + _ = spi.findImplementations(iface, func(t types.Type) error { + if _, ok := t.Underlying().(*types.Interface); ok { + return nil + } + typeString := types.TypeString(t, noQualifier) + caseBlock := jen.Case(jen.Id(typeString)).Block( + jen.Id("b, ok := inB.").Call(jen.Id(typeString)), + jen.If(jen.Id("!ok")).Block(jen.Return(jen.False())), + jen.Return(compareValueType(t, jen.Id("a"), jen.Id("b"), true, spi)), + ) + cases = append(cases, caseBlock) + return nil + }) + + cases = append(cases, + jen.Default().Block( + jen.Comment("this should never happen"), + jen.Return(jen.False()), + )) + + stmts = append(stmts, jen.Switch(jen.Id("a := inA.(type)").Block( + cases..., + ))) + + typeString := types.TypeString(t, noQualifier) + funcName := equalsName + printableTypeName(t) + funcDecl := jen.Func().Id(funcName).Call(jen.List(jen.Id("inA"), jen.Id("inB")).Id(typeString)).Bool().Block(stmts...) 
+ e.addFunc(funcName, funcDecl) + + return nil +} + +func compareValueType(t types.Type, a, b *jen.Statement, eq bool, spi generatorSPI) *jen.Statement { + switch t.Underlying().(type) { + case *types.Basic: + if eq { + return a.Op("==").Add(b) + } + return a.Op("!=").Add(b) + } + spi.addType(t) + var neg = "!" + if eq { + neg = "" + } + return jen.Id(neg+equalsName+printableTypeName(t)).Call(a, b) +} + +func (e *equalsGen) structMethod(t types.Type, strct *types.Struct, spi generatorSPI) error { + /* + func EqualsRefOfRefContainer(inA RefContainer, inB RefContainer) bool { + return EqualsRefOfLeaf(inA.ASTImplementationType, inB.ASTImplementationType) && + EqualsAST(inA.ASTType, inB.ASTType) && inA.NotASTType == inB.NotASTType + } + + */ + + typeString := types.TypeString(t, noQualifier) + funcName := equalsName + printableTypeName(t) + funcDecl := jen.Func().Id(funcName).Call(jen.List(jen.Id("a"), jen.Id("b")).Id(typeString)).Bool(). + Block(jen.Return(compareAllStructFields(strct, spi))) + e.addFunc(funcName, funcDecl) + + return nil +} + +func compareAllStructFields(strct *types.Struct, spi generatorSPI) jen.Code { + var basicsPred []*jen.Statement + var others []*jen.Statement + for i := 0; i < strct.NumFields(); i++ { + field := strct.Field(i) + if field.Type().Underlying().String() == "interface{}" || field.Name() == "_" { + // we can safely ignore this, we do not want ast to contain interface{} types. 
+ continue + } + fieldA := jen.Id("a").Dot(field.Name()) + fieldB := jen.Id("b").Dot(field.Name()) + pred := compareValueType(field.Type(), fieldA, fieldB, true, spi) + if _, ok := field.Type().(*types.Basic); ok { + basicsPred = append(basicsPred, pred) + continue + } + others = append(others, pred) + } + + var ret *jen.Statement + for _, pred := range basicsPred { + if ret == nil { + ret = pred + } else { + ret = ret.Op("&&").Line().Add(pred) + } + } + + for _, pred := range others { + if ret == nil { + ret = pred + } else { + ret = ret.Op("&&").Line().Add(pred) + } + } + + if ret == nil { + return jen.True() + } + return ret +} + +func (e *equalsGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi generatorSPI) error { + typeString := types.TypeString(t, noQualifier) + funcName := equalsName + printableTypeName(t) + + //func EqualsRefOfType(a,b *Type) *Type + funcDeclaration := jen.Func().Id(funcName).Call(jen.Id("a"), jen.Id("b").Id(typeString)).Bool() + stmts := []jen.Code{ + jen.If(jen.Id("a == b")).Block(jen.Return(jen.True())), + jen.If(jen.Id("a == nil").Op("||").Id("b == nil")).Block(jen.Return(jen.False())), + jen.Return(compareAllStructFields(strct, spi)), + } + + e.addFunc(funcName, funcDeclaration.Block(stmts...)) + return nil +} + +func (e *equalsGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generatorSPI) error { + /* + func EqualsRefOfBool(a, b *bool) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b + } + */ + typeString := types.TypeString(t, noQualifier) + funcName := equalsName + printableTypeName(t) + + //func EqualsRefOfType(a,b *Type) *Type + funcDeclaration := jen.Func().Id(funcName).Call(jen.Id("a"), jen.Id("b").Id(typeString)).Bool() + stmts := []jen.Code{ + jen.If(jen.Id("a == b")).Block(jen.Return(jen.True())), + jen.If(jen.Id("a == nil").Op("||").Id("b == nil")).Block(jen.Return(jen.False())), + jen.Return(jen.Id("*a == *b")), + } + e.addFunc(funcName, 
funcDeclaration.Block(stmts...)) + return nil +} + +func (e *equalsGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorSPI) error { + /* + func EqualsSliceOfRefOfLeaf(a, b []*Leaf) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfLeaf(a[i], b[i]) { + return false + } + } + return false + } + */ + + stmts := []jen.Code{jen.If(jen.Id("len(a) != len(b)")).Block(jen.Return(jen.False())), + jen.For(jen.Id("i := 0; i < len(a); i++")).Block( + jen.If(compareValueType(slice.Elem(), jen.Id("a[i]"), jen.Id("b[i]"), false, spi)).Block(jen.Return(jen.False()))), + jen.Return(jen.True()), + } + + typeString := types.TypeString(t, noQualifier) + funcName := equalsName + printableTypeName(t) + funcDecl := jen.Func().Id(funcName).Call(jen.List(jen.Id("a"), jen.Id("b")).Id(typeString)).Bool().Block(stmts...) + e.addFunc(funcName, funcDecl) + return nil +} + +func (e *equalsGen) basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) error { + return nil +} diff --git a/go/tools/asthelpergen/integration/ast_clone.go b/go/tools/asthelpergen/integration/ast_clone.go new file mode 100644 index 00000000000..a11806b1301 --- /dev/null +++ b/go/tools/asthelpergen/integration/ast_clone.go @@ -0,0 +1,223 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package integration + +// CloneAST creates a deep clone of the input. 
+func CloneAST(in AST) AST {
+	if in == nil {
+		return nil
+	}
+	switch in := in.(type) {
+	case BasicType:
+		return in
+	case Bytes:
+		return CloneBytes(in)
+	case InterfaceContainer:
+		return CloneInterfaceContainer(in)
+	case InterfaceSlice:
+		return CloneInterfaceSlice(in)
+	case *Leaf:
+		return CloneRefOfLeaf(in)
+	case LeafSlice:
+		return CloneLeafSlice(in)
+	case *NoCloneType:
+		return CloneRefOfNoCloneType(in)
+	case *RefContainer:
+		return CloneRefOfRefContainer(in)
+	case *RefSliceContainer:
+		return CloneRefOfRefSliceContainer(in)
+	case *SubImpl:
+		return CloneRefOfSubImpl(in)
+	case ValueContainer:
+		return CloneValueContainer(in)
+	case ValueSliceContainer:
+		return CloneValueSliceContainer(in)
+	default:
+		// this should never happen
+		return nil
+	}
+}
+
+// CloneBytes creates a deep clone of the input.
+func CloneBytes(n Bytes) Bytes {
+	res := make(Bytes, 0, len(n))
+	res = append(res, n...)
+	return res
+}
+
+// CloneInterfaceContainer creates a deep clone of the input.
+func CloneInterfaceContainer(n InterfaceContainer) InterfaceContainer {
+	return *CloneRefOfInterfaceContainer(&n)
+}
+
+// CloneInterfaceSlice creates a deep clone of the input.
+func CloneInterfaceSlice(n InterfaceSlice) InterfaceSlice {
+	res := make(InterfaceSlice, 0, len(n))
+	for _, x := range n {
+		res = append(res, CloneAST(x))
+	}
+	return res
+}
+
+// CloneRefOfLeaf creates a deep clone of the input.
+func CloneRefOfLeaf(n *Leaf) *Leaf {
+	if n == nil {
+		return nil
+	}
+	out := *n
+	return &out
+}
+
+// CloneLeafSlice creates a deep clone of the input.
+func CloneLeafSlice(n LeafSlice) LeafSlice {
+	res := make(LeafSlice, 0, len(n))
+	for _, x := range n {
+		res = append(res, CloneRefOfLeaf(x))
+	}
+	return res
+}
+
+// CloneRefOfNoCloneType creates a deep clone of the input.
+func CloneRefOfNoCloneType(n *NoCloneType) *NoCloneType {
+	return n
+}
+
+// CloneRefOfRefContainer creates a deep clone of the input.
+func CloneRefOfRefContainer(n *RefContainer) *RefContainer { + if n == nil { + return nil + } + out := *n + out.ASTType = CloneAST(n.ASTType) + out.ASTImplementationType = CloneRefOfLeaf(n.ASTImplementationType) + return &out +} + +// CloneRefOfRefSliceContainer creates a deep clone of the input. +func CloneRefOfRefSliceContainer(n *RefSliceContainer) *RefSliceContainer { + if n == nil { + return nil + } + out := *n + out.ASTElements = CloneSliceOfAST(n.ASTElements) + out.NotASTElements = CloneSliceOfInt(n.NotASTElements) + out.ASTImplementationElements = CloneSliceOfRefOfLeaf(n.ASTImplementationElements) + return &out +} + +// CloneRefOfSubImpl creates a deep clone of the input. +func CloneRefOfSubImpl(n *SubImpl) *SubImpl { + if n == nil { + return nil + } + out := *n + out.inner = CloneSubIface(n.inner) + out.field = CloneRefOfBool(n.field) + return &out +} + +// CloneValueContainer creates a deep clone of the input. +func CloneValueContainer(n ValueContainer) ValueContainer { + return *CloneRefOfValueContainer(&n) +} + +// CloneValueSliceContainer creates a deep clone of the input. +func CloneValueSliceContainer(n ValueSliceContainer) ValueSliceContainer { + return *CloneRefOfValueSliceContainer(&n) +} + +// CloneSubIface creates a deep clone of the input. +func CloneSubIface(in SubIface) SubIface { + if in == nil { + return nil + } + switch in := in.(type) { + case *SubImpl: + return CloneRefOfSubImpl(in) + default: + // this should never happen + return nil + } +} + +// CloneRefOfInterfaceContainer creates a deep clone of the input. +func CloneRefOfInterfaceContainer(n *InterfaceContainer) *InterfaceContainer { + if n == nil { + return nil + } + out := *n + out.v = n.v + return &out +} + +// CloneSliceOfAST creates a deep clone of the input. +func CloneSliceOfAST(n []AST) []AST { + res := make([]AST, 0, len(n)) + for _, x := range n { + res = append(res, CloneAST(x)) + } + return res +} + +// CloneSliceOfInt creates a deep clone of the input. 
+func CloneSliceOfInt(n []int) []int { + res := make([]int, 0, len(n)) + copy(res, n) + return res +} + +// CloneSliceOfRefOfLeaf creates a deep clone of the input. +func CloneSliceOfRefOfLeaf(n []*Leaf) []*Leaf { + res := make([]*Leaf, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfLeaf(x)) + } + return res +} + +// CloneRefOfBool creates a deep clone of the input. +func CloneRefOfBool(n *bool) *bool { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfValueContainer creates a deep clone of the input. +func CloneRefOfValueContainer(n *ValueContainer) *ValueContainer { + if n == nil { + return nil + } + out := *n + out.ASTType = CloneAST(n.ASTType) + out.ASTImplementationType = CloneRefOfLeaf(n.ASTImplementationType) + return &out +} + +// CloneRefOfValueSliceContainer creates a deep clone of the input. +func CloneRefOfValueSliceContainer(n *ValueSliceContainer) *ValueSliceContainer { + if n == nil { + return nil + } + out := *n + out.ASTElements = CloneSliceOfAST(n.ASTElements) + out.NotASTElements = CloneSliceOfInt(n.NotASTElements) + out.ASTImplementationElements = CloneSliceOfRefOfLeaf(n.ASTImplementationElements) + return &out +} diff --git a/go/tools/asthelpergen/integration/ast_equals.go b/go/tools/asthelpergen/integration/ast_equals.go new file mode 100644 index 00000000000..95bce62e7a6 --- /dev/null +++ b/go/tools/asthelpergen/integration/ast_equals.go @@ -0,0 +1,331 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package integration + +// EqualsAST does deep equals between the two objects. +func EqualsAST(inA, inB AST) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case BasicType: + b, ok := inB.(BasicType) + if !ok { + return false + } + return a == b + case Bytes: + b, ok := inB.(Bytes) + if !ok { + return false + } + return EqualsBytes(a, b) + case InterfaceContainer: + b, ok := inB.(InterfaceContainer) + if !ok { + return false + } + return EqualsInterfaceContainer(a, b) + case InterfaceSlice: + b, ok := inB.(InterfaceSlice) + if !ok { + return false + } + return EqualsInterfaceSlice(a, b) + case *Leaf: + b, ok := inB.(*Leaf) + if !ok { + return false + } + return EqualsRefOfLeaf(a, b) + case LeafSlice: + b, ok := inB.(LeafSlice) + if !ok { + return false + } + return EqualsLeafSlice(a, b) + case *NoCloneType: + b, ok := inB.(*NoCloneType) + if !ok { + return false + } + return EqualsRefOfNoCloneType(a, b) + case *RefContainer: + b, ok := inB.(*RefContainer) + if !ok { + return false + } + return EqualsRefOfRefContainer(a, b) + case *RefSliceContainer: + b, ok := inB.(*RefSliceContainer) + if !ok { + return false + } + return EqualsRefOfRefSliceContainer(a, b) + case *SubImpl: + b, ok := inB.(*SubImpl) + if !ok { + return false + } + return EqualsRefOfSubImpl(a, b) + case ValueContainer: + b, ok := inB.(ValueContainer) + if !ok { + return false + } + return EqualsValueContainer(a, b) + case ValueSliceContainer: + b, ok := inB.(ValueSliceContainer) + if !ok { + return false + } + return EqualsValueSliceContainer(a, b) + default: + // this should never happen + return false + } +} + +// EqualsBytes does deep equals between the two objects. 
+func EqualsBytes(a, b Bytes) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +// EqualsInterfaceContainer does deep equals between the two objects. +func EqualsInterfaceContainer(a, b InterfaceContainer) bool { + return true +} + +// EqualsInterfaceSlice does deep equals between the two objects. +func EqualsInterfaceSlice(a, b InterfaceSlice) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsAST(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfLeaf does deep equals between the two objects. +func EqualsRefOfLeaf(a, b *Leaf) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.v == b.v +} + +// EqualsLeafSlice does deep equals between the two objects. +func EqualsLeafSlice(a, b LeafSlice) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfLeaf(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfNoCloneType does deep equals between the two objects. +func EqualsRefOfNoCloneType(a, b *NoCloneType) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.v == b.v +} + +// EqualsRefOfRefContainer does deep equals between the two objects. +func EqualsRefOfRefContainer(a, b *RefContainer) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.NotASTType == b.NotASTType && + EqualsAST(a.ASTType, b.ASTType) && + EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType) +} + +// EqualsRefOfRefSliceContainer does deep equals between the two objects. 
+func EqualsRefOfRefSliceContainer(a, b *RefSliceContainer) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSliceOfAST(a.ASTElements, b.ASTElements) && + EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) && + EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements) +} + +// EqualsRefOfSubImpl does deep equals between the two objects. +func EqualsRefOfSubImpl(a, b *SubImpl) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSubIface(a.inner, b.inner) && + EqualsRefOfBool(a.field, b.field) +} + +// EqualsValueContainer does deep equals between the two objects. +func EqualsValueContainer(a, b ValueContainer) bool { + return a.NotASTType == b.NotASTType && + EqualsAST(a.ASTType, b.ASTType) && + EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType) +} + +// EqualsValueSliceContainer does deep equals between the two objects. +func EqualsValueSliceContainer(a, b ValueSliceContainer) bool { + return EqualsSliceOfAST(a.ASTElements, b.ASTElements) && + EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) && + EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements) +} + +// EqualsSubIface does deep equals between the two objects. +func EqualsSubIface(inA, inB SubIface) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *SubImpl: + b, ok := inB.(*SubImpl) + if !ok { + return false + } + return EqualsRefOfSubImpl(a, b) + default: + // this should never happen + return false + } +} + +// EqualsRefOfInterfaceContainer does deep equals between the two objects. +func EqualsRefOfInterfaceContainer(a, b *InterfaceContainer) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsSliceOfAST does deep equals between the two objects. 
+func EqualsSliceOfAST(a, b []AST) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsAST(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfInt does deep equals between the two objects. +func EqualsSliceOfInt(a, b []int) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +// EqualsSliceOfRefOfLeaf does deep equals between the two objects. +func EqualsSliceOfRefOfLeaf(a, b []*Leaf) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfLeaf(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfBool does deep equals between the two objects. +func EqualsRefOfBool(a, b *bool) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// EqualsRefOfValueContainer does deep equals between the two objects. +func EqualsRefOfValueContainer(a, b *ValueContainer) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.NotASTType == b.NotASTType && + EqualsAST(a.ASTType, b.ASTType) && + EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType) +} + +// EqualsRefOfValueSliceContainer does deep equals between the two objects. +func EqualsRefOfValueSliceContainer(a, b *ValueSliceContainer) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSliceOfAST(a.ASTElements, b.ASTElements) && + EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) && + EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements) +} diff --git a/go/tools/asthelpergen/integration/ast_rewrite.go b/go/tools/asthelpergen/integration/ast_rewrite.go new file mode 100644 index 00000000000..5d554e92e43 --- /dev/null +++ b/go/tools/asthelpergen/integration/ast_rewrite.go @@ -0,0 +1,498 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package integration + +func (a *application) rewriteAST(parent AST, node AST, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case BasicType: + return a.rewriteBasicType(parent, node, replacer) + case Bytes: + return a.rewriteBytes(parent, node, replacer) + case InterfaceContainer: + return a.rewriteInterfaceContainer(parent, node, replacer) + case InterfaceSlice: + return a.rewriteInterfaceSlice(parent, node, replacer) + case *Leaf: + return a.rewriteRefOfLeaf(parent, node, replacer) + case LeafSlice: + return a.rewriteLeafSlice(parent, node, replacer) + case *NoCloneType: + return a.rewriteRefOfNoCloneType(parent, node, replacer) + case *RefContainer: + return a.rewriteRefOfRefContainer(parent, node, replacer) + case *RefSliceContainer: + return a.rewriteRefOfRefSliceContainer(parent, node, replacer) + case *SubImpl: + return a.rewriteRefOfSubImpl(parent, node, replacer) + case ValueContainer: + return a.rewriteValueContainer(parent, node, replacer) + case ValueSliceContainer: + return a.rewriteValueSliceContainer(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteBytes(parent AST, node Bytes, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) 
{ + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteInterfaceContainer(parent AST, node InterfaceContainer, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteInterfaceSlice(parent AST, node InterfaceSlice, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteAST(node, el, func(idx int) replacerFunc { + return func(newNode, parent AST) { + parent.(InterfaceSlice)[idx] = newNode.(AST) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfLeaf(parent AST, node *Leaf, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteLeafSlice(parent AST, node LeafSlice, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + 
for x, el := range node { + if !a.rewriteRefOfLeaf(node, el, func(idx int) replacerFunc { + return func(newNode, parent AST) { + parent.(LeafSlice)[idx] = newNode.(*Leaf) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfNoCloneType(parent AST, node *NoCloneType, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRefContainer(parent AST, node *RefContainer, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteAST(node, node.ASTType, func(newNode, parent AST) { + parent.(*RefContainer).ASTType = newNode.(AST) + }) { + return false + } + if !a.rewriteRefOfLeaf(node, node.ASTImplementationType, func(newNode, parent AST) { + parent.(*RefContainer).ASTImplementationType = newNode.(*Leaf) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRefSliceContainer(parent AST, node *RefSliceContainer, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node.ASTElements { + if !a.rewriteAST(node, el, func(idx int) replacerFunc { + return func(newNode, 
parent AST) { + parent.(*RefSliceContainer).ASTElements[idx] = newNode.(AST) + } + }(x)) { + return false + } + } + for x, el := range node.ASTImplementationElements { + if !a.rewriteRefOfLeaf(node, el, func(idx int) replacerFunc { + return func(newNode, parent AST) { + parent.(*RefSliceContainer).ASTImplementationElements[idx] = newNode.(*Leaf) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSubImpl(parent AST, node *SubImpl, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSubIface(node, node.inner, func(newNode, parent AST) { + parent.(*SubImpl).inner = newNode.(SubIface) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteValueContainer(parent AST, node ValueContainer, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteAST(node, node.ASTType, func(newNode, parent AST) { + panic("[BUG] tried to replace 'ASTType' on 'ValueContainer'") + }) { + return false + } + if !a.rewriteRefOfLeaf(node, node.ASTImplementationType, func(newNode, parent AST) { + panic("[BUG] tried to replace 'ASTImplementationType' on 'ValueContainer'") + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteValueSliceContainer(parent AST, node ValueSliceContainer, replacer replacerFunc) bool { + if a.pre != nil { 
+ a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for _, el := range node.ASTElements { + if !a.rewriteAST(node, el, func(newNode, parent AST) { + panic("[BUG] tried to replace 'ASTElements' on 'ValueSliceContainer'") + }) { + return false + } + } + for _, el := range node.ASTImplementationElements { + if !a.rewriteRefOfLeaf(node, el, func(newNode, parent AST) { + panic("[BUG] tried to replace 'ASTImplementationElements' on 'ValueSliceContainer'") + }) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteSubIface(parent AST, node SubIface, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *SubImpl: + return a.rewriteRefOfSubImpl(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteBasicType(parent AST, node BasicType, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfInterfaceContainer(parent AST, node *InterfaceContainer, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfValueContainer(parent AST, node *ValueContainer, replacer replacerFunc) 
bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteAST(node, node.ASTType, func(newNode, parent AST) { + parent.(*ValueContainer).ASTType = newNode.(AST) + }) { + return false + } + if !a.rewriteRefOfLeaf(node, node.ASTImplementationType, func(newNode, parent AST) { + parent.(*ValueContainer).ASTImplementationType = newNode.(*Leaf) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfValueSliceContainer(parent AST, node *ValueSliceContainer, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node.ASTElements { + if !a.rewriteAST(node, el, func(idx int) replacerFunc { + return func(newNode, parent AST) { + parent.(*ValueSliceContainer).ASTElements[idx] = newNode.(AST) + } + }(x)) { + return false + } + } + for x, el := range node.ASTImplementationElements { + if !a.rewriteRefOfLeaf(node, el, func(idx int) replacerFunc { + return func(newNode, parent AST) { + parent.(*ValueSliceContainer).ASTImplementationElements[idx] = newNode.(*Leaf) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} diff --git a/go/tools/asthelpergen/integration/ast_visit.go b/go/tools/asthelpergen/integration/ast_visit.go new file mode 100644 index 00000000000..8fb3c89ad56 --- /dev/null +++ b/go/tools/asthelpergen/integration/ast_visit.go @@ -0,0 +1,242 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package integration + +func VisitAST(in AST, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case BasicType: + return VisitBasicType(in, f) + case Bytes: + return VisitBytes(in, f) + case InterfaceContainer: + return VisitInterfaceContainer(in, f) + case InterfaceSlice: + return VisitInterfaceSlice(in, f) + case *Leaf: + return VisitRefOfLeaf(in, f) + case LeafSlice: + return VisitLeafSlice(in, f) + case *NoCloneType: + return VisitRefOfNoCloneType(in, f) + case *RefContainer: + return VisitRefOfRefContainer(in, f) + case *RefSliceContainer: + return VisitRefOfRefSliceContainer(in, f) + case *SubImpl: + return VisitRefOfSubImpl(in, f) + case ValueContainer: + return VisitValueContainer(in, f) + case ValueSliceContainer: + return VisitValueSliceContainer(in, f) + default: + // this should never happen + return nil + } +} +func VisitBytes(in Bytes, f Visit) error { + _, err := f(in) + return err +} +func VisitInterfaceContainer(in InterfaceContainer, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitInterfaceSlice(in InterfaceSlice, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitAST(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfLeaf(in *Leaf, f Visit) error { + if 
in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitLeafSlice(in LeafSlice, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitRefOfLeaf(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfNoCloneType(in *NoCloneType, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfRefContainer(in *RefContainer, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitAST(in.ASTType, f); err != nil { + return err + } + if err := VisitRefOfLeaf(in.ASTImplementationType, f); err != nil { + return err + } + return nil +} +func VisitRefOfRefSliceContainer(in *RefSliceContainer, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in.ASTElements { + if err := VisitAST(el, f); err != nil { + return err + } + } + for _, el := range in.ASTImplementationElements { + if err := VisitRefOfLeaf(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfSubImpl(in *SubImpl, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSubIface(in.inner, f); err != nil { + return err + } + return nil +} +func VisitValueContainer(in ValueContainer, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitAST(in.ASTType, f); err != nil { + return err + } + if err := VisitRefOfLeaf(in.ASTImplementationType, f); err != nil { + return err + } + return nil +} +func VisitValueSliceContainer(in ValueSliceContainer, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in.ASTElements { + 
if err := VisitAST(el, f); err != nil { + return err + } + } + for _, el := range in.ASTImplementationElements { + if err := VisitRefOfLeaf(el, f); err != nil { + return err + } + } + return nil +} +func VisitSubIface(in SubIface, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *SubImpl: + return VisitRefOfSubImpl(in, f) + default: + // this should never happen + return nil + } +} +func VisitBasicType(in BasicType, f Visit) error { + _, err := f(in) + return err +} +func VisitRefOfInterfaceContainer(in *InterfaceContainer, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfValueContainer(in *ValueContainer, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitAST(in.ASTType, f); err != nil { + return err + } + if err := VisitRefOfLeaf(in.ASTImplementationType, f); err != nil { + return err + } + return nil +} +func VisitRefOfValueSliceContainer(in *ValueSliceContainer, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in.ASTElements { + if err := VisitAST(el, f); err != nil { + return err + } + } + for _, el := range in.ASTImplementationElements { + if err := VisitRefOfLeaf(el, f); err != nil { + return err + } + } + return nil +} diff --git a/go/tools/asthelpergen/integration/integration_clone_test.go b/go/tools/asthelpergen/integration/integration_clone_test.go new file mode 100644 index 00000000000..f7adf9e7eef --- /dev/null +++ b/go/tools/asthelpergen/integration/integration_clone_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCloneLeaf(t *testing.T) { + leaf1 := &Leaf{1} + clone := CloneRefOfLeaf(leaf1) + assert.Equal(t, leaf1, clone) + leaf1.v = 5 + assert.NotEqual(t, leaf1, clone) +} + +func TestClone2(t *testing.T) { + container := &RefContainer{ + ASTType: &RefContainer{}, + NotASTType: 0, + ASTImplementationType: &Leaf{2}, + } + clone := CloneRefOfRefContainer(container) + assert.Equal(t, container, clone) + container.ASTImplementationType.v = 5 + assert.NotEqual(t, container, clone) +} + +func TestTypeException(t *testing.T) { + l1 := &Leaf{1} + nc := &NoCloneType{1} + + slice := InterfaceSlice{ + l1, + nc, + } + + clone := CloneAST(slice) + + // change the original values + l1.v = 99 + nc.v = 99 + + expected := InterfaceSlice{ + &Leaf{1}, // the change is not seen + &NoCloneType{99}, // since this type is not cloned, we do see the change + } + + assert.Equal(t, expected, clone) +} diff --git a/go/tools/asthelpergen/integration/integration_equals_test.go b/go/tools/asthelpergen/integration/integration_equals_test.go new file mode 100644 index 00000000000..df3316cfe17 --- /dev/null +++ b/go/tools/asthelpergen/integration/integration_equals_test.go @@ -0,0 +1,70 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEquals(t *testing.T) { + for idxA, objA := range createObjs() { + for idxB, objB := range createObjs() { + t.Run(fmt.Sprintf("%s == %s", name(objA), name(objB)), func(t *testing.T) { + if idxA == idxB { + require.True(t, EqualsAST(objA, objB)) + } else { + require.False(t, EqualsAST(objA, objB)) + } + }) + } + } +} + +func createObjs() []AST { + t := true + return []AST{ + nil, + &Leaf{1}, + &Leaf{2}, + &RefContainer{ASTType: &Leaf{1}, ASTImplementationType: &Leaf{2}}, + ValueContainer{ASTType: ValueContainer{ASTType: &Leaf{1}, ASTImplementationType: &Leaf{2}}}, + &RefSliceContainer{ASTElements: []AST{&Leaf{1}, &Leaf{2}}, ASTImplementationElements: []*Leaf{{3}, {4}}}, + ValueSliceContainer{ASTElements: []AST{&Leaf{1}, &Leaf{2}}, ASTImplementationElements: []*Leaf{{3}, {4}}}, + InterfaceSlice{ + &RefContainer{ + ASTType: &RefContainer{NotASTType: 12}, + ASTImplementationType: &Leaf{2}, + }, + &Leaf{2}, + &Leaf{3}, + }, + &SubImpl{ + inner: &SubImpl{}, + field: &t, + }, + } +} + +func name(a AST) string { + if a == nil { + return "nil" + } + return a.String() +} diff --git a/go/tools/asthelpergen/integration/integration_rewriter_test.go b/go/tools/asthelpergen/integration/integration_rewriter_test.go new file mode 100644 index 00000000000..7699fe45f6a --- /dev/null +++ b/go/tools/asthelpergen/integration/integration_rewriter_test.go @@ -0,0 +1,353 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/stretchr/testify/assert" +) + +func TestRewriteVisitRefContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + container := &RefContainer{ASTType: leaf1, ASTImplementationType: leaf2} + containerContainer := &RefContainer{ASTType: container} + + tv := &rewriteTestVisitor{} + + _ = Rewrite(containerContainer, tv.pre, tv.post) + + expected := []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Post{container}, + Post{containerContainer}, + } + tv.assertEquals(t, expected) +} + +func TestRewriteVisitValueContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + container := ValueContainer{ASTType: leaf1, ASTImplementationType: leaf2} + containerContainer := ValueContainer{ASTType: container} + + tv := &rewriteTestVisitor{} + + _ = Rewrite(containerContainer, tv.pre, tv.post) + + expected := []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Post{container}, + Post{containerContainer}, + } + tv.assertEquals(t, expected) +} + +func TestRewriteVisitRefSliceContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + leaf3 := &Leaf{3} + leaf4 := &Leaf{4} + container := &RefSliceContainer{ASTElements: []AST{leaf1, leaf2}, ASTImplementationElements: 
[]*Leaf{leaf3, leaf4}} + containerContainer := &RefSliceContainer{ASTElements: []AST{container}} + + tv := &rewriteTestVisitor{} + + _ = Rewrite(containerContainer, tv.pre, tv.post) + + tv.assertEquals(t, []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Pre{leaf3}, + Post{leaf3}, + Pre{leaf4}, + Post{leaf4}, + Post{container}, + Post{containerContainer}, + }) +} + +func TestRewriteVisitValueSliceContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + leaf3 := &Leaf{3} + leaf4 := &Leaf{4} + container := ValueSliceContainer{ASTElements: []AST{leaf1, leaf2}, ASTImplementationElements: []*Leaf{leaf3, leaf4}} + containerContainer := ValueSliceContainer{ASTElements: []AST{container}} + + tv := &rewriteTestVisitor{} + + _ = Rewrite(containerContainer, tv.pre, tv.post) + + tv.assertEquals(t, []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Pre{leaf3}, + Post{leaf3}, + Pre{leaf4}, + Post{leaf4}, + Post{container}, + Post{containerContainer}, + }) +} + +func TestRewriteVisitInterfaceSlice(t *testing.T) { + leaf1 := &Leaf{2} + astType := &RefContainer{NotASTType: 12} + implementationType := &Leaf{2} + + leaf2 := &Leaf{3} + refContainer := &RefContainer{ + ASTType: astType, + ASTImplementationType: implementationType, + } + ast := InterfaceSlice{ + refContainer, + leaf1, + leaf2, + } + + tv := &rewriteTestVisitor{} + + _ = Rewrite(ast, tv.pre, tv.post) + + tv.assertEquals(t, []step{ + Pre{ast}, + Pre{refContainer}, + Pre{astType}, + Post{astType}, + Pre{implementationType}, + Post{implementationType}, + Post{refContainer}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Post{ast}, + }) +} + +func TestRewriteVisitRefContainerReplace(t *testing.T) { + ast := &RefContainer{ + ASTType: &RefContainer{NotASTType: 12}, + ASTImplementationType: &Leaf{2}, + } + + // rewrite field of type AST + _ = Rewrite(ast, func(cursor *Cursor) bool 
{ + leaf, ok := cursor.node.(*RefContainer) + if ok && leaf.NotASTType == 12 { + cursor.Replace(&Leaf{99}) + } + return true + }, nil) + + assert.Equal(t, &RefContainer{ + ASTType: &Leaf{99}, + ASTImplementationType: &Leaf{2}, + }, ast) + + _ = Rewrite(ast, rewriteLeaf(2, 55), nil) + + assert.Equal(t, &RefContainer{ + ASTType: &Leaf{99}, + ASTImplementationType: &Leaf{55}, + }, ast) +} + +func TestRewriteVisitValueContainerReplace(t *testing.T) { + + ast := ValueContainer{ + ASTType: ValueContainer{NotASTType: 12}, + ASTImplementationType: &Leaf{2}, + } + + defer func() { + if r := recover(); r != nil { + require.Equal(t, "[BUG] tried to replace 'ASTType' on 'ValueContainer'", r) + } + }() + _ = Rewrite(ast, func(cursor *Cursor) bool { + leaf, ok := cursor.node.(ValueContainer) + if ok && leaf.NotASTType == 12 { + cursor.Replace(&Leaf{99}) + } + return true + }, nil) + +} + +func TestRewriteVisitValueContainerReplace2(t *testing.T) { + ast := ValueContainer{ + ASTType: ValueContainer{NotASTType: 12}, + ASTImplementationType: &Leaf{2}, + } + + defer func() { + if r := recover(); r != nil { + require.Equal(t, "[BUG] tried to replace 'ASTImplementationType' on 'ValueContainer'", r) + } + }() + _ = Rewrite(ast, rewriteLeaf(2, 10), nil) +} + +func TestRewriteVisitRefContainerPreOrPostOnly(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + container := &RefContainer{ASTType: leaf1, ASTImplementationType: leaf2} + containerContainer := &RefContainer{ASTType: container} + + tv := &rewriteTestVisitor{} + + _ = Rewrite(containerContainer, tv.pre, nil) + tv.assertEquals(t, []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Pre{leaf2}, + }) + + tv = &rewriteTestVisitor{} + _ = Rewrite(containerContainer, nil, tv.post) + tv.assertEquals(t, []step{ + Post{leaf1}, + Post{leaf2}, + Post{container}, + Post{containerContainer}, + }) +} + +func rewriteLeaf(from, to int) func(*Cursor) bool { + return func(cursor *Cursor) bool { + leaf, ok := 
cursor.node.(*Leaf) + if ok && leaf.v == from { + cursor.Replace(&Leaf{to}) + } + return true + } +} + +func TestRefSliceContainerReplace(t *testing.T) { + ast := &RefSliceContainer{ + ASTElements: []AST{&Leaf{1}, &Leaf{2}}, + ASTImplementationElements: []*Leaf{{3}, {4}}, + } + + _ = Rewrite(ast, rewriteLeaf(2, 42), nil) + + assert.Equal(t, &RefSliceContainer{ + ASTElements: []AST{&Leaf{1}, &Leaf{42}}, + ASTImplementationElements: []*Leaf{{3}, {4}}, + }, ast) + + _ = Rewrite(ast, rewriteLeaf(3, 88), nil) + + assert.Equal(t, &RefSliceContainer{ + ASTElements: []AST{&Leaf{1}, &Leaf{42}}, + ASTImplementationElements: []*Leaf{{88}, {4}}, + }, ast) +} + +type step interface { + String() string +} +type Pre struct { + el AST +} + +func (r Pre) String() string { + return fmt.Sprintf("Pre(%s)", r.el.String()) +} +func (r Post) String() string { + return fmt.Sprintf("Post(%s)", r.el.String()) +} + +type Post struct { + el AST +} + +type rewriteTestVisitor struct { + walk []step +} + +func (tv *rewriteTestVisitor) pre(cursor *Cursor) bool { + tv.walk = append(tv.walk, Pre{el: cursor.Node()}) + return true +} +func (tv *rewriteTestVisitor) post(cursor *Cursor) bool { + tv.walk = append(tv.walk, Post{el: cursor.Node()}) + return true +} +func (tv *rewriteTestVisitor) assertEquals(t *testing.T, expected []step) { + t.Helper() + var lines []string + error := false + expectedSize := len(expected) + for i, step := range tv.walk { + if expectedSize <= i { + t.Errorf("❌️ - Expected less elements %v", tv.walk[i:]) + break + } else { + e := expected[i] + if reflect.DeepEqual(e, step) { + a := "✔️ - " + e.String() + if error { + fmt.Println(a) + } else { + lines = append(lines, a) + } + } else { + if !error { + // first error we see. 
+ error = true + for _, line := range lines { + fmt.Println(line) + } + } + t.Errorf("❌️ - Expected: %s Got: %s\n", e.String(), step.String()) + } + } + } + walkSize := len(tv.walk) + if expectedSize > walkSize { + t.Errorf("❌️ - Expected more elements %v", expected[walkSize:]) + } + +} diff --git a/go/tools/asthelpergen/integration/integration_visit_test.go b/go/tools/asthelpergen/integration/integration_visit_test.go new file mode 100644 index 00000000000..31c0d6451c4 --- /dev/null +++ b/go/tools/asthelpergen/integration/integration_visit_test.go @@ -0,0 +1,186 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +type testVisitor struct { + seen []AST +} + +func (tv *testVisitor) visit(node AST) (bool, error) { + tv.seen = append(tv.seen, node) + return true, nil +} + +func TestVisitRefContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + container := &RefContainer{ASTType: leaf1, ASTImplementationType: leaf2} + containerContainer := &RefContainer{ASTType: container} + + tv := &testVisitor{} + + require.NoError(t, + VisitAST(containerContainer, tv.visit)) + + tv.assertVisitOrder(t, []AST{ + containerContainer, + container, + leaf1, + leaf2, + }) +} + +func TestVisitValueContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + container := ValueContainer{ASTType: leaf1, ASTImplementationType: leaf2} + containerContainer := ValueContainer{ASTType: container} + + tv := &testVisitor{} + + require.NoError(t, + VisitAST(containerContainer, tv.visit)) + + expected := []AST{ + containerContainer, + container, + leaf1, + leaf2, + } + tv.assertVisitOrder(t, expected) +} + +func TestVisitRefSliceContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + leaf3 := &Leaf{3} + leaf4 := &Leaf{4} + container := &RefSliceContainer{ASTElements: []AST{leaf1, leaf2}, ASTImplementationElements: []*Leaf{leaf3, leaf4}} + containerContainer := &RefSliceContainer{ASTElements: []AST{container}} + + tv := &testVisitor{} + + require.NoError(t, + VisitAST(containerContainer, tv.visit)) + + tv.assertVisitOrder(t, []AST{ + containerContainer, + container, + leaf1, + leaf2, + leaf3, + leaf4, + }) +} + +func TestVisitValueSliceContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + leaf3 := &Leaf{3} + leaf4 := &Leaf{4} + container := ValueSliceContainer{ASTElements: []AST{leaf1, leaf2}, ASTImplementationElements: []*Leaf{leaf3, leaf4}} + containerContainer := ValueSliceContainer{ASTElements: []AST{container}} + + tv := &testVisitor{} + + 
require.NoError(t, + VisitAST(containerContainer, tv.visit)) + + tv.assertVisitOrder(t, []AST{ + containerContainer, + container, + leaf1, + leaf2, + leaf3, + leaf4, + }) +} + +func TestVisitInterfaceSlice(t *testing.T) { + leaf1 := &Leaf{2} + astType := &RefContainer{NotASTType: 12} + implementationType := &Leaf{2} + + leaf2 := &Leaf{3} + refContainer := &RefContainer{ + ASTType: astType, + ASTImplementationType: implementationType, + } + ast := InterfaceSlice{ + refContainer, + leaf1, + leaf2, + } + + tv := &testVisitor{} + + require.NoError(t, + VisitAST(ast, tv.visit)) + + tv.assertVisitOrder(t, []AST{ + ast, + refContainer, + astType, + implementationType, + leaf1, + leaf2, + }) +} + +func (tv *testVisitor) assertVisitOrder(t *testing.T, expected []AST) { + t.Helper() + var lines []string + failed := false + expectedSize := len(expected) + for i, step := range tv.seen { + if expectedSize <= i { + t.Errorf("❌️ - Expected less elements %v", tv.seen[i:]) + break + } else { + e := expected[i] + if reflect.DeepEqual(e, step) { + a := "✔️ - " + e.String() + if failed { + fmt.Println(a) + } else { + lines = append(lines, a) + } + } else { + if !failed { + // first error we see. + failed = true + for _, line := range lines { + fmt.Println(line) + } + } + t.Errorf("❌️ - Expected: %s Got: %s\n", e.String(), step.String()) + } + } + } + walkSize := len(tv.seen) + if expectedSize > walkSize { + t.Errorf("❌️ - Expected more elements %v", expected[walkSize:]) + } +} diff --git a/go/tools/asthelpergen/integration/test_helpers.go b/go/tools/asthelpergen/integration/test_helpers.go new file mode 100644 index 00000000000..923f6f7c546 --- /dev/null +++ b/go/tools/asthelpergen/integration/test_helpers.go @@ -0,0 +1,81 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "strings" +) + +// ast type helpers + +func sliceStringAST(els ...AST) string { + result := make([]string, len(els)) + for i, el := range els { + result[i] = el.String() + } + return strings.Join(result, ", ") +} +func sliceStringLeaf(els ...*Leaf) string { + result := make([]string, len(els)) + for i, el := range els { + result[i] = el.String() + } + return strings.Join(result, ", ") +} + +// the methods below are what the generated code expected to be there in the package + +// ApplyFunc is apply function +type ApplyFunc func(*Cursor) bool + +// Cursor is cursor +type Cursor struct { + parent AST + replacer replacerFunc + node AST +} + +// Node returns the current Node. +func (c *Cursor) Node() AST { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() AST { return c.parent } + +// Replace replaces the current node in the parent field with this new object. The use needs to make sure to not +// replace the object with something of the wrong type, or the visitor will panic. +func (c *Cursor) Replace(newNode AST) { + c.replacer(newNode, c.parent) + c.node = newNode +} + +type replacerFunc func(newNode, parent AST) + +// Rewrite is the api. 
+func Rewrite(node AST, pre, post ApplyFunc) AST { + outer := &struct{ AST }{node} + + a := &application{ + pre: pre, + post: post, + } + + a.rewriteAST(outer, node, func(newNode, parent AST) { + outer.AST = newNode + }) + + return outer.AST +} diff --git a/go/tools/asthelpergen/integration/types.go b/go/tools/asthelpergen/integration/types.go new file mode 100644 index 00000000000..3bed2b5e009 --- /dev/null +++ b/go/tools/asthelpergen/integration/types.go @@ -0,0 +1,180 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//nolint +package integration + +import ( + "fmt" + "strings" +) + +/* +These types are used to test the rewriter generator against these types. 
+To recreate them, just run: + +go run go/tools/asthelpergen -in ./go/tools/asthelpergen/integration -iface vitess.io/vitess/go/tools/asthelpergen/integration.AST -except "*NoCloneType" +*/ +// AST is the interface all interface types implement +type AST interface { + String() string +} + +// Empty struct impl of the iface +type Leaf struct { + v int +} + +func (l *Leaf) String() string { + if l == nil { + return "nil" + } + return fmt.Sprintf("Leaf(%d)", l.v) +} + +// Container implements the interface ByRef +type RefContainer struct { + ASTType AST + NotASTType int + ASTImplementationType *Leaf +} + +func (r *RefContainer) String() string { + if r == nil { + return "nil" + } + var astType = "" + if r.ASTType == nil { + astType = "nil" + } else { + astType = r.ASTType.String() + } + return fmt.Sprintf("RefContainer{%s, %d, %s}", astType, r.NotASTType, r.ASTImplementationType.String()) +} + +// Container implements the interface ByRef +type RefSliceContainer struct { + ASTElements []AST + NotASTElements []int + ASTImplementationElements []*Leaf +} + +func (r *RefSliceContainer) String() string { + return fmt.Sprintf("RefSliceContainer{%s, %s, %s}", sliceStringAST(r.ASTElements...), "r.NotASTType", sliceStringLeaf(r.ASTImplementationElements...)) +} + +// Container implements the interface ByValue +type ValueContainer struct { + ASTType AST + NotASTType int + ASTImplementationType *Leaf +} + +func (r ValueContainer) String() string { + return fmt.Sprintf("ValueContainer{%s, %d, %s}", r.ASTType.String(), r.NotASTType, r.ASTImplementationType.String()) +} + +// Container implements the interface ByValue +type ValueSliceContainer struct { + ASTElements []AST + NotASTElements []int + ASTImplementationElements []*Leaf +} + +func (r ValueSliceContainer) String() string { + return fmt.Sprintf("ValueSliceContainer{%s, %s, %s}", sliceStringAST(r.ASTElements...), "r.NotASTType", sliceStringLeaf(r.ASTImplementationElements...)) +} + +// We need to support these types - a slice 
of AST elements can implement the interface +type InterfaceSlice []AST + +func (r InterfaceSlice) String() string { + var elements []string + for _, el := range r { + elements = append(elements, el.String()) + } + + return "[" + strings.Join(elements, ", ") + "]" +} + +// We need to support these types - a slice of AST elements can implement the interface +type Bytes []byte + +func (r Bytes) String() string { + return string(r) +} + +type LeafSlice []*Leaf + +func (r LeafSlice) String() string { + var elements []string + for _, el := range r { + elements = append(elements, el.String()) + } + return strings.Join(elements, ", ") +} + +type BasicType int + +func (r BasicType) String() string { + return fmt.Sprintf("int(%d)", r) +} + +const ( + // these consts are here to try to trick the generator + thisIsNotAType BasicType = 1 + thisIsNotAType2 BasicType = 2 +) + +// We want to support all types that are used as field types, which can include interfaces. +// Example would be sqlparser.Expr that implements sqlparser.SQLNode +type SubIface interface { + AST + iface() +} + +type SubImpl struct { + inner SubIface + field *bool +} + +func (r *SubImpl) String() string { + return "SubImpl" +} +func (r *SubImpl) iface() {} + +type InterfaceContainer struct { + v interface{} +} + +func (r InterfaceContainer) String() string { + return fmt.Sprintf("%v", r.v) +} + +type NoCloneType struct { + v int +} + +func (r *NoCloneType) String() string { + return fmt.Sprintf("NoClone(%d)", r.v) +} + +type Visit func(node AST) (bool, error) + +type application struct { + pre, post ApplyFunc + cur Cursor +} diff --git a/go/tools/asthelpergen/main/main.go b/go/tools/asthelpergen/main/main.go new file mode 100644 index 00000000000..9225aa7615d --- /dev/null +++ b/go/tools/asthelpergen/main/main.go @@ -0,0 +1,39 @@ +package main + +import ( + "flag" + "log" + + . 
"vitess.io/vitess/go/tools/asthelpergen" +) + +func main() { + var patterns TypePaths + var generate, except string + var verify bool + + flag.Var(&patterns, "in", "Go packages to load the generator") + flag.StringVar(&generate, "iface", "", "Root interface generate rewriter for") + flag.BoolVar(&verify, "verify", false, "ensure that the generated files are correct") + flag.StringVar(&except, "except", "", "don't deep clone these types") + flag.Parse() + + result, err := GenerateASTHelpers(patterns, generate, except) + if err != nil { + log.Fatal(err) + } + + if verify { + for _, err := range VerifyFilesOnDisk(result) { + log.Fatal(err) + } + log.Printf("%d files OK", len(result)) + } else { + for fullPath, file := range result { + if err := file.Save(fullPath); err != nil { + log.Fatalf("failed to save file to '%s': %v", fullPath, err) + } + log.Printf("saved '%s'", fullPath) + } + } +} diff --git a/go/tools/asthelpergen/rewrite_gen.go b/go/tools/asthelpergen/rewrite_gen.go new file mode 100644 index 00000000000..1a7d2411d7e --- /dev/null +++ b/go/tools/asthelpergen/rewrite_gen.go @@ -0,0 +1,387 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package asthelpergen + +import ( + "fmt" + "go/types" + + "github.com/dave/jennifer/jen" +) + +const ( + rewriteName = "rewrite" +) + +type rewriteGen struct { + ifaceName string + file *jen.File +} + +var _ generator = (*rewriteGen)(nil) + +func newRewriterGen(pkgname string, ifaceName string) *rewriteGen { + file := jen.NewFile(pkgname) + file.HeaderComment(licenseFileHeader) + file.HeaderComment("Code generated by ASTHelperGen. DO NOT EDIT.") + + return &rewriteGen{ + ifaceName: ifaceName, + file: file, + } +} + +func (r *rewriteGen) genFile() (string, *jen.File) { + return "ast_rewrite.go", r.file +} + +func (r *rewriteGen) interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + /* + func VisitAST(in AST) (bool, error) { + if in == nil { + return false, nil + } + switch a := inA.(type) { + case *SubImpl: + return VisitSubImpl(a, b) + default: + return false, nil + } + } + */ + stmts := []jen.Code{ + jen.If(jen.Id("node == nil").Block(returnTrue())), + } + + var cases []jen.Code + _ = spi.findImplementations(iface, func(t types.Type) error { + if _, ok := t.Underlying().(*types.Interface); ok { + return nil + } + typeString := types.TypeString(t, noQualifier) + funcName := rewriteName + printableTypeName(t) + spi.addType(t) + caseBlock := jen.Case(jen.Id(typeString)).Block( + jen.Return(jen.Id("a").Dot(funcName).Call(jen.Id("parent, node, replacer"))), + ) + cases = append(cases, caseBlock) + return nil + }) + + cases = append(cases, + jen.Default().Block( + jen.Comment("this should never happen"), + returnTrue(), + )) + + stmts = append(stmts, jen.Switch(jen.Id("node := node.(type)").Block( + cases..., + ))) + + r.rewriteFunc(t, stmts) + return nil +} + +func (r *rewriteGen) structMethod(t types.Type, strct *types.Struct, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + fields := r.rewriteAllStructFields(t, strct, spi, true) + + stmts := 
[]jen.Code{executePre()} + stmts = append(stmts, fields...) + stmts = append(stmts, executePost(len(fields) > 0)) + stmts = append(stmts, returnTrue()) + + r.rewriteFunc(t, stmts) + + return nil +} + +func (r *rewriteGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + /* + if node == nil { return nil } + */ + stmts := []jen.Code{jen.If(jen.Id("node == nil").Block(returnTrue()))} + + /* + if !pre(&cur) { + return nil + } + */ + stmts = append(stmts, executePre()) + fields := r.rewriteAllStructFields(t, strct, spi, false) + stmts = append(stmts, fields...) + stmts = append(stmts, executePost(len(fields) > 0)) + stmts = append(stmts, returnTrue()) + + r.rewriteFunc(t, stmts) + + return nil +} + +func (r *rewriteGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + /* + */ + + stmts := []jen.Code{ + jen.Comment("ptrToBasicMethod"), + } + r.rewriteFunc(t, stmts) + + return nil +} + +func (r *rewriteGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + /* + if node == nil { + return nil + } + cur := Cursor{ + node: node, + parent: parent, + replacer: replacer, + } + if !pre(&cur) { + return nil + } + */ + stmts := []jen.Code{ + jen.If(jen.Id("node == nil").Block(returnTrue())), + } + stmts = append(stmts, executePre()) + + haveChildren := false + if shouldAdd(slice.Elem(), spi.iface()) { + /* + for i, el := range node { + if err := rewriteRefOfLeaf(node, el, func(newNode, parent AST) { + parent.(LeafSlice)[i] = newNode.(*Leaf) + }, pre, post); err != nil { + return err + } + } + */ + haveChildren = true + stmts = append(stmts, + jen.For(jen.Id("x, el").Op(":=").Id("range node")). 
+ Block(r.rewriteChildSlice(t, slice.Elem(), "notUsed", jen.Id("el"), jen.Index(jen.Id("idx")), false))) + } + + stmts = append(stmts, executePost(haveChildren)) + stmts = append(stmts, returnTrue()) + + r.rewriteFunc(t, stmts) + return nil +} + +func setupCursor() []jen.Code { + return []jen.Code{ + jen.Id("a.cur.replacer = replacer"), + jen.Id("a.cur.parent = parent"), + jen.Id("a.cur.node = node"), + } +} +func executePre() jen.Code { + curStmts := setupCursor() + curStmts = append(curStmts, jen.If(jen.Id("!a.pre(&a.cur)")).Block(returnTrue())) + return jen.If(jen.Id("a.pre!= nil").Block(curStmts...)) +} + +func executePost(seenChildren bool) jen.Code { + var curStmts []jen.Code + if seenChildren { + // if we have visited children, we have to write to the cursor fields + curStmts = setupCursor() + } else { + curStmts = append(curStmts, + jen.If(jen.Id("a.pre == nil")).Block(setupCursor()...)) + } + + curStmts = append(curStmts, jen.If(jen.Id("!a.post(&a.cur)")).Block(returnFalse())) + + return jen.If(jen.Id("a.post != nil")).Block(curStmts...) +} + +func (r *rewriteGen) basicMethod(t types.Type, _ *types.Basic, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + stmts := []jen.Code{executePre(), executePost(false), returnTrue()} + r.rewriteFunc(t, stmts) + return nil +} + +func (r *rewriteGen) rewriteFunc(t types.Type, stmts []jen.Code) { + + /* + func (a *application) rewriteNodeType(parent AST, node NodeType, replacer replacerFunc) { + */ + + typeString := types.TypeString(t, noQualifier) + funcName := fmt.Sprintf("%s%s", rewriteName, printableTypeName(t)) + code := jen.Func().Params( + jen.Id("a").Op("*").Id("application"), + ).Id(funcName).Params( + jen.Id(fmt.Sprintf("parent %s, node %s, replacer replacerFunc", r.ifaceName, typeString)), + ).Bool().Block(stmts...) 
+ + r.file.Add(code) +} + +func (r *rewriteGen) rewriteAllStructFields(t types.Type, strct *types.Struct, spi generatorSPI, fail bool) []jen.Code { + /* + if errF := rewriteAST(node, node.ASTType, func(newNode, parent AST) { + err = vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] tried to replace '%s' on '%s'") + }, pre, post); errF != nil { + return errF + } + + */ + var output []jen.Code + for i := 0; i < strct.NumFields(); i++ { + field := strct.Field(i) + if types.Implements(field.Type(), spi.iface()) { + spi.addType(field.Type()) + output = append(output, r.rewriteChild(t, field.Type(), field.Name(), jen.Id("node").Dot(field.Name()), jen.Dot(field.Name()), fail)) + continue + } + slice, isSlice := field.Type().(*types.Slice) + if isSlice && types.Implements(slice.Elem(), spi.iface()) { + spi.addType(slice.Elem()) + id := jen.Id("x") + if fail { + id = jen.Id("_") + } + output = append(output, + jen.For(jen.List(id, jen.Id("el")).Op(":=").Id("range node."+field.Name())). + Block(r.rewriteChildSlice(t, slice.Elem(), field.Name(), jen.Id("el"), jen.Dot(field.Name()).Index(jen.Id("idx")), fail))) + } + } + return output +} + +func failReplacer(t types.Type, f string) *jen.Statement { + typeString := types.TypeString(t, noQualifier) + return jen.Panic(jen.Lit(fmt.Sprintf("[BUG] tried to replace '%s' on '%s'", f, typeString))) +} + +func (r *rewriteGen) rewriteChild(t, field types.Type, fieldName string, param jen.Code, replace jen.Code, fail bool) jen.Code { + /* + if errF := rewriteAST(node, node.ASTType, func(newNode, parent AST) { + parent.(*RefContainer).ASTType = newNode.(AST) + }, pre, post); errF != nil { + return errF + } + + if errF := rewriteAST(node, el, func(newNode, parent AST) { + parent.(*RefSliceContainer).ASTElements[i] = newNode.(AST) + }, pre, post); errF != nil { + return errF + } + + */ + funcName := rewriteName + printableTypeName(field) + var replaceOrFail *jen.Statement + if fail { + replaceOrFail = failReplacer(t, fieldName) + } else { + 
replaceOrFail = jen.Id("parent"). + Assert(jen.Id(types.TypeString(t, noQualifier))). + Add(replace). + Op("="). + Id("newNode").Assert(jen.Id(types.TypeString(field, noQualifier))) + + } + funcBlock := jen.Func().Call(jen.Id("newNode, parent").Id(r.ifaceName)). + Block(replaceOrFail) + + rewriteField := jen.If( + jen.Op("!").Id("a").Dot(funcName).Call( + jen.Id("node"), + param, + funcBlock).Block(returnFalse())) + + return rewriteField +} + +func (r *rewriteGen) rewriteChildSlice(t, field types.Type, fieldName string, param jen.Code, replace jen.Code, fail bool) jen.Code { + /* + if errF := a.rewriteAST(node, el, func(idx int) replacerFunc { + return func(newNode, parent AST) { + parent.(InterfaceSlice)[idx] = newNode.(AST) + } + }(i)); errF != nil { + return errF + } + + if errF := a.rewriteAST(node, el, func(newNode, parent AST) { + return errr... + }); errF != nil { + return errF + } + + */ + + funcName := rewriteName + printableTypeName(field) + var funcBlock jen.Code + replacerFuncDef := jen.Func().Call(jen.Id("newNode, parent").Id(r.ifaceName)) + if fail { + funcBlock = replacerFuncDef.Block(failReplacer(t, fieldName)) + } else { + funcBlock = jen.Func().Call(jen.Id("idx int")).Id("replacerFunc"). 
+ Block(jen.Return(replacerFuncDef.Block( + jen.Id("parent").Assert(jen.Id(types.TypeString(t, noQualifier))).Add(replace).Op("=").Id("newNode").Assert(jen.Id(types.TypeString(field, noQualifier)))), + )).Call(jen.Id("x")) + } + + rewriteField := jen.If( + jen.Op("!").Id("a").Dot(funcName).Call( + jen.Id("node"), + param, + funcBlock).Block(returnFalse())) + + return rewriteField +} + +var noQualifier = func(p *types.Package) string { + return "" +} + +func returnTrue() jen.Code { + return jen.Return(jen.True()) +} + +func returnFalse() jen.Code { + return jen.Return(jen.False()) +} diff --git a/go/tools/asthelpergen/visit_gen.go b/go/tools/asthelpergen/visit_gen.go new file mode 100644 index 00000000000..51d8651f090 --- /dev/null +++ b/go/tools/asthelpergen/visit_gen.go @@ -0,0 +1,263 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package asthelpergen + +import ( + "go/types" + + "github.com/dave/jennifer/jen" +) + +const visitName = "Visit" + +type visitGen struct { + file *jen.File +} + +var _ generator = (*visitGen)(nil) + +func newVisitGen(pkgname string) *visitGen { + file := jen.NewFile(pkgname) + file.HeaderComment(licenseFileHeader) + file.HeaderComment("Code generated by ASTHelperGen. 
DO NOT EDIT.") + + return &visitGen{ + file: file, + } +} + +func (v *visitGen) genFile() (string, *jen.File) { + return "ast_visit.go", v.file +} + +func shouldAdd(t types.Type, i *types.Interface) bool { + return types.Implements(t, i) +} + +func (v *visitGen) interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + /* + func VisitAST(in AST) (bool, error) { + if in == nil { + return false, nil + } + switch a := inA.(type) { + case *SubImpl: + return VisitSubImpl(a, b) + default: + return false, nil + } + } + */ + stmts := []jen.Code{ + jen.If(jen.Id("in == nil").Block(returnNil())), + } + + var cases []jen.Code + _ = spi.findImplementations(iface, func(t types.Type) error { + if _, ok := t.Underlying().(*types.Interface); ok { + return nil + } + typeString := types.TypeString(t, noQualifier) + funcName := visitName + printableTypeName(t) + spi.addType(t) + caseBlock := jen.Case(jen.Id(typeString)).Block( + jen.Return(jen.Id(funcName).Call(jen.Id("in"), jen.Id("f"))), + ) + cases = append(cases, caseBlock) + return nil + }) + + cases = append(cases, + jen.Default().Block( + jen.Comment("this should never happen"), + returnNil(), + )) + + stmts = append(stmts, jen.Switch(jen.Id("in := in.(type)").Block( + cases..., + ))) + + v.visitFunc(t, stmts) + return nil +} + +func returnNil() jen.Code { + return jen.Return(jen.Nil()) +} + +func (v *visitGen) structMethod(t types.Type, strct *types.Struct, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + /* + func VisitRefOfRefContainer(in *RefContainer, f func(node AST) (kontinue bool, err error)) (bool, error) { + if cont, err := f(in); err != nil || !cont { + return false, err + } + if k, err := VisitRefOfLeaf(in.ASTImplementationType, f); err != nil || !k { + return false, err + } + if k, err := VisitAST(in.ASTType, f); err != nil || !k { + return false, err + } + return true, nil + } + */ + + stmts := 
visitAllStructFields(strct, spi) + v.visitFunc(t, stmts) + + return nil +} + +func (v *visitGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + /* + func VisitRefOfRefContainer(in *RefContainer, f func(node AST) (kontinue bool, err error)) (bool, error) { + if in == nil { + return true, nil + } + if cont, err := f(in); err != nil || !cont { + return false, err + } + if k, err := VisitRefOfLeaf(in.ASTImplementationType, f); err != nil || !k { + return false, err + } + if k, err := VisitAST(in.ASTType, f); err != nil || !k { + return false, err + } + return true, nil + } + */ + + stmts := []jen.Code{ + jen.If(jen.Id("in == nil").Block(returnNil())), + } + stmts = append(stmts, visitAllStructFields(strct, spi)...) + v.visitFunc(t, stmts) + + return nil +} + +func (v *visitGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + stmts := []jen.Code{ + jen.Comment("ptrToBasicMethod"), + } + + v.visitFunc(t, stmts) + + return nil +} + +func (v *visitGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + if !shouldAdd(slice.Elem(), spi.iface()) { + return v.visitNoChildren(t, spi) + } + + stmts := []jen.Code{ + jen.If(jen.Id("in == nil").Block(returnNil())), + visitIn(), + jen.For(jen.Id("_, el := range in")).Block( + visitChild(slice.Elem(), jen.Id("el")), + ), + returnNil(), + } + + v.visitFunc(t, stmts) + + return nil +} + +func (v *visitGen) basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) error { + if !shouldAdd(t, spi.iface()) { + return nil + } + + return v.visitNoChildren(t, spi) +} + +func (v *visitGen) visitNoChildren(t types.Type, spi generatorSPI) error { + stmts := []jen.Code{ + jen.Id("_, err := f(in)"), + jen.Return(jen.Err()), + } + + v.visitFunc(t, stmts) + + return nil +} + +func 
visitAllStructFields(strct *types.Struct, spi generatorSPI) []jen.Code { + output := []jen.Code{ + visitIn(), + } + for i := 0; i < strct.NumFields(); i++ { + field := strct.Field(i) + if types.Implements(field.Type(), spi.iface()) { + spi.addType(field.Type()) + visitField := visitChild(field.Type(), jen.Id("in").Dot(field.Name())) + output = append(output, visitField) + continue + } + slice, isSlice := field.Type().(*types.Slice) + if isSlice && types.Implements(slice.Elem(), spi.iface()) { + spi.addType(slice.Elem()) + output = append(output, jen.For(jen.Id("_, el := range in."+field.Name())).Block( + visitChild(slice.Elem(), jen.Id("el")), + )) + } + } + output = append(output, returnNil()) + return output +} + +func visitChild(t types.Type, id jen.Code) *jen.Statement { + funcName := visitName + printableTypeName(t) + visitField := jen.If( + jen.Id("err := ").Id(funcName).Call(id, jen.Id("f")), + jen.Id("err != nil "), + ).Block(jen.Return(jen.Err())) + return visitField +} + +func visitIn() *jen.Statement { + return jen.If( + jen.Id("cont, err := ").Id("f").Call(jen.Id("in")), + jen.Id("err != nil || !cont"), + ).Block(jen.Return(jen.Err())) +} + +func (v *visitGen) visitFunc(t types.Type, stmts []jen.Code) { + typeString := types.TypeString(t, noQualifier) + funcName := visitName + printableTypeName(t) + v.file.Add(jen.Func().Id(funcName).Call(jen.Id("in").Id(typeString), jen.Id("f Visit")).Error().Block(stmts...)) +} diff --git a/go/tools/sizegen/integration/cached_size.go b/go/tools/sizegen/integration/cached_size.go new file mode 100644 index 00000000000..7ceba285138 --- /dev/null +++ b/go/tools/sizegen/integration/cached_size.go @@ -0,0 +1,225 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package integration + +import ( + "math" + "reflect" + "unsafe" +) + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *A) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + return size +} +func (cached *Bimpl) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + return size +} +func (cached *C) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field field1 vitess.io/vitess/go/tools/sizegen/integration.B + if cc, ok := cached.field1.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *D) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field field1 *vitess.io/vitess/go/tools/sizegen/integration.Bimpl + if cached.field1 != nil { + size += int64(8) + } + return size +} + +//go:nocheckptr +func (cached *Map1) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field field1 map[uint8]uint8 + if cached.field1 != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.field1) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size 
+= int64(numOldBuckets * 32) + if len(cached.field1) > 0 || numBuckets > 1 { + size += int64(numBuckets * 32) + } + } + return size +} + +//go:nocheckptr +func (cached *Map2) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field field1 map[uint64]vitess.io/vitess/go/tools/sizegen/integration.A + if cached.field1 != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.field1) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 208) + if len(cached.field1) > 0 || numBuckets > 1 { + size += int64(numBuckets * 208) + } + } + return size +} + +//go:nocheckptr +func (cached *Map3) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field field1 map[uint64]vitess.io/vitess/go/tools/sizegen/integration.B + if cached.field1 != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.field1) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 208) + if len(cached.field1) > 0 || numBuckets > 1 { + size += int64(numBuckets * 208) + } + for _, v := range cached.field1 { + if cc, ok := v.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *Padded) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + return size +} +func (cached *Slice1) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field field1 []vitess.io/vitess/go/tools/sizegen/integration.A + { + size += int64(cap(cached.field1)) * 
int64(16) + } + return size +} +func (cached *Slice2) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field field1 []vitess.io/vitess/go/tools/sizegen/integration.B + { + size += int64(cap(cached.field1)) * int64(16) + for _, elem := range cached.field1 { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *Slice3) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field field1 []*vitess.io/vitess/go/tools/sizegen/integration.Bimpl + { + size += int64(cap(cached.field1)) * int64(8) + for _, elem := range cached.field1 { + if elem != nil { + size += int64(8) + } + } + } + return size +} +func (cached *String1) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field field1 string + size += int64(len(cached.field1)) + return size +} diff --git a/go/tools/sizegen/integration/integration_test.go b/go/tools/sizegen/integration/integration_test.go new file mode 100644 index 00000000000..d2c22a2cbcd --- /dev/null +++ b/go/tools/sizegen/integration/integration_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "testing" +) + +func TestTypeSizes(t *testing.T) { + const PtrSize = 8 + const SliceHeaderSize = 3 * PtrSize + const FatPointerSize = 2 * PtrSize + const BucketHeaderSize = 8 + const BucketSize = 8 + const HashMapHeaderSize = 48 + + cases := []struct { + obj cachedObject + size int64 + }{ + {&A{}, 16}, + {&C{}, 16}, + {&C{field1: &Bimpl{}}, 24}, + {&D{}, 8}, + {&D{field1: &Bimpl{}}, 16}, + {&Padded{}, 24}, + + {&Slice1{}, 24}, + {&Slice1{field1: []A{}}, SliceHeaderSize}, + {&Slice1{field1: []A{{}}}, SliceHeaderSize + 16}, + {&Slice1{field1: []A{{}, {}, {}, {}}}, SliceHeaderSize + 16*4}, + + {&Slice2{}, SliceHeaderSize}, + {&Slice2{field1: []B{}}, SliceHeaderSize}, + {&Slice2{field1: []B{&Bimpl{}}}, SliceHeaderSize + FatPointerSize*1 + 8*1}, + {&Slice2{field1: []B{&Bimpl{}, &Bimpl{}, &Bimpl{}, &Bimpl{}}}, SliceHeaderSize + FatPointerSize*4 + 8*4}, + + {&Slice3{}, SliceHeaderSize}, + {&Slice3{field1: []*Bimpl{}}, SliceHeaderSize}, + {&Slice3{field1: []*Bimpl{nil}}, SliceHeaderSize + PtrSize*1 + 0}, + {&Slice3{field1: []*Bimpl{nil, nil, nil, nil}}, SliceHeaderSize + PtrSize*4 + 0}, + {&Slice3{field1: []*Bimpl{{}}}, SliceHeaderSize + PtrSize*1 + 8*1}, + {&Slice3{field1: []*Bimpl{{}, {}, {}, {}}}, SliceHeaderSize + PtrSize*4 + 8*4}, + + {&Map1{field1: nil}, PtrSize}, + {&Map1{field1: map[uint8]uint8{}}, PtrSize + HashMapHeaderSize}, + {&Map1{field1: map[uint8]uint8{0: 0}}, PtrSize + HashMapHeaderSize + BucketHeaderSize + 1*BucketSize + 1*BucketSize + PtrSize}, + + {&Map2{field1: nil}, PtrSize}, + {&Map2{field1: map[uint64]A{}}, PtrSize + HashMapHeaderSize}, + {&Map2{field1: map[uint64]A{0: {}}}, PtrSize + HashMapHeaderSize + BucketHeaderSize + 8*BucketSize + 16*BucketSize + PtrSize}, + + {&Map3{field1: nil}, PtrSize}, + {&Map3{field1: map[uint64]B{}}, PtrSize + HashMapHeaderSize}, + {&Map3{field1: map[uint64]B{0: &Bimpl{}}}, PtrSize + HashMapHeaderSize + BucketHeaderSize + 8*BucketSize + FatPointerSize*BucketSize + 
PtrSize + 8}, + {&Map3{field1: map[uint64]B{0: nil}}, PtrSize + HashMapHeaderSize + BucketHeaderSize + 8*BucketSize + FatPointerSize*BucketSize + PtrSize}, + + {&String1{}, PtrSize*2 + 8}, + {&String1{field1: "1234"}, PtrSize*2 + 8 + 4}, + } + + for _, tt := range cases { + t.Run(fmt.Sprintf("sizeof(%T)", tt.obj), func(t *testing.T) { + size := tt.obj.CachedSize(true) + if size != tt.size { + t.Errorf("expected %T to be %d bytes, got %d", tt.obj, tt.size, size) + } + }) + } +} diff --git a/go/tools/sizegen/integration/types.go b/go/tools/sizegen/integration/types.go new file mode 100644 index 00000000000..c05b08cfd07 --- /dev/null +++ b/go/tools/sizegen/integration/types.go @@ -0,0 +1,76 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +//nolint +package integration + +type A struct { + field1 uint64 + field2 uint64 +} + +type B interface { + iface() +} + +type Bimpl struct { + field1 uint64 +} + +func (b *Bimpl) iface() {} + +type C struct { + field1 B +} + +type D struct { + field1 *Bimpl +} + +type Padded struct { + field1 uint64 + field2 uint8 + field3 uint64 +} + +type Slice1 struct { + field1 []A +} + +type Slice2 struct { + field1 []B +} + +type Slice3 struct { + field1 []*Bimpl +} + +type Map1 struct { + field1 map[uint8]uint8 +} + +type Map2 struct { + field1 map[uint64]A +} + +type Map3 struct { + field1 map[uint64]B +} + +type String1 struct { + field1 string + field2 uint64 +} diff --git a/go/tools/sizegen/sizegen.go b/go/tools/sizegen/sizegen.go new file mode 100644 index 00000000000..c108f44fa39 --- /dev/null +++ b/go/tools/sizegen/sizegen.go @@ -0,0 +1,546 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/types" + "io/ioutil" + "log" + "path" + "sort" + "strings" + + "github.com/dave/jennifer/jen" + "golang.org/x/tools/go/packages" +) + +const licenseFileHeader = `Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.` + +type sizegen struct { + DebugTypes bool + mod *packages.Module + sizes types.Sizes + codegen map[string]*codeFile + known map[*types.Named]*typeState +} + +type codeFlag uint32 + +const ( + codeWithInterface = 1 << 0 + codeWithUnsafe = 1 << 1 +) + +type codeImpl struct { + name string + flags codeFlag + code jen.Code +} + +type codeFile struct { + pkg string + impls []codeImpl +} + +type typeState struct { + generated bool + local bool + pod bool // struct with only primitives +} + +func newSizegen(mod *packages.Module, sizes types.Sizes) *sizegen { + return &sizegen{ + DebugTypes: true, + mod: mod, + sizes: sizes, + known: make(map[*types.Named]*typeState), + codegen: make(map[string]*codeFile), + } +} + +func isPod(tt types.Type) bool { + switch tt := tt.(type) { + case *types.Struct: + for i := 0; i < tt.NumFields(); i++ { + if !isPod(tt.Field(i).Type()) { + return false + } + } + return true + + case *types.Basic: + switch tt.Kind() { + case types.String, types.UnsafePointer: + return false + } + return true + + default: + return false + } +} + +func (sizegen *sizegen) getKnownType(named *types.Named) *typeState { + ts := sizegen.known[named] + if ts == nil { + local := strings.HasPrefix(named.Obj().Pkg().Path(), sizegen.mod.Path) + ts = &typeState{ + local: local, + pod: isPod(named.Underlying()), + } + sizegen.known[named] = ts + } + return ts +} + +func (sizegen *sizegen) generateType(pkg *types.Package, file *codeFile, named *types.Named) { + ts := sizegen.getKnownType(named) + if ts.generated { + return + } + ts.generated = true + + switch tt := 
named.Underlying().(type) { + case *types.Struct: + if impl, flag := sizegen.sizeImplForStruct(named.Obj(), tt); impl != nil { + file.impls = append(file.impls, codeImpl{ + code: impl, + name: named.String(), + flags: flag, + }) + } + case *types.Interface: + findImplementations(pkg.Scope(), tt, func(tt types.Type) { + if _, isStruct := tt.Underlying().(*types.Struct); isStruct { + sizegen.generateType(pkg, file, tt.(*types.Named)) + } + }) + default: + // no-op + } +} + +func (sizegen *sizegen) generateKnownType(named *types.Named) { + pkgInfo := named.Obj().Pkg() + file := sizegen.codegen[pkgInfo.Path()] + if file == nil { + file = &codeFile{pkg: pkgInfo.Name()} + sizegen.codegen[pkgInfo.Path()] = file + } + + sizegen.generateType(pkgInfo, file, named) +} + +func findImplementations(scope *types.Scope, iff *types.Interface, impl func(types.Type)) { + for _, name := range scope.Names() { + obj := scope.Lookup(name) + baseType := obj.Type() + if types.Implements(baseType, iff) || types.Implements(types.NewPointer(baseType), iff) { + impl(baseType) + } + } +} + +func (sizegen *sizegen) finalize() map[string]*jen.File { + var complete bool + + for !complete { + complete = true + for tt, ts := range sizegen.known { + isComplex := !ts.pod + notYetGenerated := !ts.generated + if ts.local && isComplex && notYetGenerated { + sizegen.generateKnownType(tt) + complete = false + } + } + } + + outputFiles := make(map[string]*jen.File) + + for pkg, file := range sizegen.codegen { + if len(file.impls) == 0 { + continue + } + if !strings.HasPrefix(pkg, sizegen.mod.Path) { + log.Printf("failed to generate code for foreign package '%s'", pkg) + log.Printf("DEBUG:\n%#v", file) + continue + } + + sort.Slice(file.impls, func(i, j int) bool { + return strings.Compare(file.impls[i].name, file.impls[j].name) < 0 + }) + + out := jen.NewFile(file.pkg) + out.HeaderComment(licenseFileHeader) + out.HeaderComment("Code generated by Sizegen. 
DO NOT EDIT.") + + for _, impl := range file.impls { + if impl.flags&codeWithInterface != 0 { + out.Add(jen.Type().Id("cachedObject").InterfaceFunc(func(i *jen.Group) { + i.Id("CachedSize").Params(jen.Id("alloc").Id("bool")).Int64() + })) + break + } + } + + for _, impl := range file.impls { + if impl.flags&codeWithUnsafe != 0 { + out.Commentf("//go:nocheckptr") + } + out.Add(impl.code) + } + + fullPath := path.Join(sizegen.mod.Dir, strings.TrimPrefix(pkg, sizegen.mod.Path), "cached_size.go") + outputFiles[fullPath] = out + } + + return outputFiles +} + +func (sizegen *sizegen) sizeImplForStruct(name *types.TypeName, st *types.Struct) (jen.Code, codeFlag) { + if sizegen.sizes.Sizeof(st) == 0 { + return nil, 0 + } + + var stmt []jen.Code + var funcFlags codeFlag + for i := 0; i < st.NumFields(); i++ { + field := st.Field(i) + fieldType := field.Type() + fieldName := jen.Id("cached").Dot(field.Name()) + + fieldStmt, flag := sizegen.sizeStmtForType(fieldName, fieldType, false) + if fieldStmt != nil { + if sizegen.DebugTypes { + stmt = append(stmt, jen.Commentf("%s", field.String())) + } + stmt = append(stmt, fieldStmt) + } + funcFlags |= flag + } + + f := jen.Func() + f.Params(jen.Id("cached").Op("*").Id(name.Name())) + f.Id("CachedSize").Params(jen.Id("alloc").Id("bool")).Int64() + f.BlockFunc(func(b *jen.Group) { + b.Add(jen.If(jen.Id("cached").Op("==").Nil()).Block(jen.Return(jen.Lit(int64(0))))) + b.Add(jen.Id("size").Op(":=").Lit(int64(0))) + b.Add(jen.If(jen.Id("alloc")).Block( + jen.Id("size").Op("+=").Lit(sizegen.sizes.Sizeof(st)), + )) + for _, s := range stmt { + b.Add(s) + } + b.Add(jen.Return(jen.Id("size"))) + }) + return f, funcFlags +} + +func (sizegen *sizegen) sizeStmtForMap(fieldName *jen.Statement, m *types.Map) []jen.Code { + const bucketCnt = 8 + const sizeofHmap = int64(6 * 8) + + /* + type bmap struct { + // tophash generally contains the top byte of the hash value + // for each key in this bucket. 
If tophash[0] < minTopHash, + // tophash[0] is a bucket evacuation state instead. + tophash [bucketCnt]uint8 + // Followed by bucketCnt keys and then bucketCnt elems. + // NOTE: packing all the keys together and then all the elems together makes the + // code a bit more complicated than alternating key/elem/key/elem/... but it allows + // us to eliminate padding which would be needed for, e.g., map[int64]int8. + // Followed by an overflow pointer. + } + */ + sizeOfBucket := int( + bucketCnt + // tophash + bucketCnt*sizegen.sizes.Sizeof(m.Key()) + + bucketCnt*sizegen.sizes.Sizeof(m.Elem()) + + 8, // overflow pointer + ) + + return []jen.Code{ + jen.Id("size").Op("+=").Lit(sizeofHmap), + + jen.Id("hmap").Op(":=").Qual("reflect", "ValueOf").Call(fieldName), + + jen.Id("numBuckets").Op(":=").Id("int").Call( + jen.Qual("math", "Pow").Call(jen.Lit(2), jen.Id("float64").Call( + jen.Parens(jen.Op("*").Parens(jen.Op("*").Id("uint8")).Call( + jen.Qual("unsafe", "Pointer").Call(jen.Id("hmap").Dot("Pointer").Call(). 
+ Op("+").Id("uintptr").Call(jen.Lit(9)))))))), + + jen.Id("numOldBuckets").Op(":=").Parens(jen.Op("*").Parens(jen.Op("*").Id("uint16")).Call( + jen.Qual("unsafe", "Pointer").Call( + jen.Id("hmap").Dot("Pointer").Call().Op("+").Id("uintptr").Call(jen.Lit(10))))), + + jen.Id("size").Op("+=").Id("int64").Call(jen.Id("numOldBuckets").Op("*").Lit(sizeOfBucket)), + + jen.If(jen.Id("len").Call(fieldName).Op(">").Lit(0).Op("||").Id("numBuckets").Op(">").Lit(1)).Block( + jen.Id("size").Op("+=").Id("int64").Call( + jen.Id("numBuckets").Op("*").Lit(sizeOfBucket))), + } +} + +func (sizegen *sizegen) sizeStmtForType(fieldName *jen.Statement, field types.Type, alloc bool) (jen.Code, codeFlag) { + if sizegen.sizes.Sizeof(field) == 0 { + return nil, 0 + } + + switch node := field.(type) { + case *types.Slice: + elemT := node.Elem() + elemSize := sizegen.sizes.Sizeof(elemT) + + switch elemSize { + case 0: + return nil, 0 + + case 1: + return jen.Id("size").Op("+=").Int64().Call(jen.Cap(fieldName)), 0 + + default: + stmt, flag := sizegen.sizeStmtForType(jen.Id("elem"), elemT, false) + return jen.BlockFunc(func(b *jen.Group) { + b.Add( + jen.Id("size"). + Op("+="). + Int64().Call(jen.Cap(fieldName)). + Op("*"). 
+ Lit(sizegen.sizes.Sizeof(elemT))) + + if stmt != nil { + b.Add(jen.For(jen.List(jen.Id("_"), jen.Id("elem")).Op(":=").Range().Add(fieldName)).Block(stmt)) + } + }), flag + } + + case *types.Map: + keySize, keyFlag := sizegen.sizeStmtForType(jen.Id("k"), node.Key(), false) + valSize, valFlag := sizegen.sizeStmtForType(jen.Id("v"), node.Elem(), false) + + return jen.If(fieldName.Clone().Op("!=").Nil()).BlockFunc(func(block *jen.Group) { + for _, stmt := range sizegen.sizeStmtForMap(fieldName, node) { + block.Add(stmt) + } + + var forLoopVars []jen.Code + switch { + case keySize != nil && valSize != nil: + forLoopVars = []jen.Code{jen.Id("k"), jen.Id("v")} + case keySize == nil && valSize != nil: + forLoopVars = []jen.Code{jen.Id("_"), jen.Id("v")} + case keySize != nil && valSize == nil: + forLoopVars = []jen.Code{jen.Id("k")} + case keySize == nil && valSize == nil: + return + } + + block.Add(jen.For(jen.List(forLoopVars...).Op(":=").Range().Add(fieldName))).BlockFunc(func(b *jen.Group) { + if keySize != nil { + b.Add(keySize) + } + if valSize != nil { + b.Add(valSize) + } + }) + }), codeWithUnsafe | keyFlag | valFlag + + case *types.Pointer: + return sizegen.sizeStmtForType(fieldName, node.Elem(), true) + + case *types.Named: + ts := sizegen.getKnownType(node) + if ts.pod || !ts.local { + if alloc { + if !ts.local { + log.Printf("WARNING: size of external type %s cannot be fully calculated", node) + } + return jen.If(fieldName.Clone().Op("!=").Nil()).Block( + jen.Id("size").Op("+=").Lit(sizegen.sizes.Sizeof(node.Underlying())), + ), 0 + } + return nil, 0 + } + return sizegen.sizeStmtForType(fieldName, node.Underlying(), alloc) + + case *types.Interface: + if node.Empty() { + return nil, 0 + } + return jen.If( + jen.List( + jen.Id("cc"), jen.Id("ok")). + Op(":="). + Add(fieldName.Clone().Assert(jen.Id("cachedObject"))), + jen.Id("ok"), + ).Block( + jen.Id("size"). + Op("+="). + Id("cc"). + Dot("CachedSize"). 
+				Call(jen.True()),
+		), codeWithInterface
+
+	case *types.Struct:
+		return jen.Id("size").Op("+=").Add(fieldName.Clone().Dot("CachedSize").Call(jen.Lit(alloc))), 0
+
+	case *types.Basic:
+		if !alloc {
+			if node.Info()&types.IsString != 0 {
+				return jen.Id("size").Op("+=").Int64().Call(jen.Len(fieldName)), 0
+			}
+			return nil, 0
+		}
+		return jen.Id("size").Op("+=").Lit(sizegen.sizes.Sizeof(node)), 0
+	default:
+		log.Printf("unhandled type: %T", node)
+		return nil, 0
+	}
+}
+
+type typePaths []string
+
+func (t *typePaths) String() string {
+	return fmt.Sprintf("%v", *t)
+}
+
+func (t *typePaths) Set(path string) error {
+	*t = append(*t, path)
+	return nil
+}
+
+func main() {
+	var patterns typePaths
+	var generate typePaths
+	var verify bool
+
+	flag.Var(&patterns, "in", "Go packages to load the generator")
+	flag.Var(&generate, "gen", "Typename of the Go struct to generate size info for")
+	flag.BoolVar(&verify, "verify", false, "ensure that the generated files are correct")
+	flag.Parse()
+
+	result, err := GenerateSizeHelpers(patterns, generate)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if verify {
+		for _, err := range VerifyFilesOnDisk(result) {
+			log.Fatal(err)
+		}
+		log.Printf("%d files OK", len(result))
+	} else {
+		for fullPath, file := range result {
+			if err := file.Save(fullPath); err != nil {
+				log.Fatalf("failed to save file to '%s': %v", fullPath, err)
+			}
+			log.Printf("saved '%s'", fullPath)
+		}
+	}
+}
+
+// VerifyFilesOnDisk compares the generated results from the codegen against the files that
+// currently exist on disk and returns any mismatches
+func VerifyFilesOnDisk(result map[string]*jen.File) (errors []error) {
+	for fullPath, file := range result {
+		existing, err := ioutil.ReadFile(fullPath)
+		if err != nil {
+			errors = append(errors, fmt.Errorf("missing file on disk: %s (%w)", fullPath, err))
+			continue
+		}
+
+		var buf bytes.Buffer
+		if err := file.Render(&buf); err != nil {
+			errors = append(errors, fmt.Errorf("render error for '%s': 
%w", fullPath, err)) + continue + } + + if !bytes.Equal(existing, buf.Bytes()) { + errors = append(errors, fmt.Errorf("'%s' has changed", fullPath)) + continue + } + } + return errors +} + +// GenerateSizeHelpers generates the auxiliary code that implements CachedSize helper methods +// for all the types listed in typePatterns +func GenerateSizeHelpers(packagePatterns []string, typePatterns []string) (map[string]*jen.File, error) { + loaded, err := packages.Load(&packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | packages.NeedDeps | packages.NeedImports | packages.NeedModule, + }, packagePatterns...) + + if err != nil { + return nil, err + } + + sizegen := newSizegen(loaded[0].Module, loaded[0].TypesSizes) + + scopes := make(map[string]*types.Scope) + for _, pkg := range loaded { + scopes[pkg.PkgPath] = pkg.Types.Scope() + } + + for _, gen := range typePatterns { + pos := strings.LastIndexByte(gen, '.') + if pos < 0 { + return nil, fmt.Errorf("unexpected input type: %s", gen) + } + + pkgname := gen[:pos] + typename := gen[pos+1:] + + scope := scopes[pkgname] + if scope == nil { + return nil, fmt.Errorf("no scope found for type '%s'", gen) + } + + if typename == "*" { + for _, name := range scope.Names() { + sizegen.generateKnownType(scope.Lookup(name).Type().(*types.Named)) + } + } else { + tt := scope.Lookup(typename) + if tt == nil { + return nil, fmt.Errorf("no type called '%s' found in '%s'", typename, pkgname) + } + + sizegen.generateKnownType(tt.Type().(*types.Named)) + } + } + + return sizegen.finalize(), nil +} diff --git a/go/tools/sizegen/sizegen_test.go b/go/tools/sizegen/sizegen_test.go new file mode 100644 index 00000000000..7b549d727c4 --- /dev/null +++ b/go/tools/sizegen/sizegen_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFullGeneration(t *testing.T) { + result, err := GenerateSizeHelpers([]string{"./integration/..."}, []string{"vitess.io/vitess/go/tools/sizegen/integration.*"}) + require.NoError(t, err) + + verifyErrors := VerifyFilesOnDisk(result) + require.Empty(t, verifyErrors) + + for _, file := range result { + contents := fmt.Sprintf("%#v", file) + require.Contains(t, contents, "http://www.apache.org/licenses/LICENSE-2.0") + require.Contains(t, contents, "type cachedObject interface") + require.Contains(t, contents, "//go:nocheckptr") + } +} diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index ad36b31202d..42142cd08d6 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -558,6 +558,7 @@ func CreateVReplicationTable() []string { var AlterVReplicationTable = []string{ "ALTER TABLE _vt.vreplication ADD COLUMN db_name VARBINARY(255) NOT NULL", "ALTER TABLE _vt.vreplication MODIFY source BLOB NOT NULL", + "ALTER TABLE _vt.vreplication ADD KEY workflow_idx (workflow(64))", } // VRSettings contains the settings of a vreplication table. 
diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go index 128f8ba1790..a8cce64a0c9 100644 --- a/go/vt/binlog/event_streamer.go +++ b/go/vt/binlog/event_streamer.go @@ -200,7 +200,7 @@ func parsePkNames(tokenizer *sqlparser.Tokenizer) ([]*querypb.Field, error) { Name: string(val), }) default: - return nil, fmt.Errorf("syntax error at position: %d", tokenizer.Position) + return nil, fmt.Errorf("syntax error at position: %d", tokenizer.Pos) } } return columns, nil @@ -297,15 +297,14 @@ func parsePkTuple(tokenizer *sqlparser.Tokenizer, insertid int64, fields []*quer return nil, insertid, fmt.Errorf("incompatible string field with type %v", fields[index].Type) } - decoded := make([]byte, base64.StdEncoding.DecodedLen(len(val))) - numDecoded, err := base64.StdEncoding.Decode(decoded, val) + decoded, err := base64.StdEncoding.DecodeString(val) if err != nil { return nil, insertid, err } - result.Lengths = append(result.Lengths, int64(numDecoded)) - result.Values = append(result.Values, decoded[:numDecoded]...) + result.Lengths = append(result.Lengths, int64(len(decoded))) + result.Values = append(result.Values, decoded...) default: - return nil, insertid, fmt.Errorf("syntax error at position: %d", tokenizer.Position) + return nil, insertid, fmt.Errorf("syntax error at position: %d", tokenizer.Pos) } index++ } diff --git a/go/vt/concurrency/error_group.go b/go/vt/concurrency/error_group.go new file mode 100644 index 00000000000..49b5762b248 --- /dev/null +++ b/go/vt/concurrency/error_group.go @@ -0,0 +1,106 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package concurrency + +import "context" + +// ErrorGroup provides a function for waiting for N goroutines to complete with +// at least X successes and no more than Y failures, and cancelling the rest. +// +// It should be used as follows: +// +// errCh := make(chan error) +// errgroupCtx, errgroupCancel := context.WithCancel(ctx) +// +// for _, arg := range args { +// arg := arg +// +// go func() { +// err := doWork(errgroupCtx, arg) +// errCh <- err +// }() +// } +// +// errgroup := concurrency.ErrorGroup{ +// NumGoroutines: len(args), +// NumRequiredSuccesses: 5, // need at least 5 to respond with nil error before cancelling the rest +// NumAllowedErrors: 1, // if more than 1 responds with non-nil error, cancel the rest +// } +// errRec := errgroup.Wait(errgroupCancel, errCh) +// +// if errRec.HasErrors() { +// // ... +// } +type ErrorGroup struct { + NumGoroutines int + NumRequiredSuccesses int + NumAllowedErrors int +} + +// Wait waits for a group of goroutines that are sending errors to the given +// error channel, and are cancellable by the given cancel function. +// +// Wait will cancel any outstanding goroutines under the following conditions: +// +// (1) More than NumAllowedErrors non-nil results have been consumed on the +// error channel. +// +// (2) At least NumRequiredSuccesses nil results have been consumed on the error +// channel. +// +// After the cancellation condition is triggered, Wait will continue to consume +// results off the error channel so as to not permanently block any of those +// cancelled goroutines.
+// +// When finished consuming results from all goroutines, cancelled or otherwise, +// Wait returns an AllErrorRecorder that contains all errors returned by any of +// those goroutines. It does not close the error channel. +func (eg ErrorGroup) Wait(cancel context.CancelFunc, errors chan error) *AllErrorRecorder { + errCounter := 0 + successCounter := 0 + responseCounter := 0 + rec := &AllErrorRecorder{} + + if eg.NumGoroutines < 1 { + return rec + } + + for err := range errors { + responseCounter++ + + switch err { + case nil: + successCounter++ + default: + errCounter++ + rec.RecordError(err) + } + + // Even though we cancel in the next conditional, we need to keep + // consuming off the channel, or those goroutines will get stuck + // forever. + if responseCounter == eg.NumGoroutines { + break + } + + if errCounter > eg.NumAllowedErrors || successCounter >= eg.NumRequiredSuccesses { + cancel() + } + } + + return rec +} diff --git a/go/vt/concurrency/error_group_test.go b/go/vt/concurrency/error_group_test.go new file mode 100644 index 00000000000..9222dbc2d25 --- /dev/null +++ b/go/vt/concurrency/error_group_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package concurrency + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorGroup(t *testing.T) { + t.Run("Wait() returns immediately when NumGoroutines = 0", func(t *testing.T) { + eg := ErrorGroup{ + NumGoroutines: 0, + } + + rec := eg.Wait(nil, nil) + assert.NoError(t, rec.Error()) + }) +} diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index 9004e48bfef..83a4048fe87 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -450,8 +450,14 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Targ } } case isPrimary && !isPrimaryUp: - // No healthy master tablet - hc.healthy[targetKey] = []*TabletHealth{} + if healthy, ok := hc.healthy[targetKey]; ok && len(healthy) > 0 { + // isPrimary is true here therefore we should only have 1 tablet in healthy + alias := tabletAliasString(topoproto.TabletAliasString(healthy[0].Tablet.Alias)) + // Clear healthy list for primary if the existing tablet is down + if alias == tabletAlias { + hc.healthy[targetKey] = []*TabletHealth{} + } + } } if !trivialUpdate { diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index 73ba30a665f..54d3c303a12 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -324,6 +324,84 @@ func TestHealthCheckErrorOnPrimary(t *testing.T) { assert.Empty(t, a, "wrong result, expected empty list") } +func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { + ts := memorytopo.NewServer("cell") + hc := createTestHc(ts) + defer hc.Close() + + resultChan := hc.Subscribe() + + tablet1 := createTestTablet(0, "cell", "a") + input1 := make(chan *querypb.StreamHealthResponse) + fc1 := createFakeConn(tablet1, input1) + fc1.errCh = make(chan error) + hc.AddTablet(tablet1) + <-resultChan + + tablet2 := createTestTablet(1, "cell", "b") + tablet2.Type = topodatapb.TabletType_REPLICA + input2 := make(chan 
*querypb.StreamHealthResponse) + createFakeConn(tablet2, input2) + hc.AddTablet(tablet2) + <-resultChan + + shr2 := &querypb.StreamHealthResponse{ + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + TabletExternallyReparentedTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 10, CpuUsage: 0.2}, + } + input2 <- shr2 + <-resultChan + shr1 := &querypb.StreamHealthResponse{ + TabletAlias: tablet1.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, + Serving: true, + TabletExternallyReparentedTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 0, CpuUsage: 0.2}, + } + input1 <- shr1 + <-resultChan + // tablet 1 is the primary now + health := []*TabletHealth{{ + Tablet: tablet1, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 0, CpuUsage: 0.2}, + MasterTermStartTime: 10, + }} + a := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}) + mustMatch(t, health, a, "unexpected result") + + shr2 = &querypb.StreamHealthResponse{ + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, + Serving: true, + TabletExternallyReparentedTimestamp: 20, + RealtimeStats: &querypb.RealtimeStats{SecondsBehindMaster: 0, CpuUsage: 0.2}, + } + input2 <- shr2 + <-resultChan + // reparent: tablet 2 is the primary now + health = []*TabletHealth{{ + Tablet: tablet2, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, + Serving: true, + Stats: &querypb.RealtimeStats{SecondsBehindMaster: 0, CpuUsage: 0.2}, + MasterTermStartTime: 20, + }} + a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: 
topodatapb.TabletType_MASTER}) + mustMatch(t, health, a, "unexpected result") + + // Stream error from tablet 1 + fc1.errCh <- fmt.Errorf("some stream error") + <-resultChan + // tablet 2 should still be the master + a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}) + mustMatch(t, health, a, "unexpected result") +} + func TestHealthCheckVerifiesTabletAlias(t *testing.T) { ts := memorytopo.NewServer("cell") hc := createTestHc(ts) diff --git a/go/vt/grpcoptionaltls/conn_wrapper.go b/go/vt/grpcoptionaltls/conn_wrapper.go new file mode 100755 index 00000000000..5659a9170f3 --- /dev/null +++ b/go/vt/grpcoptionaltls/conn_wrapper.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package grpcoptionaltls + +import ( + "bytes" + "io" + "net" +) + +// WrappedConn imitates MSG_PEEK behaviour +// Unlike net.Conn is not thread-safe for reading already peeked bytes +type WrappedConn struct { + net.Conn + rd io.Reader +} + +func NewWrappedConn(conn net.Conn, peeked []byte) net.Conn { + var rd = io.MultiReader(bytes.NewReader(peeked), conn) + return &WrappedConn{ + Conn: conn, + rd: rd, + } +} + +func (wc *WrappedConn) Read(b []byte) (n int, err error) { + return wc.rd.Read(b) +} diff --git a/go/vt/grpcoptionaltls/optionaltls.go b/go/vt/grpcoptionaltls/optionaltls.go new file mode 100755 index 00000000000..c18f80412a6 --- /dev/null +++ b/go/vt/grpcoptionaltls/optionaltls.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package grpcoptionaltls + +import ( + "net" + + "google.golang.org/grpc/credentials" +) + +type optionalTLSCreds struct { + credentials.TransportCredentials +} + +func (c *optionalTLSCreds) Clone() credentials.TransportCredentials { + return New(c.TransportCredentials.Clone()) +} + +func (c *optionalTLSCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + isTLS, bytes, err := DetectTLS(conn) + if err != nil { + conn.Close() + return nil, nil, err + } + + var wc net.Conn = NewWrappedConn(conn, bytes) + if isTLS { + return c.TransportCredentials.ServerHandshake(wc) + } + + var authInfo = info{ + CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}, + } + + return wc, authInfo, nil +} + +func New(tc credentials.TransportCredentials) credentials.TransportCredentials { + return &optionalTLSCreds{TransportCredentials: tc} +} + +type info struct { + credentials.CommonAuthInfo +} + +func (info) AuthType() string { + return "insecure" +} diff --git a/go/vt/grpcoptionaltls/server_test.go b/go/vt/grpcoptionaltls/server_test.go new file mode 100755 index 00000000000..a0f6e6c8ea0 --- /dev/null +++ b/go/vt/grpcoptionaltls/server_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package grpcoptionaltls + +import ( + "context" + "crypto/tls" + "io/ioutil" + "net" + "os" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + pb "google.golang.org/grpc/examples/helloworld/helloworld" + + "vitess.io/vitess/go/vt/tlstest" +) + +// server is used to implement helloworld.GreeterServer. +type server struct { + pb.UnimplementedGreeterServer +} + +// SayHello implements helloworld.GreeterServer +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + return &pb.HelloReply{Message: "Hello " + in.GetName()}, nil +} + +func createUnstartedServer(creds credentials.TransportCredentials) *grpc.Server { + s := grpc.NewServer(grpc.Creds(creds)) + pb.RegisterGreeterServer(s, &server{}) + return s +} + +type testCredentials struct { + client credentials.TransportCredentials + server credentials.TransportCredentials +} + +func createCredentials() (*testCredentials, error) { + // Create a temporary directory. 
+ certDir, err := ioutil.TempDir("", "optionaltls_grpc_test") + if err != nil { + return nil, err + } + defer os.RemoveAll(certDir) + + certs := tlstest.CreateClientServerCertPairs(certDir) + cert, err := tls.LoadX509KeyPair(certs.ServerCert, certs.ServerKey) + if err != nil { + return nil, err + } + + clientCredentials, err := credentials.NewClientTLSFromFile(certs.ServerCA, certs.ServerName) + if err != nil { + return nil, err + } + tc := &testCredentials{ + client: clientCredentials, + server: credentials.NewServerTLSFromCert(&cert), + } + return tc, nil +} + +func TestOptionalTLS(t *testing.T) { + testCtx, testCancel := context.WithCancel(context.Background()) + defer testCancel() + + tc, err := createCredentials() + if err != nil { + t.Fatalf("failed to create credentials %v", err) + } + + lis, err := net.Listen("tcp", "") + if err != nil { + t.Fatalf("failed to listen %v", err) + } + defer lis.Close() + addr := lis.Addr().String() + + srv := createUnstartedServer(New(tc.server)) + go func() { + srv.Serve(lis) + }() + defer srv.Stop() + + testFunc := func(t *testing.T, dialOpt grpc.DialOption) { + ctx, cancel := context.WithTimeout(testCtx, 5*time.Second) + defer cancel() + conn, err := grpc.DialContext(ctx, addr, dialOpt) + if err != nil { + t.Fatalf("failed to connect to the server %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + resp, err := c.SayHello(ctx, &pb.HelloRequest{Name: "Vittes"}) + if err != nil { + t.Fatalf("could not greet: %v", err) + } + if resp.Message != "Hello Vittes" { + t.Fatalf("unexpected reply %s", resp.Message) + } + } + + t.Run("Plain2TLS", func(t *testing.T) { + for i := 0; i < 5; i += 1 { + testFunc(t, grpc.WithInsecure()) + } + }) + t.Run("TLS2TLS", func(t *testing.T) { + for i := 0; i < 5; i += 1 { + testFunc(t, grpc.WithTransportCredentials(tc.client)) + } + }) +} diff --git a/go/vt/grpcoptionaltls/tls_detector.go b/go/vt/grpcoptionaltls/tls_detector.go new file mode 100755 index 00000000000..beff6bfd740 
--- /dev/null +++ b/go/vt/grpcoptionaltls/tls_detector.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package grpcoptionaltls + +import "io" + +const TLSPeekedBytes = 6 + +func looksLikeTLS(bytes []byte) bool { + if len(bytes) < TLSPeekedBytes { + return false + } + // TLS starts as + // 0: 0x16 - handshake protocol magic + // 1: 0x03 - SSL version major + // 2: 0x00 to 0x03 - SSL version minor (SSLv3 or TLS1.0 through TLS1.3) + // 3-4: length (2 bytes) + // 5: 0x01 - handshake type (ClientHello) + // 6-8: handshake len (3 bytes), equals value from offset 3-4 minus 4 + // HTTP2 initial frame bytes + // https://tools.ietf.org/html/rfc7540#section-3.4 + + // Definitely not TLS + if bytes[0] != 0x16 || bytes[1] != 0x03 || bytes[5] != 0x01 { + return false + } + return true +} + +// DetectTLS reads necessary number of bytes from io.Reader +// returns result, bytes read from Reader and error +// No matter if error happens or what flag value is +// returned bytes should be checked +func DetectTLS(r io.Reader) (bool, []byte, error) { + var bytes = make([]byte, TLSPeekedBytes) + if n, err := io.ReadFull(r, bytes); err != nil { + return false, bytes[:n], err + } + return looksLikeTLS(bytes), bytes, nil +} diff --git a/go/vt/key/cached_size.go b/go/vt/key/cached_size.go new file mode 100644 index 00000000000..ef4e6e79896 --- /dev/null +++ b/go/vt/key/cached_size.go @@ -0,0 +1,43 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package key + +func (cached *DestinationExactKeyRange) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field KeyRange *vitess.io/vitess/go/vt/proto/topodata.KeyRange + size += cached.KeyRange.CachedSize(true) + return size +} +func (cached *DestinationKeyRange) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field KeyRange *vitess.io/vitess/go/vt/proto/topodata.KeyRange + size += cached.KeyRange.CachedSize(true) + return size +} diff --git a/go/vt/key/destination.go b/go/vt/key/destination.go index 235f561c2d7..c4fb37b7a9d 100644 --- a/go/vt/key/destination.go +++ b/go/vt/key/destination.go @@ -42,14 +42,6 @@ type Destination interface { // The returned error must be generated by vterrors. Resolve([]*topodatapb.ShardReference, func(shard string) error) error - // IsUnique returns true if this is a single destination. - // It returns false if this type can map to multiple destinations. - // - // TODO(alainjobart) That is just a method I think will be useful. - // Mainly, in v3, once we Map(), each returned result should - // be IsUnique=true for a Unique vindex. - IsUnique() bool - // String returns a printable version of the Destination. 
String() string } @@ -75,11 +67,6 @@ func DestinationsString(destinations []Destination) string { // It implements the Destination interface. type DestinationShard string -// IsUnique is part of the Destination interface. -func (d DestinationShard) IsUnique() bool { - return true -} - // Resolve is part of the Destination interface. func (d DestinationShard) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { return addShard(string(d)) @@ -98,11 +85,6 @@ func (d DestinationShard) String() string { // It implements the Destination interface. type DestinationShards []string -// IsUnique is part of the Destination interface. -func (d DestinationShards) IsUnique() bool { - return false -} - // Resolve is part of the Destination interface. func (d DestinationShards) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { for _, shard := range d { @@ -134,11 +116,6 @@ type DestinationExactKeyRange struct { KeyRange *topodatapb.KeyRange } -// IsUnique is part of the Destination interface. -func (d DestinationExactKeyRange) IsUnique() bool { - return true -} - // Resolve is part of the Destination interface. func (d DestinationExactKeyRange) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { return processExactKeyRange(allShards, d.KeyRange, addShard) @@ -188,11 +165,6 @@ func processExactKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb // It implements the Destination interface. type DestinationExactKeyRanges []*topodatapb.KeyRange -// IsUnique is part of the Destination interface. -func (d DestinationExactKeyRanges) IsUnique() bool { - return false -} - // Resolve is part of the Destination interface. 
func (d DestinationExactKeyRanges) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { for _, kr := range d { @@ -231,11 +203,6 @@ type DestinationKeyRange struct { KeyRange *topodatapb.KeyRange } -// IsUnique is part of the Destination interface. -func (d DestinationKeyRange) IsUnique() bool { - return true -} - // Resolve is part of the Destination interface. func (d DestinationKeyRange) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { return processKeyRange(allShards, d.KeyRange, addShard) @@ -267,11 +234,6 @@ func processKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyR // It implements the Destination interface. type DestinationKeyRanges []*topodatapb.KeyRange -// IsUnique is part of the Destination interface. -func (d DestinationKeyRanges) IsUnique() bool { - return false -} - // Resolve is part of the Destination interface. func (d DestinationKeyRanges) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { for _, kr := range d { @@ -304,11 +266,6 @@ func (d DestinationKeyRanges) String() string { // It implements the Destination interface. type DestinationKeyspaceID []byte -// IsUnique is part of the Destination interface. -func (d DestinationKeyspaceID) IsUnique() bool { - return true -} - // Resolve is part of the Destination interface. func (d DestinationKeyspaceID) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { shard, err := GetShardForKeyspaceID(allShards, d) @@ -345,11 +302,6 @@ func GetShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID [] // It implements the Destination interface. type DestinationKeyspaceIDs [][]byte -// IsUnique is part of the Destination interface. -func (d DestinationKeyspaceIDs) IsUnique() bool { - return false -} - // Resolve is part of the Destination interface. 
func (d DestinationKeyspaceIDs) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { for _, ksid := range d { @@ -400,11 +352,6 @@ func (dp DestinationAnyShardPickerRandomShard) PickShard(shardCount int) int { // It implements the Destination interface. type DestinationAnyShard struct{} -// IsUnique is part of the Destination interface. -func (d DestinationAnyShard) IsUnique() bool { - return true -} - // Resolve is part of the Destination interface. func (d DestinationAnyShard) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { if len(allShards) == 0 { @@ -427,11 +374,6 @@ func (d DestinationAnyShard) String() string { // It implements the Destination interface. type DestinationAllShards struct{} -// IsUnique is part of the Destination interface. -func (d DestinationAllShards) IsUnique() bool { - return false -} - // Resolve is part of the Destination interface. func (d DestinationAllShards) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { for _, shard := range allShards { @@ -456,11 +398,6 @@ func (d DestinationAllShards) String() string { // It implements the Destination interface. type DestinationNone struct{} -// IsUnique is part of the Destination interface. -func (d DestinationNone) IsUnique() bool { - return true -} - // Resolve is part of the Destination interface. func (d DestinationNone) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { return nil diff --git a/go/vt/key/key.go b/go/vt/key/key.go index 90f076403a3..9cedad6f409 100644 --- a/go/vt/key/key.go +++ b/go/vt/key/key.go @@ -62,6 +62,16 @@ func ParseKeyspaceIDType(param string) (topodatapb.KeyspaceIdType, error) { return topodatapb.KeyspaceIdType(value), nil } +// KeyspaceIDTypeString returns the string representation of a keyspace id type. 
+func KeyspaceIDTypeString(id topodatapb.KeyspaceIdType) string { + s, ok := topodatapb.KeyspaceIdType_name[int32(id)] + if !ok { + return KeyspaceIDTypeString(topodatapb.KeyspaceIdType_UNSET) + } + + return s +} + // // KeyRange helper methods // @@ -185,7 +195,7 @@ func KeyRangeEqual(left, right *topodatapb.KeyRange) bool { bytes.Equal(left.End, right.End) } -// KeyRangeStartEqual returns true if right's keyrange start is _after_ left's start +// KeyRangeStartSmaller returns true if right's keyrange start is _after_ left's start func KeyRangeStartSmaller(left, right *topodatapb.KeyRange) bool { if left == nil { return right != nil diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 7e15c50ea47..e8df43cf8af 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -23,10 +23,12 @@ import ( "os" "path/filepath" "strings" + "time" "context" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -82,6 +84,9 @@ var ( // backupCompressBlocks is the number of blocks that are processed // once before the writer blocks backupCompressBlocks = flag.Int("backup_storage_number_blocks", 2, "if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). 
It should be equal to the number of CPUs available for compression") + + backupDuration = stats.NewGauge("backup_duration_seconds", "How long it took to complete the last backup operation (in seconds)") + restoreDuration = stats.NewGauge("restore_duration_seconds", "How long it took to complete the last restore operation (in seconds)") ) // Backup is the main entry point for a backup: @@ -89,7 +94,7 @@ var ( // - shuts down Mysqld during the backup // - remember if we were replicating, restore the exact same state func Backup(ctx context.Context, params BackupParams) error { - + startTs := time.Now() backupDir := GetBackupDir(params.Keyspace, params.Shard) name := fmt.Sprintf("%v.%v", params.BackupTime.UTC().Format(BackupTimestampFormat), params.TabletAlias) // Start the backup with the BackupStorage. @@ -129,6 +134,7 @@ func Backup(ctx context.Context, params BackupParams) error { } // The backup worked, so just return the finish error, if any. + backupDuration.Set(int64(time.Since(startTs).Seconds())) return finishErr } @@ -223,6 +229,7 @@ func ShouldRestore(ctx context.Context, params RestoreParams) (bool, error) { // appropriate backup on the BackupStorage, Restore logs an error // and returns ErrNoBackup. Any other error is returned. 
func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) { + startTs := time.Now() // find the right backup handle: most recent one, with a MANIFEST params.Logger.Infof("Restore: looking for a suitable backup to restore") bs, err := backupstorage.GetBackupStorage() @@ -322,5 +329,6 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) return nil, err } + restoreDuration.Set(int64(time.Since(startTs).Seconds())) return manifest, nil } diff --git a/go/vt/mysqlctl/mycnf_gen.go b/go/vt/mysqlctl/mycnf_gen.go index 127b17c459d..a72e9eee9eb 100644 --- a/go/vt/mysqlctl/mycnf_gen.go +++ b/go/vt/mysqlctl/mycnf_gen.go @@ -92,7 +92,12 @@ func TabletDir(uid uint32) string { if *tabletDir != "" { return fmt.Sprintf("%s/%s", env.VtDataRoot(), *tabletDir) } - return fmt.Sprintf("%s/vt_%010d", env.VtDataRoot(), uid) + return DefaultTabletDirAtRoot(env.VtDataRoot(), uid) +} + +// DefaultTabletDirAtRoot returns the default directory for a tablet given a UID and a VtDataRoot variable +func DefaultTabletDirAtRoot(dataRoot string, uid uint32) string { + return fmt.Sprintf("%s/vt_%010d", dataRoot, uid) } // MycnfFile returns the default location of the my.cnf file. diff --git a/go/vt/mysqlctl/mysqlctlproto/backup.go b/go/vt/mysqlctl/mysqlctlproto/backup.go new file mode 100644 index 00000000000..6fa2755b441 --- /dev/null +++ b/go/vt/mysqlctl/mysqlctlproto/backup.go @@ -0,0 +1,31 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysqlctlproto + +import ( + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" +) + +// BackupHandleToProto returns a BackupInfo proto from a BackupHandle. +func BackupHandleToProto(bh backupstorage.BackupHandle) *mysqlctlpb.BackupInfo { + return &mysqlctlpb.BackupInfo{ + Name: bh.Name(), + Directory: bh.Directory(), + } +} diff --git a/go/vt/mysqlctl/mysqlctlproto/doc.go b/go/vt/mysqlctl/mysqlctlproto/doc.go new file mode 100644 index 00000000000..cf77ea853ff --- /dev/null +++ b/go/vt/mysqlctl/mysqlctlproto/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package mysqlctlproto provides utility functions for working with data +structures in mysqlctl.proto. 
+*/ +package mysqlctlproto diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index 3582b777584..b134884fb62 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -49,7 +49,7 @@ type XtrabackupEngine struct { var ( // path where backup engine program is located - xtrabackupEnginePath = flag.String("xtrabackup_root_path", "", "directory location of the xtrabackup executable, e.g., /usr/bin") + xtrabackupEnginePath = flag.String("xtrabackup_root_path", "", "directory location of the xtrabackup and xbstream executables, e.g., /usr/bin") // flags to pass through to backup phase xtrabackupBackupFlags = flag.String("xtrabackup_backup_flags", "", "flags to pass to backup command. These should be space separated and will be added to the end of the command") // flags to pass through to prepare phase of restore @@ -299,6 +299,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams // the replication position. Note that if we don't read stderr as we go, the // xtrabackup process gets blocked when the write buffer fills up. stderrBuilder := &strings.Builder{} + posBuilder := &strings.Builder{} stderrDone := make(chan struct{}) go func() { defer close(stderrDone) @@ -318,7 +319,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams } capture = true } - fmt.Fprintln(stderrBuilder, line) + fmt.Fprintln(posBuilder, line) } if err := scanner.Err(); err != nil { params.Logger.Errorf("error reading from xtrabackup stderr: %v", err) @@ -359,10 +360,11 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams sterrOutput := stderrBuilder.String() if err := backupCmd.Wait(); err != nil { - return replicationPosition, vterrors.Wrap(err, "xtrabackup failed with error") + return replicationPosition, vterrors.Wrap(err, fmt.Sprintf("xtrabackup failed with error. 
Output=%s", sterrOutput)) } - replicationPosition, rerr := findReplicationPosition(sterrOutput, flavor, params.Logger) + posOutput := posBuilder.String() + replicationPosition, rerr := findReplicationPosition(posOutput, flavor, params.Logger) if rerr != nil { return replicationPosition, vterrors.Wrap(rerr, "backup failed trying to find replication position") } @@ -580,7 +582,7 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log case xbstream: // now extract the files by running xbstream - xbstreamProgram := xbstream + xbstreamProgram := path.Join(*xtrabackupEnginePath, xbstream) flagsToExec := []string{"-C", tempDir, "-xv"} if *xbstreamRestoreFlags != "" { flagsToExec = append(flagsToExec, strings.Fields(*xbstreamRestoreFlags)...) diff --git a/go/vt/orchestrator/db/db.go b/go/vt/orchestrator/db/db.go index 8472840569b..680d982a0c2 100644 --- a/go/vt/orchestrator/db/db.go +++ b/go/vt/orchestrator/db/db.go @@ -133,8 +133,10 @@ func OpenOrchestrator() (db *sql.DB, err error) { if err == nil && !fromCache { log.Debugf("Connected to orchestrator backend: sqlite on %v", config.Config.SQLite3DataFile) } - db.SetMaxOpenConns(1) - db.SetMaxIdleConns(1) + if db != nil { + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + } } else { if db, fromCache, err := openOrchestratorMySQLGeneric(); err != nil { return db, log.Errore(err) diff --git a/go/vt/orchestrator/external/golib/sqlutils/sqlutils.go b/go/vt/orchestrator/external/golib/sqlutils/sqlutils.go index a0f7209627e..74bfee3beff 100644 --- a/go/vt/orchestrator/external/golib/sqlutils/sqlutils.go +++ b/go/vt/orchestrator/external/golib/sqlutils/sqlutils.go @@ -25,9 +25,6 @@ import ( "sync" "time" - _ "github.com/go-sql-driver/mysql" - _ "github.com/mattn/go-sqlite3" - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" ) diff --git a/go/vt/orchestrator/inst/cluster_test.go b/go/vt/orchestrator/inst/cluster_test.go index 27b7f212aa4..640e89ab4af 100644 --- 
a/go/vt/orchestrator/inst/cluster_test.go +++ b/go/vt/orchestrator/inst/cluster_test.go @@ -21,6 +21,8 @@ import ( "testing" + _ "github.com/mattn/go-sqlite3" + "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" test "vitess.io/vitess/go/vt/orchestrator/external/golib/tests" diff --git a/go/vt/proto/automation/automation.pb.go b/go/vt/proto/automation/automation.pb.go index fe039ca84ef..2212bfadb07 100644 --- a/go/vt/proto/automation/automation.pb.go +++ b/go/vt/proto/automation/automation.pb.go @@ -1,11 +1,13 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: automation.proto package automation import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -102,18 +104,26 @@ func (*ClusterOperation) ProtoMessage() {} func (*ClusterOperation) Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{0} } - func (m *ClusterOperation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterOperation.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ClusterOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterOperation.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ClusterOperation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ClusterOperation) XXX_Merge(src proto.Message) { xxx_messageInfo_ClusterOperation.Merge(m, src) } func (m *ClusterOperation) XXX_Size() int { - return xxx_messageInfo_ClusterOperation.Size(m) + return m.Size() } func (m *ClusterOperation) XXX_DiscardUnknown() { xxx_messageInfo_ClusterOperation.DiscardUnknown(m) @@ -165,18 +175,26 @@ func (*TaskContainer) ProtoMessage() {} func (*TaskContainer) Descriptor() ([]byte, []int) { return 
fileDescriptor_06e15ad07c41cb38, []int{1} } - func (m *TaskContainer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TaskContainer.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *TaskContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TaskContainer.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_TaskContainer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *TaskContainer) XXX_Merge(src proto.Message) { xxx_messageInfo_TaskContainer.Merge(m, src) } func (m *TaskContainer) XXX_Size() int { - return xxx_messageInfo_TaskContainer.Size(m) + return m.Size() } func (m *TaskContainer) XXX_DiscardUnknown() { xxx_messageInfo_TaskContainer.DiscardUnknown(m) @@ -221,18 +239,26 @@ func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{2} } - func (m *Task) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Task.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Task.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Task) XXX_Merge(src proto.Message) { xxx_messageInfo_Task.Merge(m, src) } func (m *Task) XXX_Size() int { - return xxx_messageInfo_Task.Size(m) + return m.Size() } func (m *Task) XXX_DiscardUnknown() { xxx_messageInfo_Task.DiscardUnknown(m) @@ -296,18 +322,26 @@ func (*EnqueueClusterOperationRequest) ProtoMessage() {} func (*EnqueueClusterOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{3} } - func (m *EnqueueClusterOperationRequest) XXX_Unmarshal(b []byte) error { 
- return xxx_messageInfo_EnqueueClusterOperationRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *EnqueueClusterOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnqueueClusterOperationRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_EnqueueClusterOperationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *EnqueueClusterOperationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_EnqueueClusterOperationRequest.Merge(m, src) } func (m *EnqueueClusterOperationRequest) XXX_Size() int { - return xxx_messageInfo_EnqueueClusterOperationRequest.Size(m) + return m.Size() } func (m *EnqueueClusterOperationRequest) XXX_DiscardUnknown() { xxx_messageInfo_EnqueueClusterOperationRequest.DiscardUnknown(m) @@ -342,18 +376,26 @@ func (*EnqueueClusterOperationResponse) ProtoMessage() {} func (*EnqueueClusterOperationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{4} } - func (m *EnqueueClusterOperationResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnqueueClusterOperationResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *EnqueueClusterOperationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnqueueClusterOperationResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_EnqueueClusterOperationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *EnqueueClusterOperationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_EnqueueClusterOperationResponse.Merge(m, src) } func (m *EnqueueClusterOperationResponse) XXX_Size() int { - return xxx_messageInfo_EnqueueClusterOperationResponse.Size(m) + 
return m.Size() } func (m *EnqueueClusterOperationResponse) XXX_DiscardUnknown() { xxx_messageInfo_EnqueueClusterOperationResponse.DiscardUnknown(m) @@ -381,18 +423,26 @@ func (*GetClusterOperationStateRequest) ProtoMessage() {} func (*GetClusterOperationStateRequest) Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{5} } - func (m *GetClusterOperationStateRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterOperationStateRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetClusterOperationStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterOperationStateRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetClusterOperationStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetClusterOperationStateRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetClusterOperationStateRequest.Merge(m, src) } func (m *GetClusterOperationStateRequest) XXX_Size() int { - return xxx_messageInfo_GetClusterOperationStateRequest.Size(m) + return m.Size() } func (m *GetClusterOperationStateRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetClusterOperationStateRequest.DiscardUnknown(m) @@ -420,18 +470,26 @@ func (*GetClusterOperationStateResponse) ProtoMessage() {} func (*GetClusterOperationStateResponse) Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{6} } - func (m *GetClusterOperationStateResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterOperationStateResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetClusterOperationStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterOperationStateResponse.Marshal(b, m, deterministic) + if deterministic { + return 
xxx_messageInfo_GetClusterOperationStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetClusterOperationStateResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetClusterOperationStateResponse.Merge(m, src) } func (m *GetClusterOperationStateResponse) XXX_Size() int { - return xxx_messageInfo_GetClusterOperationStateResponse.Size(m) + return m.Size() } func (m *GetClusterOperationStateResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetClusterOperationStateResponse.DiscardUnknown(m) @@ -459,18 +517,26 @@ func (*GetClusterOperationDetailsRequest) ProtoMessage() {} func (*GetClusterOperationDetailsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{7} } - func (m *GetClusterOperationDetailsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterOperationDetailsRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetClusterOperationDetailsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterOperationDetailsRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetClusterOperationDetailsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetClusterOperationDetailsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetClusterOperationDetailsRequest.Merge(m, src) } func (m *GetClusterOperationDetailsRequest) XXX_Size() int { - return xxx_messageInfo_GetClusterOperationDetailsRequest.Size(m) + return m.Size() } func (m *GetClusterOperationDetailsRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetClusterOperationDetailsRequest.DiscardUnknown(m) @@ -499,18 +565,26 @@ func (*GetClusterOperationDetailsResponse) ProtoMessage() {} func (*GetClusterOperationDetailsResponse) 
Descriptor() ([]byte, []int) { return fileDescriptor_06e15ad07c41cb38, []int{8} } - func (m *GetClusterOperationDetailsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterOperationDetailsResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetClusterOperationDetailsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterOperationDetailsResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetClusterOperationDetailsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetClusterOperationDetailsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetClusterOperationDetailsResponse.Merge(m, src) } func (m *GetClusterOperationDetailsResponse) XXX_Size() int { - return xxx_messageInfo_GetClusterOperationDetailsResponse.Size(m) + return m.Size() } func (m *GetClusterOperationDetailsResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetClusterOperationDetailsResponse.DiscardUnknown(m) @@ -544,42 +618,1988 @@ func init() { func init() { proto.RegisterFile("automation.proto", fileDescriptor_06e15ad07c41cb38) } var fileDescriptor_06e15ad07c41cb38 = []byte{ - // 588 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdd, 0x6e, 0xd3, 0x3e, - 0x18, 0xc6, 0xff, 0x49, 0xdb, 0xfd, 0xe9, 0x1b, 0xb6, 0x45, 0x16, 0x9b, 0xb2, 0x89, 0xb1, 0x2c, - 0x1c, 0x50, 0x86, 0xd4, 0x8a, 0xed, 0x60, 0x68, 0x80, 0xc4, 0xd8, 0xa2, 0x69, 0x1a, 0x4a, 0x26, - 0x37, 0x13, 0xd2, 0x38, 0xa8, 0x4c, 0x67, 0xa1, 0xd0, 0x34, 0xce, 0x6c, 0xa7, 0x52, 0x6f, 0x80, - 0x8b, 0xe0, 0x26, 0xb8, 0x14, 0x6e, 0x09, 0xe5, 0xab, 0x4d, 0xd3, 0x0f, 0x09, 0x71, 0x66, 0xbf, - 0x7e, 0xde, 0xe7, 0x7d, 0xfc, 0x6b, 0x1d, 0xd0, 0x49, 0x2c, 0xd9, 0x90, 0x48, 0x9f, 0x85, 0xed, - 0x88, 0x33, 0xc9, 0x10, 0x4c, 0x2b, 0xd6, 0x2f, 0x05, 0xf4, 
0xf3, 0x20, 0x16, 0x92, 0x72, 0x37, - 0xa2, 0x3c, 0x2d, 0xa2, 0x0d, 0x50, 0xfd, 0x7b, 0x43, 0x31, 0x95, 0x56, 0x13, 0xab, 0xfe, 0x3d, - 0x7a, 0x07, 0x8f, 0x05, 0xe5, 0x3e, 0x09, 0x7a, 0x92, 0x88, 0x81, 0x30, 0x54, 0xb3, 0xd6, 0xd2, - 0x8e, 0x76, 0xda, 0x25, 0x67, 0x8f, 0x88, 0xc1, 0x39, 0x0b, 0x25, 0xf1, 0x43, 0xca, 0xb1, 0x96, - 0xc9, 0x93, 0xa2, 0x40, 0x27, 0xd0, 0x10, 0x92, 0x48, 0x6a, 0xd4, 0x4c, 0xa5, 0xb5, 0x71, 0x74, - 0x50, 0x6e, 0xab, 0x8e, 0xee, 0x26, 0x42, 0x9c, 0xe9, 0xd1, 0x13, 0x68, 0x50, 0xce, 0x19, 0x37, - 0xea, 0x69, 0x92, 0x6c, 0x63, 0x7d, 0x87, 0xf5, 0x99, 0x61, 0xe8, 0x04, 0x36, 0x22, 0xc2, 0x49, - 0x10, 0xd0, 0x22, 0x9f, 0x92, 0xe6, 0xd3, 0xab, 0xf9, 0xf0, 0x7a, 0xa1, 0xcb, 0x82, 0x99, 0xa0, - 0xf5, 0x59, 0xd8, 0x8f, 0x39, 0xa7, 0x61, 0x7f, 0x6c, 0xa8, 0xa6, 0xd2, 0x6a, 0xe0, 0x72, 0xc9, - 0xfa, 0xa1, 0x42, 0x3d, 0xd1, 0x22, 0x04, 0xf5, 0x90, 0x0c, 0x69, 0xce, 0x24, 0x5d, 0xa3, 0x0f, - 0x00, 0x89, 0xdf, 0x90, 0x4a, 0xca, 0x0b, 0x26, 0x66, 0x75, 0x66, 0xfb, 0x66, 0x22, 0xb1, 0x43, - 0xc9, 0xc7, 0xb8, 0xd4, 0x93, 0x73, 0xae, 0x4d, 0x38, 0xbf, 0x2a, 0x48, 0xd5, 0x53, 0x52, 0x5b, - 0x55, 0xb3, 0x19, 0x3a, 0xdb, 0xb0, 0xc6, 0x62, 0x19, 0xc5, 0xd2, 0x68, 0xa4, 0x06, 0xf9, 0x6e, - 0x4a, 0x6d, 0xad, 0x44, 0x6d, 0xf7, 0x3d, 0x6c, 0x56, 0x92, 0x20, 0x1d, 0x6a, 0x03, 0x3a, 0xce, - 0xaf, 0x94, 0x2c, 0x93, 0xd6, 0x11, 0x09, 0x62, 0x9a, 0xa2, 0x68, 0xe2, 0x6c, 0x73, 0xaa, 0xbe, - 0x51, 0xac, 0xdf, 0x0a, 0x3c, 0xb3, 0xc3, 0x87, 0x98, 0xc6, 0xb4, 0xfa, 0x93, 0x61, 0xfa, 0x10, - 0x53, 0x21, 0x17, 0x22, 0xba, 0x5b, 0x80, 0xe8, 0xb4, 0x7c, 0xab, 0xd5, 0x9e, 0xab, 0xe0, 0xfd, - 0xeb, 0x8d, 0x5e, 0xc3, 0xfe, 0xd2, 0xe1, 0x22, 0x62, 0xa1, 0xa0, 0xd5, 0x67, 0x90, 0xb4, 0x5c, - 0x52, 0xb9, 0xf8, 0x2f, 0x9b, 0x43, 0xa8, 0xb6, 0x7c, 0x01, 0x73, 0x79, 0x4b, 0x3e, 0x66, 0xf2, - 0x3e, 0x94, 0xbf, 0x7b, 0x1f, 0xd6, 0x31, 0x1c, 0x2c, 0x30, 0xbf, 0xa0, 0x92, 0xf8, 0x81, 0x58, - 0x96, 0x88, 0x80, 0xb5, 0xaa, 0x29, 0xcf, 0xf4, 0x16, 0xa0, 0x9f, 0x49, 0x7a, 0x2c, 0x4a, 0xe1, - 
0x69, 0x47, 0x4f, 0x57, 0x05, 0xc3, 0xcd, 0x7e, 0x51, 0x39, 0xfc, 0xa9, 0xc0, 0xd6, 0xc2, 0xe0, - 0xe8, 0x39, 0xec, 0xdf, 0x3a, 0xd7, 0x8e, 0xfb, 0xd9, 0xe9, 0x9d, 0x7f, 0xba, 0xed, 0x7a, 0x36, - 0xee, 0xb9, 0x37, 0x36, 0x3e, 0xf3, 0xae, 0x5c, 0xa7, 0xd7, 0xf5, 0xce, 0x3c, 0x5b, 0xff, 0x0f, - 0x1d, 0xc0, 0xde, 0xfc, 0xa1, 0xe3, 0x7a, 0x89, 0x00, 0x7b, 0xf6, 0x85, 0xae, 0xa0, 0x3d, 0xd8, - 0x99, 0x97, 0xe0, 0x5b, 0xc7, 0xb9, 0x72, 0x2e, 0x75, 0x15, 0xed, 0xc2, 0xf6, 0xfc, 0xf1, 0x85, - 0xeb, 0xd8, 0x7a, 0xed, 0xf0, 0x1a, 0x9a, 0x93, 0xa7, 0x84, 0xb6, 0x01, 0x15, 0x79, 0xbc, 0xb3, - 0xee, 0xf5, 0x24, 0xc2, 0x26, 0x68, 0xb3, 0x03, 0x35, 0xf8, 0x7f, 0x6a, 0xff, 0x08, 0xea, 0x99, - 0xd9, 0xc7, 0x97, 0x77, 0x2f, 0x46, 0xbe, 0xa4, 0x42, 0xb4, 0x7d, 0xd6, 0xc9, 0x56, 0x9d, 0x6f, - 0xac, 0x33, 0x92, 0x9d, 0xf4, 0x4b, 0xdb, 0x99, 0x02, 0xfb, 0xba, 0x96, 0x56, 0x8e, 0xff, 0x04, - 0x00, 0x00, 0xff, 0xff, 0xa0, 0x42, 0x72, 0x53, 0x8f, 0x05, 0x00, 0x00, + // 608 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdf, 0x4e, 0x13, 0x41, + 0x14, 0xc6, 0x99, 0x6d, 0x8b, 0xf6, 0x54, 0x60, 0x33, 0x11, 0x52, 0x88, 0x94, 0x65, 0xbd, 0xb0, + 0xc1, 0xa4, 0x8d, 0x70, 0x81, 0xa2, 0x26, 0x22, 0x6c, 0x08, 0xc1, 0xec, 0x92, 0xe9, 0x12, 0x13, + 0xbc, 0x68, 0xc6, 0x32, 0x31, 0x2b, 0xcb, 0xce, 0x32, 0x33, 0x4b, 0xc2, 0x0b, 0xf8, 0x0c, 0xc6, + 0x97, 0xf0, 0x35, 0xbc, 0xd3, 0x47, 0x30, 0xf8, 0x22, 0x66, 0xff, 0xb5, 0xcb, 0xd2, 0x36, 0x31, + 0xde, 0xcd, 0x9c, 0xf9, 0xce, 0x77, 0xbe, 0xf9, 0xb5, 0xb3, 0xa0, 0xd3, 0x48, 0xf1, 0x0b, 0xaa, + 0x3c, 0x1e, 0x74, 0x42, 0xc1, 0x15, 0xc7, 0x30, 0xaa, 0x98, 0xdf, 0x11, 0xe8, 0x7b, 0x7e, 0x24, + 0x15, 0x13, 0x4e, 0xc8, 0x44, 0x52, 0xc4, 0xf3, 0xa0, 0x79, 0x67, 0x4d, 0x64, 0xa0, 0x76, 0x9d, + 0x68, 0xde, 0x19, 0x7e, 0x05, 0x0f, 0x24, 0x13, 0x1e, 0xf5, 0xfb, 0x8a, 0xca, 0x73, 0xd9, 0xd4, + 0x8c, 0x4a, 0xbb, 0xb1, 0xb9, 0xdc, 0x29, 0x38, 0xbb, 0x54, 0x9e, 0xef, 0xf1, 0x40, 0x51, 0x2f, + 0x60, 0x82, 
0x34, 0x52, 0x79, 0x5c, 0x94, 0x78, 0x1b, 0x6a, 0x52, 0x51, 0xc5, 0x9a, 0x15, 0x03, + 0xb5, 0xe7, 0x37, 0xd7, 0x8b, 0x6d, 0xe5, 0xd1, 0xbd, 0x58, 0x48, 0x52, 0x3d, 0x7e, 0x08, 0x35, + 0x26, 0x04, 0x17, 0xcd, 0x6a, 0x92, 0x24, 0xdd, 0x98, 0x9f, 0x61, 0xee, 0xd6, 0x30, 0xbc, 0x0d, + 0xf3, 0x21, 0x15, 0xd4, 0xf7, 0x59, 0x9e, 0x0f, 0x25, 0xf9, 0xf4, 0x72, 0x3e, 0x32, 0x97, 0xeb, + 0xd2, 0x60, 0x06, 0x34, 0x06, 0x3c, 0x18, 0x44, 0x42, 0xb0, 0x60, 0x70, 0xdd, 0xd4, 0x0c, 0xd4, + 0xae, 0x91, 0x62, 0xc9, 0xfc, 0xa2, 0x41, 0x35, 0xd6, 0x62, 0x0c, 0xd5, 0x80, 0x5e, 0xb0, 0x8c, + 0x49, 0xb2, 0xc6, 0x6f, 0x00, 0x62, 0xbf, 0x0b, 0xa6, 0x98, 0xc8, 0x99, 0x18, 0xe5, 0x99, 0x9d, + 0xe3, 0xa1, 0xc4, 0x0a, 0x94, 0xb8, 0x26, 0x85, 0x9e, 0x8c, 0x73, 0x65, 0xc8, 0xf9, 0x69, 0x4e, + 0xaa, 0x9a, 0x90, 0x5a, 0x2c, 0x9b, 0xdd, 0xa2, 0xb3, 0x04, 0xb3, 0x3c, 0x52, 0x61, 0xa4, 0x9a, + 0xb5, 0xc4, 0x20, 0xdb, 0x8d, 0xa8, 0xcd, 0x16, 0xa8, 0xad, 0xbc, 0x86, 0x85, 0x52, 0x12, 0xac, + 0x43, 0xe5, 0x9c, 0x5d, 0x67, 0x57, 0x8a, 0x97, 0x71, 0xeb, 0x15, 0xf5, 0x23, 0x96, 0xa0, 0xa8, + 0x93, 0x74, 0xb3, 0xa3, 0x3d, 0x47, 0xe6, 0x4f, 0x04, 0x2d, 0x2b, 0xb8, 0x8c, 0x58, 0xc4, 0xca, + 0x3f, 0x19, 0x61, 0x97, 0x11, 0x93, 0x6a, 0x2c, 0xa2, 0xd3, 0x31, 0x88, 0x76, 0x8a, 0xb7, 0x9a, + 0xee, 0x39, 0x0d, 0xde, 0xff, 0xde, 0xe8, 0x19, 0xac, 0x4d, 0x1c, 0x2e, 0x43, 0x1e, 0x48, 0x56, + 0x7e, 0x06, 0x71, 0xcb, 0x01, 0x53, 0xe3, 0xff, 0xb2, 0x19, 0x84, 0x72, 0xcb, 0x07, 0x30, 0x26, + 0xb7, 0x64, 0x63, 0x86, 0xef, 0x03, 0xfd, 0xdb, 0xfb, 0x30, 0xb7, 0x60, 0x7d, 0x8c, 0xf9, 0x3e, + 0x53, 0xd4, 0xf3, 0xe5, 0xa4, 0x44, 0x14, 0xcc, 0x69, 0x4d, 0x59, 0xa6, 0x97, 0x00, 0x83, 0x54, + 0xd2, 0xe7, 0x61, 0x02, 0xaf, 0xb1, 0xf9, 0x68, 0x5a, 0x30, 0x52, 0x1f, 0xe4, 0x95, 0x8d, 0x6f, + 0x08, 0x16, 0xc7, 0x06, 0xc7, 0x8f, 0x61, 0xed, 0xc4, 0x3e, 0xb2, 0x9d, 0xf7, 0x76, 0x7f, 0xef, + 0xdd, 0x49, 0xcf, 0xb5, 0x48, 0xdf, 0x39, 0xb6, 0xc8, 0xae, 0x7b, 0xe8, 0xd8, 0xfd, 0x9e, 0xbb, + 0xeb, 0x5a, 0xfa, 0x0c, 0x5e, 0x87, 0xd5, 0xbb, 
0x87, 0xb6, 0xe3, 0xc6, 0x02, 0xe2, 0x5a, 0xfb, + 0x3a, 0xc2, 0xab, 0xb0, 0x7c, 0x57, 0x42, 0x4e, 0x6c, 0xfb, 0xd0, 0x3e, 0xd0, 0x35, 0xbc, 0x02, + 0x4b, 0x77, 0x8f, 0xf7, 0x1d, 0xdb, 0xd2, 0x2b, 0x1b, 0x47, 0x50, 0x1f, 0x3e, 0x25, 0xbc, 0x04, + 0x38, 0xcf, 0xe3, 0xee, 0xf6, 0x8e, 0x86, 0x11, 0x16, 0xa0, 0x71, 0x7b, 0x60, 0x03, 0xee, 0x8d, + 0xec, 0xef, 0x43, 0x35, 0x35, 0x7b, 0xfb, 0xe2, 0xc7, 0x4d, 0x0b, 0xfd, 0xba, 0x69, 0xa1, 0xdf, + 0x37, 0x2d, 0xf4, 0xf5, 0x4f, 0x6b, 0xe6, 0xf4, 0xc9, 0x95, 0xa7, 0x98, 0x94, 0x1d, 0x8f, 0x77, + 0xd3, 0x55, 0xf7, 0x13, 0xef, 0x5e, 0xa9, 0x6e, 0xf2, 0xe5, 0xed, 0x8e, 0x00, 0x7e, 0x9c, 0x4d, + 0x2a, 0x5b, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x32, 0x88, 0xdc, 0x9f, 0x05, 0x00, 0x00, +} + +func (m *ClusterOperation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterOperation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterOperation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if m.State != 0 { + i = encodeVarintAutomation(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 + } + if len(m.SerialTasks) > 0 { + for iNdEx := len(m.SerialTasks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SerialTasks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAutomation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintAutomation(dAtA, i, 
uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskContainer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskContainer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Concurrency != 0 { + i = encodeVarintAutomation(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x10 + } + if len(m.ParallelTasks) > 0 { + for iNdEx := len(m.ParallelTasks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ParallelTasks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAutomation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Task) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x32 + } + if len(m.Output) > 0 { + i -= len(m.Output) + copy(dAtA[i:], m.Output) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Output))) + i-- + dAtA[i] = 0x2a + } + if m.State != 0 
{ + i = encodeVarintAutomation(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x20 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x1a + } + if len(m.Parameters) > 0 { + for k := range m.Parameters { + v := m.Parameters[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintAutomation(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintAutomation(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintAutomation(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnqueueClusterOperationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnqueueClusterOperationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnqueueClusterOperationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Parameters) > 0 { + for k := range m.Parameters { + v := m.Parameters[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintAutomation(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintAutomation(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintAutomation(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 
0xa + } + return len(dAtA) - i, nil +} + +func (m *EnqueueClusterOperationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnqueueClusterOperationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnqueueClusterOperationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetClusterOperationStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterOperationStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetClusterOperationStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetClusterOperationStateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterOperationStateResponse) MarshalTo(dAtA []byte) (int, error) { + size 
:= m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetClusterOperationStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.State != 0 { + i = encodeVarintAutomation(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetClusterOperationDetailsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterOperationDetailsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetClusterOperationDetailsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintAutomation(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetClusterOperationDetailsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterOperationDetailsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetClusterOperationDetailsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ClusterOp != nil { + { + size, err := m.ClusterOp.MarshalToSizedBuffer(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + i = encodeVarintAutomation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func encodeVarintAutomation(dAtA []byte, offset int, v uint64) int { + offset -= sovAutomation(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterOperation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if len(m.SerialTasks) > 0 { + for _, e := range m.SerialTasks { + l = e.Size() + n += 1 + l + sovAutomation(uint64(l)) + } + } + if m.State != 0 { + n += 1 + sovAutomation(uint64(m.State)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TaskContainer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ParallelTasks) > 0 { + for _, e := range m.ParallelTasks { + l = e.Size() + n += 1 + l + sovAutomation(uint64(l)) + } + } + if m.Concurrency != 0 { + n += 1 + sovAutomation(uint64(m.Concurrency)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Task) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovAutomation(uint64(len(k))) + 1 + len(v) + sovAutomation(uint64(len(v))) + n += mapEntrySize + 1 + sovAutomation(uint64(mapEntrySize)) + } + } + l = len(m.Id) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if m.State != 0 { + n += 1 + sovAutomation(uint64(m.State)) + } + l = len(m.Output) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + 
sovAutomation(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnqueueClusterOperationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovAutomation(uint64(len(k))) + 1 + len(v) + sovAutomation(uint64(len(v))) + n += mapEntrySize + 1 + sovAutomation(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnqueueClusterOperationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetClusterOperationStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetClusterOperationStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovAutomation(uint64(m.State)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetClusterOperationDetailsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovAutomation(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n } + +func (m *GetClusterOperationDetailsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterOp != nil { + l = m.ClusterOp.Size() + n += 1 + l + sovAutomation(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + 
+func sovAutomation(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAutomation(x uint64) (n int) { + return sovAutomation(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClusterOperation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterOperation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterOperation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SerialTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthAutomation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SerialTasks = append(m.SerialTasks, &TaskContainer{}) + if err := m.SerialTasks[len(m.SerialTasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= ClusterOperationState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskContainer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParallelTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ParallelTasks = append(m.ParallelTasks, &Task{}) + if err := m.ParallelTasks[len(m.ParallelTasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthAutomation + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthAutomation + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthAutomation + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { + return ErrInvalidLengthAutomation + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= TaskState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Output = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnqueueClusterOperationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnqueueClusterOperationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnqueueClusterOperationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + msglen + if postIndex < 0 
{ + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthAutomation + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthAutomation + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthAutomation + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthAutomation + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = 
entryPreIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnqueueClusterOperationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnqueueClusterOperationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnqueueClusterOperationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterOperationStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterOperationStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterOperationStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterOperationStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterOperationStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterOperationStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= ClusterOperationState(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterOperationDetailsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterOperationDetailsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterOperationDetailsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterOperationDetailsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterOperationDetailsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterOperationDetailsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterOp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutomation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAutomation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAutomation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterOp == nil { + m.ClusterOp = &ClusterOperation{} + } + if err := m.ClusterOp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAutomation(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAutomation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAutomation(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAutomation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAutomation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAutomation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAutomation + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAutomation + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAutomation + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAutomation = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAutomation = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAutomation = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/automationservice/automationservice.pb.go b/go/vt/proto/automationservice/automationservice.pb.go index 2dc222239ce..83c99fd2190 100644 --- a/go/vt/proto/automationservice/automationservice.pb.go +++ 
b/go/vt/proto/automationservice/automationservice.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: automationservice.proto package automationservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - automation "vitess.io/vitess/go/vt/proto/automation" ) @@ -30,7 +29,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("automationservice.proto", fileDescriptor_c03abdd2a71b5164) } var fileDescriptor_c03abdd2a71b5164 = []byte{ - // 178 bytes of a gzipped FileDescriptorProto + // 195 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9, 0xcf, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc4, 0x90, 0x90, 0x12, 0x40, 0x08, 0x41, 0x14, 0x19, 0x35, 0x32, @@ -39,10 +38,11 @@ var fileDescriptor_c03abdd2a71b5164 = []byte{ 0x9a, 0x71, 0x28, 0x0a, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0xd2, 0x26, 0x4a, 0x6d, 0x71, 0x01, 0xc8, 0x65, 0x4a, 0x0c, 0x42, 0xb5, 0x5c, 0x52, 0xee, 0xa9, 0x25, 0xe8, 0x0a, 0x5c, 0x52, 0x4b, 0x12, 0x33, 0x73, 0x8a, 0x85, 0x74, 0x91, 0x0d, 0xc3, 0xad, 0x0e, 0x66, 0xb7, 0x1e, 0xb1, - 0xca, 0x61, 0xd6, 0x3b, 0x19, 0x44, 0xe9, 0x95, 0x65, 0x96, 0xa4, 0x16, 0x17, 0xeb, 0x65, 0xe6, - 0xeb, 0x43, 0x58, 0xfa, 0xe9, 0xf9, 0xfa, 0x65, 0x25, 0xfa, 0xe0, 0x30, 0xd2, 0xc7, 0x08, 0xc7, - 0x24, 0x36, 0xb0, 0x84, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x4a, 0x9d, 0xc0, 0x7c, 0x01, - 0x00, 0x00, + 0xca, 0x61, 0xd6, 0x3b, 0x39, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, + 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x44, 0xe9, 0x95, 0x65, 0x96, 0xa4, 0x16, 0x17, 0xeb, 0x65, + 0xe6, 0xeb, 0x43, 0x58, 0xfa, 0xe9, 0xf9, 0xfa, 0x65, 0x25, 0xfa, 0xe0, 0x30, 0xd3, 0xc7, 0x08, 
+ 0xd7, 0x24, 0x36, 0xb0, 0x84, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x22, 0x2e, 0x47, 0x89, 0x8c, + 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index eab38ace6dc..5d69cff8afd 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -1,14 +1,15 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: binlogdata.proto package binlogdata import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - query "vitess.io/vitess/go/vt/proto/query" topodata "vitess.io/vitess/go/vt/proto/topodata" vtrpc "vitess.io/vitess/go/vt/proto/vtrpc" @@ -266,18 +267,26 @@ func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{0} } - func (m *Charset) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Charset.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Charset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Charset.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Charset.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Charset) XXX_Merge(src proto.Message) { xxx_messageInfo_Charset.Merge(m, src) } func (m *Charset) XXX_Size() int { - return xxx_messageInfo_Charset.Size(m) + return m.Size() } func (m *Charset) XXX_DiscardUnknown() { xxx_messageInfo_Charset.DiscardUnknown(m) @@ -324,18 +333,26 @@ func (*BinlogTransaction) ProtoMessage() {} func (*BinlogTransaction) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{1} } - func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BinlogTransaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BinlogTransaction.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BinlogTransaction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BinlogTransaction) XXX_Merge(src proto.Message) { xxx_messageInfo_BinlogTransaction.Merge(m, src) } func (m *BinlogTransaction) XXX_Size() int { - return xxx_messageInfo_BinlogTransaction.Size(m) + return m.Size() } func (m *BinlogTransaction) XXX_DiscardUnknown() { xxx_messageInfo_BinlogTransaction.DiscardUnknown(m) @@ -375,18 +392,26 @@ func (*BinlogTransaction_Statement) ProtoMessage() {} func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{1, 0} } - func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BinlogTransaction_Statement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BinlogTransaction_Statement.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BinlogTransaction_Statement.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BinlogTransaction_Statement) XXX_Merge(src proto.Message) { xxx_messageInfo_BinlogTransaction_Statement.Merge(m, src) } func (m *BinlogTransaction_Statement) XXX_Size() int { - return xxx_messageInfo_BinlogTransaction_Statement.Size(m) + return m.Size() } func (m *BinlogTransaction_Statement) XXX_DiscardUnknown() { xxx_messageInfo_BinlogTransaction_Statement.DiscardUnknown(m) @@ -434,18 +459,26 @@ func 
(*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{2} } - func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamKeyRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamKeyRangeRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamKeyRangeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamKeyRangeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamKeyRangeRequest.Merge(m, src) } func (m *StreamKeyRangeRequest) XXX_Size() int { - return xxx_messageInfo_StreamKeyRangeRequest.Size(m) + return m.Size() } func (m *StreamKeyRangeRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamKeyRangeRequest.DiscardUnknown(m) @@ -488,18 +521,26 @@ func (*StreamKeyRangeResponse) ProtoMessage() {} func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{3} } - func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamKeyRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamKeyRangeResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamKeyRangeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamKeyRangeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamKeyRangeResponse.Merge(m, src) } func (m *StreamKeyRangeResponse) XXX_Size() int { - return 
xxx_messageInfo_StreamKeyRangeResponse.Size(m) + return m.Size() } func (m *StreamKeyRangeResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamKeyRangeResponse.DiscardUnknown(m) @@ -533,18 +574,26 @@ func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{4} } - func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamTablesRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamTablesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamTablesRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTablesRequest.Merge(m, src) } func (m *StreamTablesRequest) XXX_Size() int { - return xxx_messageInfo_StreamTablesRequest.Size(m) + return m.Size() } func (m *StreamTablesRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamTablesRequest.DiscardUnknown(m) @@ -587,18 +636,26 @@ func (*StreamTablesResponse) ProtoMessage() {} func (*StreamTablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{5} } - func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamTablesResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamTablesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamTablesResponse) 
XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTablesResponse.Merge(m, src) } func (m *StreamTablesResponse) XXX_Size() int { - return xxx_messageInfo_StreamTablesResponse.Size(m) + return m.Size() } func (m *StreamTablesResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamTablesResponse.DiscardUnknown(m) @@ -647,18 +704,26 @@ func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{6} } - func (m *Rule) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Rule.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Rule.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Rule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Rule) XXX_Merge(src proto.Message) { xxx_messageInfo_Rule.Merge(m, src) } func (m *Rule) XXX_Size() int { - return xxx_messageInfo_Rule.Size(m) + return m.Size() } func (m *Rule) XXX_DiscardUnknown() { xxx_messageInfo_Rule.DiscardUnknown(m) @@ -704,18 +769,26 @@ func (*Filter) ProtoMessage() {} func (*Filter) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{7} } - func (m *Filter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Filter.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Filter.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Filter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Filter) XXX_Merge(src proto.Message) { xxx_messageInfo_Filter.Merge(m, src) } func (m *Filter) XXX_Size() int { - return xxx_messageInfo_Filter.Size(m) + return m.Size() } func (m 
*Filter) XXX_DiscardUnknown() { xxx_messageInfo_Filter.DiscardUnknown(m) @@ -761,7 +834,10 @@ type BinlogSource struct { ExternalMysql string `protobuf:"bytes,8,opt,name=external_mysql,json=externalMysql,proto3" json:"external_mysql,omitempty"` // StopAfterCopy specifies if vreplication should be stopped // after copying is done. - StopAfterCopy bool `protobuf:"varint,9,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + StopAfterCopy bool `protobuf:"varint,9,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow + // it is of the type + ExternalCluster string `protobuf:"bytes,10,opt,name=external_cluster,json=externalCluster,proto3" json:"external_cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -773,18 +849,26 @@ func (*BinlogSource) ProtoMessage() {} func (*BinlogSource) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{8} } - func (m *BinlogSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BinlogSource.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BinlogSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BinlogSource.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BinlogSource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BinlogSource) XXX_Merge(src proto.Message) { xxx_messageInfo_BinlogSource.Merge(m, src) } func (m *BinlogSource) XXX_Size() int { - return xxx_messageInfo_BinlogSource.Size(m) + return m.Size() } func (m *BinlogSource) XXX_DiscardUnknown() { xxx_messageInfo_BinlogSource.DiscardUnknown(m) @@ -855,6 +939,13 @@ func (m *BinlogSource) GetStopAfterCopy() bool { 
return false } +func (m *BinlogSource) GetExternalCluster() string { + if m != nil { + return m.ExternalCluster + } + return "" +} + // RowChange represents one row change. // If Before is set and not After, it's a delete. // If After is set and not Before, it's an insert. @@ -873,18 +964,26 @@ func (*RowChange) ProtoMessage() {} func (*RowChange) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{9} } - func (m *RowChange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RowChange.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RowChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RowChange.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RowChange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RowChange) XXX_Merge(src proto.Message) { xxx_messageInfo_RowChange.Merge(m, src) } func (m *RowChange) XXX_Size() int { - return xxx_messageInfo_RowChange.Size(m) + return m.Size() } func (m *RowChange) XXX_DiscardUnknown() { xxx_messageInfo_RowChange.DiscardUnknown(m) @@ -921,18 +1020,26 @@ func (*RowEvent) ProtoMessage() {} func (*RowEvent) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{10} } - func (m *RowEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RowEvent.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RowEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RowEvent.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RowEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RowEvent) XXX_Merge(src proto.Message) { xxx_messageInfo_RowEvent.Merge(m, src) } func (m *RowEvent) XXX_Size() int { - return 
xxx_messageInfo_RowEvent.Size(m) + return m.Size() } func (m *RowEvent) XXX_DiscardUnknown() { xxx_messageInfo_RowEvent.DiscardUnknown(m) @@ -969,18 +1076,26 @@ func (*FieldEvent) ProtoMessage() {} func (*FieldEvent) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{11} } - func (m *FieldEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldEvent.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *FieldEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldEvent.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_FieldEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *FieldEvent) XXX_Merge(src proto.Message) { xxx_messageInfo_FieldEvent.Merge(m, src) } func (m *FieldEvent) XXX_Size() int { - return xxx_messageInfo_FieldEvent.Size(m) + return m.Size() } func (m *FieldEvent) XXX_DiscardUnknown() { xxx_messageInfo_FieldEvent.DiscardUnknown(m) @@ -1023,18 +1138,26 @@ func (*ShardGtid) ProtoMessage() {} func (*ShardGtid) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{12} } - func (m *ShardGtid) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShardGtid.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ShardGtid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShardGtid.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ShardGtid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ShardGtid) XXX_Merge(src proto.Message) { xxx_messageInfo_ShardGtid.Merge(m, src) } func (m *ShardGtid) XXX_Size() int { - return xxx_messageInfo_ShardGtid.Size(m) + return m.Size() } func (m *ShardGtid) XXX_DiscardUnknown() { 
xxx_messageInfo_ShardGtid.DiscardUnknown(m) @@ -1084,18 +1207,26 @@ func (*VGtid) ProtoMessage() {} func (*VGtid) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{13} } - func (m *VGtid) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VGtid.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VGtid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VGtid.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VGtid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VGtid) XXX_Merge(src proto.Message) { xxx_messageInfo_VGtid.Merge(m, src) } func (m *VGtid) XXX_Size() int { - return xxx_messageInfo_VGtid.Size(m) + return m.Size() } func (m *VGtid) XXX_DiscardUnknown() { xxx_messageInfo_VGtid.DiscardUnknown(m) @@ -1125,18 +1256,26 @@ func (*KeyspaceShard) ProtoMessage() {} func (*KeyspaceShard) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{14} } - func (m *KeyspaceShard) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyspaceShard.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *KeyspaceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyspaceShard.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_KeyspaceShard.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *KeyspaceShard) XXX_Merge(src proto.Message) { xxx_messageInfo_KeyspaceShard.Merge(m, src) } func (m *KeyspaceShard) XXX_Size() int { - return xxx_messageInfo_KeyspaceShard.Size(m) + return m.Size() } func (m *KeyspaceShard) XXX_DiscardUnknown() { xxx_messageInfo_KeyspaceShard.DiscardUnknown(m) @@ -1195,18 +1334,26 @@ func (*Journal) ProtoMessage() {} func (*Journal) Descriptor() 
([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{15} } - func (m *Journal) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Journal.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Journal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Journal.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Journal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Journal) XXX_Merge(src proto.Message) { xxx_messageInfo_Journal.Merge(m, src) } func (m *Journal) XXX_Size() int { - return xxx_messageInfo_Journal.Size(m) + return m.Size() } func (m *Journal) XXX_DiscardUnknown() { xxx_messageInfo_Journal.DiscardUnknown(m) @@ -1307,18 +1454,26 @@ func (*VEvent) ProtoMessage() {} func (*VEvent) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{16} } - func (m *VEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VEvent.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VEvent.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VEvent) XXX_Merge(src proto.Message) { xxx_messageInfo_VEvent.Merge(m, src) } func (m *VEvent) XXX_Size() int { - return xxx_messageInfo_VEvent.Size(m) + return m.Size() } func (m *VEvent) XXX_DiscardUnknown() { xxx_messageInfo_VEvent.DiscardUnknown(m) @@ -1418,18 +1573,26 @@ func (*MinimalTable) ProtoMessage() {} func (*MinimalTable) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{17} } - func (m *MinimalTable) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MinimalTable.Unmarshal(m, b) + 
return m.Unmarshal(b) } func (m *MinimalTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MinimalTable.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MinimalTable.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MinimalTable) XXX_Merge(src proto.Message) { xxx_messageInfo_MinimalTable.Merge(m, src) } func (m *MinimalTable) XXX_Size() int { - return xxx_messageInfo_MinimalTable.Size(m) + return m.Size() } func (m *MinimalTable) XXX_DiscardUnknown() { xxx_messageInfo_MinimalTable.DiscardUnknown(m) @@ -1471,18 +1634,26 @@ func (*MinimalSchema) ProtoMessage() {} func (*MinimalSchema) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{18} } - func (m *MinimalSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MinimalSchema.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MinimalSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MinimalSchema.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MinimalSchema.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MinimalSchema) XXX_Merge(src proto.Message) { xxx_messageInfo_MinimalSchema.Merge(m, src) } func (m *MinimalSchema) XXX_Size() int { - return xxx_messageInfo_MinimalSchema.Size(m) + return m.Size() } func (m *MinimalSchema) XXX_DiscardUnknown() { xxx_messageInfo_MinimalSchema.DiscardUnknown(m) @@ -1516,18 +1687,26 @@ func (*VStreamRequest) ProtoMessage() {} func (*VStreamRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{19} } - func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m 
*VStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamRequest.Merge(m, src) } func (m *VStreamRequest) XXX_Size() int { - return xxx_messageInfo_VStreamRequest.Size(m) + return m.Size() } func (m *VStreamRequest) XXX_DiscardUnknown() { xxx_messageInfo_VStreamRequest.DiscardUnknown(m) @@ -1591,18 +1770,26 @@ func (*VStreamResponse) ProtoMessage() {} func (*VStreamResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{20} } - func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamResponse.Merge(m, src) } func (m *VStreamResponse) XXX_Size() int { - return xxx_messageInfo_VStreamResponse.Size(m) + return m.Size() } func (m *VStreamResponse) XXX_DiscardUnknown() { xxx_messageInfo_VStreamResponse.DiscardUnknown(m) @@ -1635,18 +1822,26 @@ func (*VStreamRowsRequest) ProtoMessage() {} func (*VStreamRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{21} } - func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b) + return 
m.Unmarshal(b) } func (m *VStreamRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamRowsRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VStreamRowsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamRowsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamRowsRequest.Merge(m, src) } func (m *VStreamRowsRequest) XXX_Size() int { - return xxx_messageInfo_VStreamRowsRequest.Size(m) + return m.Size() } func (m *VStreamRowsRequest) XXX_DiscardUnknown() { xxx_messageInfo_VStreamRowsRequest.DiscardUnknown(m) @@ -1707,18 +1902,26 @@ func (*VStreamRowsResponse) ProtoMessage() {} func (*VStreamRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{22} } - func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VStreamRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamRowsResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VStreamRowsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamRowsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamRowsResponse.Merge(m, src) } func (m *VStreamRowsResponse) XXX_Size() int { - return xxx_messageInfo_VStreamRowsResponse.Size(m) + return m.Size() } func (m *VStreamRowsResponse) XXX_DiscardUnknown() { xxx_messageInfo_VStreamRowsResponse.DiscardUnknown(m) @@ -1775,18 +1978,26 @@ func (*LastPKEvent) ProtoMessage() {} func (*LastPKEvent) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{23} } - func (m *LastPKEvent) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LastPKEvent.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *LastPKEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LastPKEvent.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_LastPKEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *LastPKEvent) XXX_Merge(src proto.Message) { xxx_messageInfo_LastPKEvent.Merge(m, src) } func (m *LastPKEvent) XXX_Size() int { - return xxx_messageInfo_LastPKEvent.Size(m) + return m.Size() } func (m *LastPKEvent) XXX_DiscardUnknown() { xxx_messageInfo_LastPKEvent.DiscardUnknown(m) @@ -1822,18 +2033,26 @@ func (*TableLastPK) ProtoMessage() {} func (*TableLastPK) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{24} } - func (m *TableLastPK) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TableLastPK.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *TableLastPK) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TableLastPK.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_TableLastPK.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *TableLastPK) XXX_Merge(src proto.Message) { xxx_messageInfo_TableLastPK.Merge(m, src) } func (m *TableLastPK) XXX_Size() int { - return xxx_messageInfo_TableLastPK.Size(m) + return m.Size() } func (m *TableLastPK) XXX_DiscardUnknown() { xxx_messageInfo_TableLastPK.DiscardUnknown(m) @@ -1874,18 +2093,26 @@ func (*VStreamResultsRequest) ProtoMessage() {} func (*VStreamResultsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{25} } - func (m *VStreamResultsRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_VStreamResultsRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VStreamResultsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamResultsRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VStreamResultsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamResultsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamResultsRequest.Merge(m, src) } func (m *VStreamResultsRequest) XXX_Size() int { - return xxx_messageInfo_VStreamResultsRequest.Size(m) + return m.Size() } func (m *VStreamResultsRequest) XXX_DiscardUnknown() { xxx_messageInfo_VStreamResultsRequest.DiscardUnknown(m) @@ -1938,18 +2165,26 @@ func (*VStreamResultsResponse) ProtoMessage() {} func (*VStreamResultsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5fd02bcb2e350dad, []int{26} } - func (m *VStreamResultsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VStreamResultsResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VStreamResultsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamResultsResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VStreamResultsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamResultsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamResultsResponse.Merge(m, src) } func (m *VStreamResultsResponse) XXX_Size() int { - return xxx_messageInfo_VStreamResultsResponse.Size(m) + return m.Size() } func (m *VStreamResultsResponse) XXX_DiscardUnknown() { xxx_messageInfo_VStreamResultsResponse.DiscardUnknown(m) @@ -2017,125 +2252,7159 @@ func init() { func init() { 
proto.RegisterFile("binlogdata.proto", fileDescriptor_5fd02bcb2e350dad) } var fileDescriptor_5fd02bcb2e350dad = []byte{ - // 1914 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4b, 0x73, 0xe3, 0xc6, - 0x11, 0x5e, 0xbe, 0xc9, 0x06, 0x45, 0x41, 0xa3, 0x47, 0x98, 0x2d, 0xdb, 0x25, 0xa3, 0xb2, 0x5e, - 0x59, 0x55, 0xa1, 0x1c, 0x26, 0xde, 0x5c, 0x62, 0x3b, 0x7c, 0x60, 0xb5, 0x5c, 0xf1, 0xa1, 0x1d, - 0x62, 0xb5, 0x2e, 0x5f, 0x50, 0x58, 0x70, 0x24, 0x21, 0x02, 0x08, 0x2c, 0x30, 0x94, 0xcc, 0x1f, - 0x90, 0xaa, 0xdc, 0xf3, 0x2b, 0x72, 0xce, 0x31, 0xc9, 0x35, 0xf9, 0x13, 0xb9, 0xe6, 0x94, 0x5f, - 0x90, 0x5b, 0x6a, 0x1e, 0x78, 0x69, 0xed, 0x95, 0xd6, 0x55, 0x39, 0xc4, 0x17, 0xd6, 0x4c, 0x4f, - 0x77, 0x4f, 0xbf, 0xbe, 0x46, 0x73, 0x40, 0x7d, 0xed, 0x2c, 0x5d, 0xff, 0x62, 0x61, 0x51, 0xab, - 0x13, 0x84, 0x3e, 0xf5, 0x11, 0xa4, 0x94, 0x87, 0xca, 0x35, 0x0d, 0x03, 0x5b, 0x1c, 0x3c, 0x54, - 0xde, 0xac, 0x48, 0xb8, 0x96, 0x9b, 0x16, 0xf5, 0x03, 0x3f, 0x95, 0xd2, 0x26, 0x50, 0x1b, 0x5c, - 0x5a, 0x61, 0x44, 0x28, 0xda, 0x83, 0xaa, 0xed, 0x3a, 0x64, 0x49, 0xdb, 0x85, 0xfd, 0xc2, 0x41, - 0x05, 0xcb, 0x1d, 0x42, 0x50, 0xb6, 0xfd, 0xe5, 0xb2, 0x5d, 0xe4, 0x54, 0xbe, 0x66, 0xbc, 0x11, - 0x09, 0xaf, 0x49, 0xd8, 0x2e, 0x09, 0x5e, 0xb1, 0xd3, 0xfe, 0x55, 0x82, 0xad, 0x3e, 0xb7, 0xc3, - 0x08, 0xad, 0x65, 0x64, 0xd9, 0xd4, 0xf1, 0x97, 0xe8, 0x18, 0x20, 0xa2, 0x16, 0x25, 0x1e, 0x59, - 0xd2, 0xa8, 0x5d, 0xd8, 0x2f, 0x1d, 0x28, 0xdd, 0xc7, 0x9d, 0x8c, 0x07, 0x6f, 0x89, 0x74, 0xe6, - 0x31, 0x3f, 0xce, 0x88, 0xa2, 0x2e, 0x28, 0xe4, 0x9a, 0x2c, 0xa9, 0x49, 0xfd, 0x2b, 0xb2, 0x6c, - 0x97, 0xf7, 0x0b, 0x07, 0x4a, 0x77, 0xab, 0x23, 0x1c, 0xd4, 0xd9, 0x89, 0xc1, 0x0e, 0x30, 0x90, - 0x64, 0xfd, 0xf0, 0xef, 0x45, 0x68, 0x24, 0xda, 0xd0, 0x18, 0xea, 0xb6, 0x45, 0xc9, 0x85, 0x1f, - 0xae, 0xb9, 0x9b, 0xad, 0xee, 0x67, 0xf7, 0x34, 0xa4, 0x33, 0x90, 0x72, 0x38, 0xd1, 0x80, 0x7e, - 0x0e, 0x35, 0x5b, 0x44, 0x8f, 0x47, 0x47, 0xe9, 0x6e, 0x67, 
0x95, 0xc9, 0xc0, 0xe2, 0x98, 0x07, - 0xa9, 0x50, 0x8a, 0xde, 0xb8, 0x3c, 0x64, 0x4d, 0xcc, 0x96, 0xda, 0x9f, 0x0a, 0x50, 0x8f, 0xf5, - 0xa2, 0x6d, 0xd8, 0xec, 0x8f, 0xcd, 0x97, 0x53, 0xac, 0x0f, 0x66, 0xc7, 0xd3, 0xd1, 0x37, 0xfa, - 0x50, 0x7d, 0x80, 0x9a, 0x50, 0xef, 0x8f, 0xcd, 0xbe, 0x7e, 0x3c, 0x9a, 0xaa, 0x05, 0xb4, 0x01, - 0x8d, 0xfe, 0xd8, 0x1c, 0xcc, 0x26, 0x93, 0x91, 0xa1, 0x16, 0xd1, 0x26, 0x28, 0xfd, 0xb1, 0x89, - 0x67, 0xe3, 0x71, 0xbf, 0x37, 0x38, 0x51, 0x4b, 0x68, 0x17, 0xb6, 0xfa, 0x63, 0x73, 0x38, 0x19, - 0x9b, 0x43, 0xfd, 0x14, 0xeb, 0x83, 0x9e, 0xa1, 0x0f, 0xd5, 0x32, 0x02, 0xa8, 0x32, 0xf2, 0x70, - 0xac, 0x56, 0xe4, 0x7a, 0xae, 0x1b, 0x6a, 0x55, 0xaa, 0x1b, 0x4d, 0xe7, 0x3a, 0x36, 0xd4, 0x9a, - 0xdc, 0xbe, 0x3c, 0x1d, 0xf6, 0x0c, 0x5d, 0xad, 0xcb, 0xed, 0x50, 0x1f, 0xeb, 0x86, 0xae, 0x36, - 0x9e, 0x97, 0xeb, 0x45, 0xb5, 0xf4, 0xbc, 0x5c, 0x2f, 0xa9, 0x65, 0xed, 0x8f, 0x05, 0xd8, 0x9d, - 0xd3, 0x90, 0x58, 0xde, 0x09, 0x59, 0x63, 0x6b, 0x79, 0x41, 0x30, 0x79, 0xb3, 0x22, 0x11, 0x45, - 0x0f, 0xa1, 0x1e, 0xf8, 0x91, 0xc3, 0x62, 0xc7, 0x03, 0xdc, 0xc0, 0xc9, 0x1e, 0x1d, 0x41, 0xe3, - 0x8a, 0xac, 0xcd, 0x90, 0xf1, 0xcb, 0x80, 0xa1, 0x4e, 0x52, 0x90, 0x89, 0xa6, 0xfa, 0x95, 0x5c, - 0x65, 0xe3, 0x5b, 0xba, 0x3b, 0xbe, 0xda, 0x39, 0xec, 0xdd, 0x36, 0x2a, 0x0a, 0xfc, 0x65, 0x44, - 0xd0, 0x18, 0x90, 0x10, 0x34, 0x69, 0x9a, 0x5b, 0x6e, 0x9f, 0xd2, 0xfd, 0xf0, 0x9d, 0x05, 0x80, - 0xb7, 0x5e, 0xdf, 0x26, 0x69, 0xdf, 0xc2, 0xb6, 0xb8, 0xc7, 0xb0, 0x5e, 0xbb, 0x24, 0xba, 0x8f, - 0xeb, 0x7b, 0x50, 0xa5, 0x9c, 0xb9, 0x5d, 0xdc, 0x2f, 0x1d, 0x34, 0xb0, 0xdc, 0xbd, 0xaf, 0x87, - 0x0b, 0xd8, 0xc9, 0xdf, 0xfc, 0x3f, 0xf1, 0xef, 0x57, 0x50, 0xc6, 0x2b, 0x97, 0xa0, 0x1d, 0xa8, - 0x78, 0x16, 0xb5, 0x2f, 0xa5, 0x37, 0x62, 0xc3, 0x5c, 0x39, 0x77, 0x5c, 0x4a, 0x42, 0x9e, 0xc2, - 0x06, 0x96, 0x3b, 0xed, 0xcf, 0x05, 0xa8, 0x3e, 0xe5, 0x4b, 0xf4, 0x09, 0x54, 0xc2, 0x15, 0x73, - 0x56, 0x60, 0x5d, 0xcd, 0x5a, 0xc0, 0x34, 0x63, 0x71, 0x8c, 0x46, 0xd0, 0x3a, 0x77, 0x88, 0xbb, - 
0xe0, 0xd0, 0x9d, 0xf8, 0x0b, 0x51, 0x15, 0xad, 0xee, 0xc7, 0x59, 0x01, 0xa1, 0xb3, 0xf3, 0x34, - 0xc7, 0x88, 0x6f, 0x09, 0x6a, 0x4f, 0xa0, 0x95, 0xe7, 0x60, 0x70, 0xd2, 0x31, 0x36, 0x67, 0x53, - 0x73, 0x32, 0x9a, 0x4f, 0x7a, 0xc6, 0xe0, 0x99, 0xfa, 0x80, 0x23, 0x46, 0x9f, 0x1b, 0xa6, 0xfe, - 0xf4, 0xe9, 0x0c, 0x1b, 0x6a, 0x41, 0xfb, 0x77, 0x11, 0x9a, 0x22, 0x28, 0x73, 0x7f, 0x15, 0xda, - 0x84, 0x65, 0xf1, 0x8a, 0xac, 0xa3, 0xc0, 0xb2, 0x49, 0x9c, 0xc5, 0x78, 0xcf, 0x02, 0x12, 0x5d, - 0x5a, 0xe1, 0x42, 0x7a, 0x2e, 0x36, 0xe8, 0x73, 0x50, 0x78, 0x36, 0xa9, 0x49, 0xd7, 0x01, 0xe1, - 0x79, 0x6c, 0x75, 0x77, 0xd2, 0xc2, 0xe6, 0xb9, 0xa2, 0xc6, 0x3a, 0x20, 0x18, 0x68, 0xb2, 0xce, - 0xa3, 0xa1, 0x7c, 0x0f, 0x34, 0xa4, 0x35, 0x54, 0xc9, 0xd5, 0xd0, 0x61, 0x92, 0x90, 0xaa, 0xd4, - 0xf2, 0x56, 0xf4, 0xe2, 0x24, 0xa1, 0x0e, 0x54, 0xfd, 0xa5, 0xb9, 0x58, 0xb8, 0xed, 0x1a, 0x37, - 0xf3, 0x27, 0x59, 0xde, 0xd9, 0x72, 0x38, 0x1c, 0xf7, 0x44, 0x59, 0x54, 0xfc, 0xe5, 0x70, 0xe1, - 0xa2, 0x47, 0xd0, 0x22, 0xdf, 0x52, 0x12, 0x2e, 0x2d, 0xd7, 0xf4, 0xd6, 0xac, 0x7b, 0xd5, 0xb9, - 0xeb, 0x1b, 0x31, 0x75, 0xc2, 0x88, 0xe8, 0x13, 0xd8, 0x8c, 0xa8, 0x1f, 0x98, 0xd6, 0x39, 0x25, - 0xa1, 0x69, 0xfb, 0xc1, 0xba, 0xdd, 0xd8, 0x2f, 0x1c, 0xd4, 0xf1, 0x06, 0x23, 0xf7, 0x18, 0x75, - 0xe0, 0x07, 0x6b, 0xed, 0x05, 0x34, 0xb0, 0x7f, 0x33, 0xb8, 0xe4, 0xfe, 0x68, 0x50, 0x7d, 0x4d, - 0xce, 0xfd, 0x90, 0xc8, 0x42, 0x05, 0xd9, 0xc8, 0xb1, 0x7f, 0x83, 0xe5, 0x09, 0xda, 0x87, 0x0a, - 0xd7, 0x29, 0xdb, 0x45, 0x96, 0x45, 0x1c, 0x68, 0x16, 0xd4, 0xb1, 0x7f, 0xc3, 0xd3, 0x8e, 0x3e, - 0x04, 0x11, 0x60, 0x73, 0x69, 0x79, 0x71, 0xf6, 0x1a, 0x9c, 0x32, 0xb5, 0x3c, 0x82, 0x9e, 0x80, - 0x12, 0xfa, 0x37, 0xa6, 0xcd, 0xaf, 0x17, 0x48, 0x54, 0xba, 0xbb, 0xb9, 0xe2, 0x8c, 0x8d, 0xc3, - 0x10, 0xc6, 0xcb, 0x48, 0x7b, 0x01, 0x90, 0xd6, 0xd6, 0x5d, 0x97, 0xfc, 0x8c, 0x65, 0x83, 0xb8, - 0x8b, 0x58, 0x7f, 0x53, 0x9a, 0xcc, 0x35, 0x60, 0x79, 0xa6, 0xfd, 0xa1, 0x00, 0x8d, 0x39, 0xab, - 0x9e, 0x63, 0xea, 0x2c, 0x7e, 0x40, 
0xcd, 0x21, 0x28, 0x5f, 0x50, 0x67, 0xc1, 0x8b, 0xad, 0x81, - 0xf9, 0x1a, 0x7d, 0x1e, 0x1b, 0x16, 0x98, 0x57, 0x51, 0xbb, 0xcc, 0x6f, 0xcf, 0xe5, 0x97, 0x17, - 0xe2, 0xd8, 0x8a, 0xe8, 0xe9, 0x09, 0xae, 0x73, 0xd6, 0xd3, 0x93, 0x48, 0xfb, 0x0a, 0x2a, 0x67, - 0xdc, 0x8a, 0x27, 0xa0, 0x70, 0xe5, 0x26, 0xd3, 0x16, 0x63, 0x37, 0x17, 0x9e, 0xc4, 0x62, 0x0c, - 0x51, 0xbc, 0x8c, 0xb4, 0x1e, 0x6c, 0x9c, 0x48, 0x6b, 0x39, 0xc3, 0xfb, 0xbb, 0xa3, 0xfd, 0xb5, - 0x08, 0xb5, 0xe7, 0xfe, 0x8a, 0x15, 0x14, 0x6a, 0x41, 0xd1, 0x59, 0x70, 0xb9, 0x12, 0x2e, 0x3a, - 0x0b, 0xf4, 0x5b, 0x68, 0x79, 0xce, 0x45, 0x68, 0xb1, 0xb2, 0x14, 0x08, 0x13, 0x4d, 0xe2, 0xa7, - 0x59, 0xcb, 0x26, 0x31, 0x07, 0x87, 0xd9, 0x86, 0x97, 0xdd, 0x66, 0x80, 0x53, 0xca, 0x01, 0xe7, - 0x11, 0xb4, 0x5c, 0xdf, 0xb6, 0x5c, 0x33, 0x69, 0xdb, 0x65, 0x51, 0xdc, 0x9c, 0x7a, 0x1a, 0xf7, - 0xee, 0x5b, 0x71, 0xa9, 0xdc, 0x33, 0x2e, 0xe8, 0x0b, 0x68, 0x06, 0x56, 0x48, 0x1d, 0xdb, 0x09, - 0x2c, 0x36, 0xf8, 0x54, 0xb9, 0x60, 0xce, 0xec, 0x5c, 0xdc, 0x70, 0x8e, 0x1d, 0x7d, 0x0a, 0x6a, - 0xc4, 0x5b, 0x92, 0x79, 0xe3, 0x87, 0x57, 0xe7, 0xae, 0x7f, 0x13, 0xb5, 0x6b, 0xdc, 0xfe, 0x4d, - 0x41, 0x7f, 0x15, 0x93, 0xb5, 0xbf, 0x94, 0xa0, 0x7a, 0x26, 0xaa, 0xf3, 0x10, 0xca, 0x3c, 0x46, - 0x62, 0xb8, 0xd9, 0xcb, 0x5e, 0x26, 0x38, 0x78, 0x80, 0x38, 0x0f, 0xfa, 0x00, 0x1a, 0xd4, 0xf1, - 0x48, 0x44, 0x2d, 0x2f, 0xe0, 0x41, 0x2d, 0xe1, 0x94, 0xf0, 0x9d, 0x25, 0xf6, 0x01, 0x34, 0x92, - 0x71, 0x4c, 0x06, 0x2b, 0x25, 0xa0, 0x5f, 0x40, 0x83, 0xe1, 0x8b, 0x0f, 0x5f, 0xed, 0x0a, 0x07, - 0xec, 0xce, 0x2d, 0x74, 0x71, 0x13, 0x70, 0x3d, 0x8c, 0x11, 0xfb, 0x6b, 0x50, 0x38, 0x22, 0xa4, - 0x90, 0x68, 0x60, 0x7b, 0xf9, 0x06, 0x16, 0x23, 0x0f, 0x43, 0xda, 0xf3, 0xd1, 0x63, 0xa8, 0x5c, - 0x73, 0xf3, 0x6a, 0x72, 0x08, 0xcc, 0x3a, 0xca, 0x53, 0x21, 0xce, 0xd9, 0x17, 0xf6, 0x77, 0xa2, - 0xb2, 0x78, 0xeb, 0xba, 0xf5, 0x85, 0x95, 0x45, 0x87, 0x63, 0x1e, 0x36, 0xa3, 0x2d, 0x3c, 0x97, - 0x77, 0xaf, 0x06, 0x66, 0x4b, 0xf4, 0x31, 0x34, 0xed, 0x55, 0x18, 0xf2, 
0xb1, 0xd3, 0xf1, 0x48, - 0x7b, 0x87, 0x07, 0x4a, 0x91, 0x34, 0xc3, 0xf1, 0x08, 0xfa, 0x0d, 0xb4, 0x5c, 0x2b, 0xa2, 0x0c, - 0x78, 0xd2, 0x91, 0x5d, 0x7e, 0x55, 0x0e, 0x7d, 0x02, 0x78, 0xc2, 0x13, 0xc5, 0x4d, 0x37, 0xda, - 0x25, 0x34, 0x27, 0xce, 0xd2, 0xf1, 0x2c, 0x97, 0x03, 0x94, 0x05, 0x3e, 0xd3, 0x5a, 0xf8, 0xfa, - 0x7e, 0x5d, 0x05, 0x7d, 0x04, 0x0a, 0x33, 0xc1, 0xf6, 0xdd, 0x95, 0xb7, 0x14, 0xd5, 0x5e, 0xc2, - 0x8d, 0xe0, 0x64, 0x20, 0x08, 0x0c, 0xa9, 0xf2, 0xa6, 0xb9, 0x7d, 0x49, 0x3c, 0x0b, 0x7d, 0x96, - 0x20, 0x43, 0xa0, 0xbd, 0x9d, 0xc7, 0x54, 0x6a, 0x54, 0x8c, 0x19, 0xed, 0x1f, 0x45, 0x68, 0x9d, - 0x89, 0x19, 0x24, 0x9e, 0x7b, 0xbe, 0x82, 0x6d, 0x72, 0x7e, 0x4e, 0x6c, 0xea, 0x5c, 0x13, 0xd3, - 0xb6, 0x5c, 0x97, 0x84, 0xa6, 0x44, 0xb0, 0xd2, 0xdd, 0xec, 0x88, 0xff, 0x22, 0x03, 0x4e, 0x1f, - 0x0d, 0xf1, 0x56, 0xc2, 0x2b, 0x49, 0x0b, 0xa4, 0xc3, 0xb6, 0xe3, 0x79, 0x64, 0xe1, 0x58, 0x34, - 0xab, 0x40, 0xb4, 0xfc, 0x5d, 0xe9, 0xe9, 0x99, 0x71, 0x6c, 0x51, 0x92, 0xaa, 0x49, 0x24, 0x12, - 0x35, 0x8f, 0x98, 0x33, 0xe1, 0x45, 0x32, 0x4a, 0x6d, 0x48, 0x49, 0x83, 0x13, 0xb1, 0x3c, 0xcc, - 0x8d, 0x69, 0xe5, 0x5b, 0x63, 0x5a, 0xfa, 0x29, 0xad, 0xdc, 0xf9, 0x29, 0xfd, 0x12, 0x36, 0x45, - 0xbb, 0x8d, 0x53, 0x1f, 0x23, 0xfc, 0x7b, 0x7b, 0x6e, 0x93, 0xa6, 0x9b, 0x48, 0xfb, 0x02, 0x36, - 0x93, 0x40, 0xca, 0x31, 0xee, 0x10, 0xaa, 0xbc, 0x7c, 0xe2, 0x74, 0xa0, 0xb7, 0xe1, 0x8b, 0x25, - 0x87, 0xf6, 0xfb, 0x22, 0xa0, 0x58, 0xde, 0xbf, 0x89, 0xfe, 0x4f, 0x93, 0xb1, 0x03, 0x15, 0x4e, - 0x97, 0x99, 0x10, 0x1b, 0x16, 0x07, 0x16, 0xd4, 0xe0, 0x2a, 0x49, 0x83, 0x10, 0x7e, 0xc1, 0x7e, - 0x31, 0x89, 0x56, 0x2e, 0xc5, 0x92, 0x43, 0xfb, 0x5b, 0x01, 0xb6, 0x73, 0x71, 0x90, 0xb1, 0x4c, - 0x11, 0x53, 0x78, 0x07, 0x62, 0x0e, 0xa0, 0x1e, 0x5c, 0xbd, 0x03, 0x59, 0xc9, 0xe9, 0x77, 0xb6, - 0xc3, 0x8f, 0xa0, 0x1c, 0xb2, 0xb6, 0x2c, 0xbe, 0xb5, 0xd9, 0xe1, 0x84, 0xd3, 0xd9, 0x84, 0x93, - 0xf3, 0x23, 0x37, 0xe1, 0x48, 0xfb, 0x1d, 0x50, 0x32, 0x9d, 0x81, 0xb5, 0x92, 0x7c, 0x55, 0xc9, - 0xd4, 0x7d, 
0x6f, 0x51, 0x29, 0x99, 0xa2, 0x62, 0xfd, 0xd9, 0xf6, 0xbd, 0xc0, 0x25, 0x94, 0x88, - 0x94, 0xd5, 0x71, 0x4a, 0xd0, 0xbe, 0x06, 0x25, 0x23, 0x79, 0xd7, 0x20, 0x93, 0x26, 0xa1, 0x74, - 0x67, 0x12, 0xfe, 0x59, 0x80, 0xdd, 0xb4, 0x98, 0x57, 0x2e, 0xfd, 0x51, 0xd5, 0xa3, 0x16, 0xc2, - 0xde, 0x6d, 0xef, 0xde, 0xab, 0xca, 0x7e, 0x40, 0xed, 0x1c, 0x7e, 0x09, 0x4a, 0x66, 0x1e, 0x67, - 0x7f, 0xdb, 0x47, 0xc7, 0xd3, 0x19, 0xd6, 0xd5, 0x07, 0xa8, 0x0e, 0xe5, 0xb9, 0x31, 0x3b, 0x55, - 0x0b, 0x6c, 0xa5, 0x7f, 0xad, 0x0f, 0xc4, 0x53, 0x00, 0x5b, 0x99, 0x92, 0xa9, 0x74, 0xf8, 0x9f, - 0x02, 0x40, 0xfa, 0xc5, 0x47, 0x0a, 0xd4, 0x5e, 0x4e, 0x4f, 0xa6, 0xb3, 0x57, 0x53, 0xa1, 0xe0, - 0xd8, 0x18, 0x0d, 0xd5, 0x02, 0x6a, 0x40, 0x45, 0xbc, 0x2d, 0x14, 0xd9, 0x0d, 0xf2, 0x61, 0xa1, - 0x84, 0x9a, 0x50, 0x4f, 0x5e, 0x15, 0xca, 0xa8, 0x06, 0xa5, 0xe4, 0xed, 0x40, 0x3e, 0x16, 0x54, - 0x99, 0x42, 0xac, 0x9f, 0x8e, 0x7b, 0x03, 0x5d, 0xad, 0xb1, 0x83, 0xe4, 0xd9, 0x00, 0xa0, 0x1a, - 0xbf, 0x19, 0x30, 0xc9, 0xb9, 0x6e, 0xa8, 0xc0, 0xee, 0x99, 0x19, 0xcf, 0x74, 0xac, 0x2a, 0x8c, - 0x86, 0x67, 0xaf, 0xd4, 0x26, 0xa3, 0x3d, 0x1d, 0xe9, 0xe3, 0xa1, 0xba, 0x81, 0x36, 0xa0, 0xf1, - 0x4c, 0xef, 0x61, 0xa3, 0xaf, 0xf7, 0x0c, 0xb5, 0xc5, 0x4e, 0xce, 0xb8, 0x81, 0x9b, 0xec, 0x9a, - 0xe7, 0xb3, 0x97, 0x78, 0xda, 0x1b, 0xab, 0x2a, 0xdb, 0x9c, 0xe9, 0x78, 0x3e, 0x9a, 0x4d, 0xd5, - 0x2d, 0x76, 0xcf, 0xb8, 0x37, 0x37, 0x4e, 0x4f, 0x54, 0xc4, 0xe4, 0xe7, 0xbd, 0x33, 0xfd, 0x74, - 0x36, 0x9a, 0x1a, 0xea, 0xf6, 0xe1, 0x63, 0xf6, 0x9d, 0xcb, 0x4e, 0x80, 0x00, 0x55, 0xa3, 0xd7, - 0x1f, 0xeb, 0x73, 0xf5, 0x01, 0x5b, 0xcf, 0x9f, 0xf5, 0xf0, 0x70, 0xae, 0x16, 0xfa, 0x9f, 0x7e, - 0xf3, 0xf8, 0xda, 0xa1, 0x24, 0x8a, 0x3a, 0x8e, 0x7f, 0x24, 0x56, 0x47, 0x17, 0xfe, 0xd1, 0x35, - 0x3d, 0xe2, 0xcf, 0x63, 0x47, 0x29, 0xe6, 0x5e, 0x57, 0x39, 0xe5, 0x97, 0xff, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x23, 0xf6, 0xf5, 0x62, 0x7a, 0x13, 0x00, 0x00, + // 1955 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4b, 0x6f, 0x23, 0x59, + 0x15, 0xee, 0xf2, 0xdb, 0xa7, 0x1c, 0xa7, 0x72, 0xf3, 0xc0, 0xb4, 0x66, 0xa2, 0x4c, 0x89, 0x99, + 0x0e, 0x91, 0x70, 0x06, 0xc3, 0x34, 0x42, 0x62, 0x66, 0xf0, 0xa3, 0x3a, 0xed, 0x8e, 0x1f, 0xe9, + 0xeb, 0xea, 0xf4, 0x68, 0x36, 0xa5, 0x4a, 0xf9, 0x26, 0x29, 0x52, 0x76, 0xb9, 0xab, 0xae, 0x93, + 0xf1, 0x0f, 0x40, 0x62, 0x8f, 0x84, 0xf8, 0x0b, 0xac, 0x59, 0x02, 0x5b, 0x60, 0xc9, 0x0f, 0x60, + 0x81, 0x1a, 0xf1, 0x23, 0xd8, 0xa1, 0xfb, 0xa8, 0x57, 0x7a, 0xa6, 0x93, 0x1e, 0x89, 0x05, 0x6c, + 0xac, 0x7b, 0xcf, 0x3d, 0xe7, 0xdc, 0xf3, 0xfa, 0x4e, 0x1d, 0x5f, 0xd0, 0xce, 0xdc, 0xb9, 0xe7, + 0x5f, 0x4c, 0x6d, 0x6a, 0x37, 0x17, 0x81, 0x4f, 0x7d, 0x04, 0x09, 0xe5, 0xa1, 0x7a, 0x4d, 0x83, + 0x85, 0x23, 0x0e, 0x1e, 0xaa, 0xaf, 0x96, 0x24, 0x58, 0xc9, 0x4d, 0x9d, 0xfa, 0x0b, 0x3f, 0x91, + 0xd2, 0x87, 0x50, 0xee, 0x5e, 0xda, 0x41, 0x48, 0x28, 0xda, 0x81, 0x92, 0xe3, 0xb9, 0x64, 0x4e, + 0x1b, 0xca, 0x9e, 0xb2, 0x5f, 0xc4, 0x72, 0x87, 0x10, 0x14, 0x1c, 0x7f, 0x3e, 0x6f, 0xe4, 0x38, + 0x95, 0xaf, 0x19, 0x6f, 0x48, 0x82, 0x6b, 0x12, 0x34, 0xf2, 0x82, 0x57, 0xec, 0xf4, 0x7f, 0xe5, + 0x61, 0xa3, 0xc3, 0xed, 0x30, 0x03, 0x7b, 0x1e, 0xda, 0x0e, 0x75, 0xfd, 0x39, 0x3a, 0x02, 0x08, + 0xa9, 0x4d, 0xc9, 0x8c, 0xcc, 0x69, 0xd8, 0x50, 0xf6, 0xf2, 0xfb, 0x6a, 0xeb, 0x51, 0x33, 0xe5, + 0xc1, 0x1b, 0x22, 0xcd, 0x49, 0xc4, 0x8f, 0x53, 0xa2, 0xa8, 0x05, 0x2a, 0xb9, 0x26, 0x73, 0x6a, + 0x51, 0xff, 0x8a, 0xcc, 0x1b, 0x85, 0x3d, 0x65, 0x5f, 0x6d, 0x6d, 0x34, 0x85, 0x83, 0x06, 0x3b, + 0x31, 0xd9, 0x01, 0x06, 0x12, 0xaf, 0x1f, 0xfe, 0x39, 0x07, 0xd5, 0x58, 0x1b, 0x1a, 0x40, 0xc5, + 0xb1, 0x29, 0xb9, 0xf0, 0x83, 0x15, 0x77, 0xb3, 0xde, 0xfa, 0xf8, 0x9e, 0x86, 0x34, 0xbb, 0x52, + 0x0e, 0xc7, 0x1a, 0xd0, 0x0f, 0xa0, 0xec, 0x88, 0xe8, 0xf1, 0xe8, 0xa8, 0xad, 0xcd, 0xb4, 0x32, + 0x19, 0x58, 0x1c, 0xf1, 0x20, 0x0d, 0xf2, 0xe1, 0x2b, 0x8f, 0x87, 0xac, 0x86, 0xd9, 0x52, 0xff, + 0x9d, 0x02, 0x95, 0x48, 0x2f, 0xda, 0x84, 0xf5, 0xce, 0xc0, 0x7a, 0x31, 
0xc2, 0x46, 0x77, 0x7c, + 0x34, 0xea, 0x7f, 0x69, 0xf4, 0xb4, 0x07, 0xa8, 0x06, 0x95, 0xce, 0xc0, 0xea, 0x18, 0x47, 0xfd, + 0x91, 0xa6, 0xa0, 0x35, 0xa8, 0x76, 0x06, 0x56, 0x77, 0x3c, 0x1c, 0xf6, 0x4d, 0x2d, 0x87, 0xd6, + 0x41, 0xed, 0x0c, 0x2c, 0x3c, 0x1e, 0x0c, 0x3a, 0xed, 0xee, 0xb1, 0x96, 0x47, 0xdb, 0xb0, 0xd1, + 0x19, 0x58, 0xbd, 0xe1, 0xc0, 0xea, 0x19, 0x27, 0xd8, 0xe8, 0xb6, 0x4d, 0xa3, 0xa7, 0x15, 0x10, + 0x40, 0x89, 0x91, 0x7b, 0x03, 0xad, 0x28, 0xd7, 0x13, 0xc3, 0xd4, 0x4a, 0x52, 0x5d, 0x7f, 0x34, + 0x31, 0xb0, 0xa9, 0x95, 0xe5, 0xf6, 0xc5, 0x49, 0xaf, 0x6d, 0x1a, 0x5a, 0x45, 0x6e, 0x7b, 0xc6, + 0xc0, 0x30, 0x0d, 0xad, 0xfa, 0xac, 0x50, 0xc9, 0x69, 0xf9, 0x67, 0x85, 0x4a, 0x5e, 0x2b, 0xe8, + 0xbf, 0x56, 0x60, 0x7b, 0x42, 0x03, 0x62, 0xcf, 0x8e, 0xc9, 0x0a, 0xdb, 0xf3, 0x0b, 0x82, 0xc9, + 0xab, 0x25, 0x09, 0x29, 0x7a, 0x08, 0x95, 0x85, 0x1f, 0xba, 0x2c, 0x76, 0x3c, 0xc0, 0x55, 0x1c, + 0xef, 0xd1, 0x21, 0x54, 0xaf, 0xc8, 0xca, 0x0a, 0x18, 0xbf, 0x0c, 0x18, 0x6a, 0xc6, 0x05, 0x19, + 0x6b, 0xaa, 0x5c, 0xc9, 0x55, 0x3a, 0xbe, 0xf9, 0xbb, 0xe3, 0xab, 0x9f, 0xc3, 0xce, 0x6d, 0xa3, + 0xc2, 0x85, 0x3f, 0x0f, 0x09, 0x1a, 0x00, 0x12, 0x82, 0x16, 0x4d, 0x72, 0xcb, 0xed, 0x53, 0x5b, + 0xef, 0xbf, 0xb5, 0x00, 0xf0, 0xc6, 0xd9, 0x6d, 0x92, 0xfe, 0x15, 0x6c, 0x8a, 0x7b, 0x4c, 0xfb, + 0xcc, 0x23, 0xe1, 0x7d, 0x5c, 0xdf, 0x81, 0x12, 0xe5, 0xcc, 0x8d, 0xdc, 0x5e, 0x7e, 0xbf, 0x8a, + 0xe5, 0xee, 0x5d, 0x3d, 0x9c, 0xc2, 0x56, 0xf6, 0xe6, 0xff, 0x8a, 0x7f, 0x3f, 0x86, 0x02, 0x5e, + 0x7a, 0x04, 0x6d, 0x41, 0x71, 0x66, 0x53, 0xe7, 0x52, 0x7a, 0x23, 0x36, 0xcc, 0x95, 0x73, 0xd7, + 0xa3, 0x24, 0xe0, 0x29, 0xac, 0x62, 0xb9, 0xd3, 0x7f, 0xaf, 0x40, 0xe9, 0x09, 0x5f, 0xa2, 0x8f, + 0xa0, 0x18, 0x2c, 0x99, 0xb3, 0x02, 0xeb, 0x5a, 0xda, 0x02, 0xa6, 0x19, 0x8b, 0x63, 0xd4, 0x87, + 0xfa, 0xb9, 0x4b, 0xbc, 0x29, 0x87, 0xee, 0xd0, 0x9f, 0x8a, 0xaa, 0xa8, 0xb7, 0x3e, 0x48, 0x0b, + 0x08, 0x9d, 0xcd, 0x27, 0x19, 0x46, 0x7c, 0x4b, 0x50, 0x7f, 0x0c, 0xf5, 0x2c, 0x07, 0x83, 0x93, + 0x81, 0xb1, 
0x35, 0x1e, 0x59, 0xc3, 0xfe, 0x64, 0xd8, 0x36, 0xbb, 0x4f, 0xb5, 0x07, 0x1c, 0x31, + 0xc6, 0xc4, 0xb4, 0x8c, 0x27, 0x4f, 0xc6, 0xd8, 0xd4, 0x14, 0xfd, 0x37, 0x79, 0xa8, 0x89, 0xa0, + 0x4c, 0xfc, 0x65, 0xe0, 0x10, 0x96, 0xc5, 0x2b, 0xb2, 0x0a, 0x17, 0xb6, 0x43, 0xa2, 0x2c, 0x46, + 0x7b, 0x16, 0x90, 0xf0, 0xd2, 0x0e, 0xa6, 0xd2, 0x73, 0xb1, 0x41, 0x9f, 0x80, 0xca, 0xb3, 0x49, + 0x2d, 0xba, 0x5a, 0x10, 0x9e, 0xc7, 0x7a, 0x6b, 0x2b, 0x29, 0x6c, 0x9e, 0x2b, 0x6a, 0xae, 0x16, + 0x04, 0x03, 0x8d, 0xd7, 0x59, 0x34, 0x14, 0xee, 0x81, 0x86, 0xa4, 0x86, 0x8a, 0x99, 0x1a, 0x3a, + 0x88, 0x13, 0x52, 0x92, 0x5a, 0xde, 0x88, 0x5e, 0x94, 0x24, 0xd4, 0x84, 0x92, 0x3f, 0xb7, 0xa6, + 0x53, 0xaf, 0x51, 0xe6, 0x66, 0x7e, 0x27, 0xcd, 0x3b, 0x9e, 0xf7, 0x7a, 0x83, 0xb6, 0x28, 0x8b, + 0xa2, 0x3f, 0xef, 0x4d, 0x3d, 0xf4, 0x21, 0xd4, 0xc9, 0x57, 0x94, 0x04, 0x73, 0xdb, 0xb3, 0x66, + 0x2b, 0xd6, 0xbd, 0x2a, 0xdc, 0xf5, 0xb5, 0x88, 0x3a, 0x64, 0x44, 0xf4, 0x11, 0xac, 0x87, 0xd4, + 0x5f, 0x58, 0xf6, 0x39, 0x25, 0x81, 0xe5, 0xf8, 0x8b, 0x55, 0xa3, 0xba, 0xa7, 0xec, 0x57, 0xf0, + 0x1a, 0x23, 0xb7, 0x19, 0xb5, 0xeb, 0x2f, 0x56, 0xe8, 0xfb, 0xa0, 0xc5, 0xea, 0x1c, 0x6f, 0x19, + 0x32, 0xa3, 0x81, 0x2b, 0x5c, 0x8f, 0xe8, 0x5d, 0x41, 0xd6, 0x9f, 0x43, 0x15, 0xfb, 0x37, 0xdd, + 0x4b, 0xee, 0xba, 0x0e, 0xa5, 0x33, 0x72, 0xee, 0x07, 0x44, 0xd6, 0x34, 0xc8, 0x9e, 0x8f, 0xfd, + 0x1b, 0x2c, 0x4f, 0xd0, 0x1e, 0x14, 0xf9, 0xf5, 0xb2, 0xb3, 0xa4, 0x59, 0xc4, 0x81, 0x6e, 0x43, + 0x05, 0xfb, 0x37, 0xbc, 0x42, 0xd0, 0xfb, 0x20, 0x72, 0x61, 0xcd, 0xed, 0x59, 0x94, 0xe8, 0x2a, + 0xa7, 0x8c, 0xec, 0x19, 0x41, 0x8f, 0x41, 0x0d, 0xfc, 0x1b, 0xcb, 0xe1, 0xd7, 0x0b, 0xd0, 0xaa, + 0xad, 0xed, 0x4c, 0x1d, 0x47, 0xc6, 0x61, 0x08, 0xa2, 0x65, 0xa8, 0x3f, 0x07, 0x48, 0xca, 0xf0, + 0xae, 0x4b, 0xbe, 0xc7, 0x12, 0x47, 0xbc, 0x69, 0xa4, 0xbf, 0x26, 0x4d, 0xe6, 0x1a, 0xb0, 0x3c, + 0xd3, 0x7f, 0xa5, 0x40, 0x75, 0xc2, 0x0a, 0xed, 0x88, 0xba, 0xd3, 0x6f, 0x51, 0x9e, 0x08, 0x0a, + 0x17, 0xd4, 0x9d, 0xf2, 0xba, 0xac, 0x62, 0xbe, 
0x46, 0x9f, 0x44, 0x86, 0x2d, 0xac, 0xab, 0xb0, + 0x51, 0xe0, 0xb7, 0x67, 0x4a, 0x81, 0xd7, 0xec, 0xc0, 0x0e, 0xe9, 0xc9, 0x31, 0xae, 0x70, 0xd6, + 0x93, 0xe3, 0x50, 0xff, 0x1c, 0x8a, 0xa7, 0xdc, 0x8a, 0xc7, 0xa0, 0x72, 0xe5, 0x16, 0xd3, 0x16, + 0xc1, 0x3c, 0x13, 0x9e, 0xd8, 0x62, 0x0c, 0x61, 0xb4, 0x0c, 0xf5, 0x36, 0xac, 0x1d, 0x4b, 0x6b, + 0x39, 0xc3, 0xbb, 0xbb, 0xa3, 0xff, 0x31, 0x07, 0xe5, 0x67, 0xfe, 0x92, 0x95, 0x0a, 0xaa, 0x43, + 0xce, 0x9d, 0x72, 0xb9, 0x3c, 0xce, 0xb9, 0x53, 0xf4, 0x73, 0xa8, 0xcf, 0xdc, 0x8b, 0xc0, 0x66, + 0x15, 0x2c, 0xc0, 0x28, 0xfa, 0xc9, 0x77, 0xd3, 0x96, 0x0d, 0x23, 0x0e, 0x8e, 0xc8, 0xb5, 0x59, + 0x7a, 0x9b, 0xc2, 0x58, 0x3e, 0x83, 0xb1, 0x0f, 0xa1, 0xee, 0xf9, 0x8e, 0xed, 0x59, 0x71, 0x87, + 0x2f, 0x08, 0x1c, 0x70, 0xea, 0x49, 0xd4, 0xe6, 0x6f, 0xc5, 0xa5, 0x78, 0xcf, 0xb8, 0xa0, 0x4f, + 0xa1, 0xb6, 0xb0, 0x03, 0xea, 0x3a, 0xee, 0xc2, 0x66, 0x33, 0x52, 0x89, 0x0b, 0x66, 0xcc, 0xce, + 0xc4, 0x0d, 0x67, 0xd8, 0x19, 0xac, 0x42, 0xde, 0xbd, 0xac, 0x1b, 0x3f, 0xb8, 0x3a, 0xf7, 0xfc, + 0x9b, 0xb0, 0x51, 0xe6, 0xf6, 0xaf, 0x0b, 0xfa, 0xcb, 0x88, 0xac, 0xff, 0x21, 0x0f, 0xa5, 0x53, + 0x51, 0x9d, 0x07, 0x50, 0xe0, 0x31, 0x12, 0x73, 0xd0, 0x4e, 0xfa, 0x32, 0xc1, 0xc1, 0x03, 0xc4, + 0x79, 0xd0, 0x7b, 0x50, 0xa5, 0xee, 0x8c, 0x84, 0xd4, 0x9e, 0x2d, 0x78, 0x50, 0xf3, 0x38, 0x21, + 0x7c, 0x6d, 0x89, 0xbd, 0x07, 0xd5, 0x78, 0x72, 0x93, 0xc1, 0x4a, 0x08, 0xe8, 0x87, 0x50, 0x65, + 0xf8, 0xe2, 0x73, 0x5a, 0xa3, 0xc8, 0x01, 0xbb, 0x75, 0x0b, 0x5d, 0xdc, 0x04, 0x5c, 0x09, 0x22, + 0xc4, 0xfe, 0x04, 0x54, 0x8e, 0x08, 0x29, 0x24, 0x7a, 0xdd, 0x4e, 0xb6, 0xd7, 0x45, 0xc8, 0xc3, + 0x90, 0x7c, 0x1e, 0xd0, 0x23, 0x28, 0x5e, 0x73, 0xf3, 0xca, 0x72, 0x5e, 0x4c, 0x3b, 0xca, 0x53, + 0x21, 0xce, 0xd9, 0xc7, 0xf8, 0x17, 0xa2, 0xb2, 0x78, 0x97, 0xbb, 0xf5, 0x31, 0x96, 0x45, 0x87, + 0x23, 0x1e, 0x36, 0xce, 0x4d, 0x67, 0x1e, 0x6f, 0x74, 0x55, 0xcc, 0x96, 0xe8, 0x03, 0xa8, 0x39, + 0xcb, 0x20, 0xe0, 0x13, 0xaa, 0x3b, 0x23, 0x8d, 0x2d, 0x1e, 0x28, 0x55, 0xd2, 0x4c, 
0x77, 0x46, + 0xd0, 0xcf, 0xa0, 0xee, 0xd9, 0x21, 0x65, 0xc0, 0x93, 0x8e, 0x6c, 0xf3, 0xab, 0x32, 0xe8, 0x13, + 0xc0, 0x13, 0x9e, 0xa8, 0x5e, 0xb2, 0xd1, 0x2f, 0xa1, 0x36, 0x74, 0xe7, 0xee, 0xcc, 0xf6, 0x38, + 0x40, 0x59, 0xe0, 0x53, 0xad, 0x85, 0xaf, 0xef, 0xd7, 0x55, 0xd0, 0x2e, 0xa8, 0xcc, 0x04, 0xc7, + 0xf7, 0x96, 0xb3, 0xb9, 0xa8, 0xf6, 0x3c, 0xae, 0x2e, 0x8e, 0xbb, 0x82, 0xc0, 0x90, 0x2a, 0x6f, + 0x9a, 0x38, 0x97, 0x64, 0x66, 0xa3, 0x8f, 0x63, 0x64, 0x08, 0xb4, 0x37, 0xb2, 0x98, 0x4a, 0x8c, + 0x8a, 0x30, 0xa3, 0xff, 0x25, 0x07, 0xf5, 0x53, 0x31, 0xae, 0x44, 0x23, 0xd2, 0xe7, 0xb0, 0x49, + 0xce, 0xcf, 0x89, 0x43, 0xdd, 0x6b, 0x62, 0x39, 0xb6, 0xe7, 0x91, 0xc0, 0x92, 0x08, 0x56, 0x5b, + 0xeb, 0x4d, 0xf1, 0xb7, 0xa5, 0xcb, 0xe9, 0xfd, 0x1e, 0xde, 0x88, 0x79, 0x25, 0x69, 0x8a, 0x0c, + 0xd8, 0x74, 0x67, 0x33, 0x32, 0x75, 0x6d, 0x9a, 0x56, 0x20, 0x5a, 0xfe, 0xb6, 0xf4, 0xf4, 0xd4, + 0x3c, 0xb2, 0x29, 0x49, 0xd4, 0xc4, 0x12, 0xb1, 0x9a, 0x0f, 0x99, 0x33, 0xc1, 0x45, 0x3c, 0x75, + 0xad, 0x49, 0x49, 0x93, 0x13, 0xb1, 0x3c, 0xcc, 0x4c, 0x74, 0x85, 0x5b, 0x13, 0x5d, 0xf2, 0xd5, + 0x2d, 0xde, 0xf9, 0xd5, 0xfd, 0x0c, 0xd6, 0x45, 0xbb, 0x8d, 0x52, 0x1f, 0x21, 0xfc, 0x1b, 0x7b, + 0x6e, 0x8d, 0x26, 0x9b, 0x50, 0xff, 0x14, 0xd6, 0xe3, 0x40, 0xca, 0x89, 0xef, 0x00, 0x4a, 0xbc, + 0x7c, 0xa2, 0x74, 0xa0, 0x37, 0xe1, 0x8b, 0x25, 0x87, 0xfe, 0xcb, 0x1c, 0xa0, 0x48, 0xde, 0xbf, + 0x09, 0xff, 0x47, 0x93, 0xb1, 0x05, 0x45, 0x4e, 0x97, 0x99, 0x10, 0x1b, 0x16, 0x07, 0x16, 0xd4, + 0xc5, 0x55, 0x9c, 0x06, 0x21, 0xfc, 0x9c, 0xfd, 0x62, 0x12, 0x2e, 0x3d, 0x8a, 0x25, 0x87, 0xfe, + 0x27, 0x05, 0x36, 0x33, 0x71, 0x90, 0xb1, 0x4c, 0x10, 0xa3, 0xbc, 0x05, 0x31, 0xfb, 0x50, 0x59, + 0x5c, 0xbd, 0x05, 0x59, 0xf1, 0xe9, 0xd7, 0xb6, 0xc3, 0x5d, 0x28, 0x04, 0xac, 0x2d, 0x8b, 0x6f, + 0x6d, 0x7a, 0x38, 0xe1, 0x74, 0x36, 0xe1, 0x64, 0xfc, 0xc8, 0x4c, 0x38, 0xd2, 0x7e, 0x17, 0xd4, + 0x54, 0x67, 0x60, 0xad, 0x24, 0x5b, 0x55, 0x32, 0x75, 0xdf, 0x58, 0x54, 0x6a, 0xaa, 0xa8, 0x58, + 0x7f, 0x76, 0xfc, 0xd9, 
0xc2, 0x23, 0x94, 0x88, 0x94, 0x55, 0x70, 0x42, 0xd0, 0xbf, 0x00, 0x35, + 0x25, 0x79, 0xd7, 0x20, 0x93, 0x24, 0x21, 0x7f, 0x67, 0x12, 0xfe, 0xae, 0xc0, 0x76, 0x52, 0xcc, + 0x4b, 0x8f, 0xfe, 0x5f, 0xd5, 0xa3, 0x1e, 0xc0, 0xce, 0x6d, 0xef, 0xde, 0xa9, 0xca, 0xbe, 0x45, + 0xed, 0x1c, 0x7c, 0x06, 0x6a, 0x6a, 0x74, 0x67, 0xff, 0xf0, 0xfb, 0x47, 0xa3, 0x31, 0x36, 0xb4, + 0x07, 0xa8, 0x02, 0x85, 0x89, 0x39, 0x3e, 0xd1, 0x14, 0xb6, 0x32, 0xbe, 0x30, 0xba, 0xe2, 0xd5, + 0x80, 0xad, 0x2c, 0xc9, 0x94, 0x3f, 0xf8, 0xb7, 0x02, 0x90, 0x7c, 0xf1, 0x91, 0x0a, 0xe5, 0x17, + 0xa3, 0xe3, 0xd1, 0xf8, 0xe5, 0x48, 0x28, 0x38, 0x32, 0xfb, 0x3d, 0x4d, 0x41, 0x55, 0x28, 0x8a, + 0x67, 0x88, 0x1c, 0xbb, 0x41, 0xbe, 0x41, 0xe4, 0x51, 0x0d, 0x2a, 0xf1, 0x03, 0x44, 0x01, 0x95, + 0x21, 0x1f, 0x3f, 0x33, 0xc8, 0x77, 0x85, 0x12, 0x53, 0x88, 0x8d, 0x93, 0x41, 0xbb, 0x6b, 0x68, + 0x65, 0x76, 0x10, 0xbf, 0x30, 0x00, 0x94, 0xa2, 0xe7, 0x05, 0x26, 0x39, 0x31, 0x4c, 0x0d, 0xd8, + 0x3d, 0x63, 0xf3, 0xa9, 0x81, 0x35, 0x95, 0xd1, 0xf0, 0xf8, 0xa5, 0x56, 0x63, 0xb4, 0x27, 0x7d, + 0x63, 0xd0, 0xd3, 0xd6, 0xd0, 0x1a, 0x54, 0x9f, 0x1a, 0x6d, 0x6c, 0x76, 0x8c, 0xb6, 0xa9, 0xd5, + 0xd9, 0xc9, 0x29, 0x37, 0x70, 0x9d, 0x5d, 0xf3, 0x6c, 0xfc, 0x02, 0x8f, 0xda, 0x03, 0x4d, 0x63, + 0x9b, 0x53, 0x03, 0x4f, 0xfa, 0xe3, 0x91, 0xb6, 0xc1, 0xee, 0x19, 0xb4, 0x27, 0xe6, 0xc9, 0xb1, + 0x86, 0x98, 0xfc, 0xa4, 0x7d, 0x6a, 0x9c, 0x8c, 0xfb, 0x23, 0x53, 0xdb, 0x3c, 0x78, 0xc4, 0xbe, + 0x73, 0xe9, 0x09, 0x10, 0xa0, 0x64, 0xb6, 0x3b, 0x03, 0x63, 0xa2, 0x3d, 0x60, 0xeb, 0xc9, 0xd3, + 0x36, 0xee, 0x4d, 0x34, 0xa5, 0xf3, 0xd3, 0xbf, 0xbe, 0xde, 0x55, 0xfe, 0xf6, 0x7a, 0x57, 0xf9, + 0xc7, 0xeb, 0x5d, 0xe5, 0xb7, 0xff, 0xdc, 0x7d, 0xf0, 0xe5, 0xa3, 0x6b, 0x97, 0x92, 0x30, 0x6c, + 0xba, 0xfe, 0xa1, 0x58, 0x1d, 0x5e, 0xf8, 0x87, 0xd7, 0xf4, 0x90, 0xbf, 0xac, 0x1d, 0x26, 0x18, + 0x3c, 0x2b, 0x71, 0xca, 0x8f, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x22, 0xbd, 0x3f, 0x86, 0xb5, + 0x13, 0x00, 0x00, +} + +func (m *Charset) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Charset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Charset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Server != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.Server)) + i-- + dAtA[i] = 0x18 + } + if m.Conn != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.Conn)) + i-- + dAtA[i] = 0x10 + } + if m.Client != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.Client)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BinlogTransaction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinlogTransaction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BinlogTransaction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.EventToken != nil { + { + size, err := m.EventToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Statements) > 0 { + for iNdEx := len(m.Statements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Statements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - 
i, nil +} + +func (m *BinlogTransaction_Statement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinlogTransaction_Statement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BinlogTransaction_Statement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0x1a + } + if m.Charset != nil { + { + size, err := m.Charset.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Category != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.Category)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StreamKeyRangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamKeyRangeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamKeyRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Charset != nil { + { + size, err := m.Charset.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.KeyRange != nil { + { + size, 
err := m.KeyRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamKeyRangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamKeyRangeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamKeyRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.BinlogTransaction != nil { + { + size, err := m.BinlogTransaction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamTablesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamTablesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamTablesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Charset != nil { + { + size, err := m.Charset.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamTablesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamTablesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } + +func (m *StreamTablesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.BinlogTransaction != nil { + { + size, err := m.BinlogTransaction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Filter) > 0 { + i -= len(m.Filter) + 
copy(dAtA[i:], m.Filter) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Filter))) + i-- + dAtA[i] = 0x12 + } + if len(m.Match) > 0 { + i -= len(m.Match) + copy(dAtA[i:], m.Match) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Match))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Filter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Filter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.FieldEventMode != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.FieldEventMode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BinlogSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinlogSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BinlogSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ExternalCluster) > 0 { + i -= len(m.ExternalCluster) + copy(dAtA[i:], m.ExternalCluster) + i = 
encodeVarintBinlogdata(dAtA, i, uint64(len(m.ExternalCluster))) + i-- + dAtA[i] = 0x52 + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if len(m.ExternalMysql) > 0 { + i -= len(m.ExternalMysql) + copy(dAtA[i:], m.ExternalMysql) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.ExternalMysql))) + i-- + dAtA[i] = 0x42 + } + if m.OnDdl != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.OnDdl)) + i-- + dAtA[i] = 0x38 + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.KeyRange != nil { + { + size, err := m.KeyRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TabletType != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RowChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RowChange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
+func (m *RowChange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RowEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RowEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RowEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.RowChanges) > 0 { + for iNdEx := len(m.RowChanges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RowChanges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FieldEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldEvent) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardGtid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardGtid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShardGtid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.TablePKs) > 0 { + for iNdEx := len(m.TablePKs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TablePKs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VGtid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VGtid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VGtid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ShardGtids) > 0 { + for iNdEx := len(m.ShardGtids) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ShardGtids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *KeyspaceShard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyspaceShard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyspaceShard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *Journal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Journal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Journal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SourceWorkflows) > 0 { + for iNdEx := len(m.SourceWorkflows) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceWorkflows[iNdEx]) + copy(dAtA[i:], m.SourceWorkflows[iNdEx]) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.SourceWorkflows[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Participants) > 0 { + for iNdEx := len(m.Participants) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Participants[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.ShardGtids) > 0 { + for iNdEx := len(m.ShardGtids) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ShardGtids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.LocalPosition) > 0 { + i -= len(m.LocalPosition) + copy(dAtA[i:], m.LocalPosition) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.LocalPosition))) + i-- + dAtA[i] = 0x22 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.MigrationType != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.MigrationType)) + i-- 
+ dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.LastPKEvent != nil { + { + size, err := m.LastPKEvent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CurrentTime != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.CurrentTime)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.Dml) > 0 { + i -= len(m.Dml) + copy(dAtA[i:], m.Dml) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Dml))) + i-- + dAtA[i] = 0x4a + } + if m.Journal != nil { + { + size, err := m.Journal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Vgtid != nil { + { + size, err := m.Vgtid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.FieldEvent != nil { + { + size, err := m.FieldEvent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.RowEvent != nil { + { + size, err := m.RowEvent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Statement) > 0 { + i -= len(m.Statement) + copy(dAtA[i:], m.Statement) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Statement))) + i-- + dAtA[i] = 0x22 + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintBinlogdata(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MinimalTable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MinimalTable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MinimalTable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PKColumns) > 0 { + dAtA18 := make([]byte, len(m.PKColumns)*10) + var j17 int + for _, num1 := range m.PKColumns { + num := uint64(num1) + for num >= 1<<7 { + dAtA18[j17] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j17++ + } + dAtA18[j17] = uint8(num) + j17++ + } + i -= j17 + copy(dAtA[i:], dAtA18[:j17]) + i = encodeVarintBinlogdata(dAtA, i, uint64(j17)) + i-- + dAtA[i] = 0x1a + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], 
m.Name) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MinimalSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MinimalSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MinimalSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VStreamRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.TableLastPKs) > 0 { + for iNdEx := len(m.TableLastPKs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TableLastPKs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VStreamRowsRequest) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamRowsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamRowsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Lastpk != nil { + { + size, err := m.Lastpk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamRowsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamRowsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*VStreamRowsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Lastpk != nil { + { + size, err := m.Lastpk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Rows) > 0 { + for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rows[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Pkfields) > 0 { + for iNdEx := len(m.Pkfields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Pkfields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LastPKEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LastPKEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LastPKEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Completed { + i-- + if m.Completed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TableLastPK != nil { + { + size, err := m.TableLastPK.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TableLastPK) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableLastPK) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableLastPK) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Lastpk != nil { + { + size, err := m.Lastpk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamResultsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamResultsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamResultsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + 
copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamResultsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamResultsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamResultsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Rows) > 0 { + for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rows[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Fields) > 0 { + for iNdEx := 
len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinlogdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintBinlogdata(dAtA []byte, offset int, v uint64) int { + offset -= sovBinlogdata(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Charset) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Client != 0 { + n += 1 + sovBinlogdata(uint64(m.Client)) + } + if m.Conn != 0 { + n += 1 + sovBinlogdata(uint64(m.Conn)) + } + if m.Server != 0 { + n += 1 + sovBinlogdata(uint64(m.Server)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BinlogTransaction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statements) > 0 { + for _, e := range m.Statements { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.EventToken != nil { + l = m.EventToken.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BinlogTransaction_Statement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Category != 0 { + n += 1 + sovBinlogdata(uint64(m.Category)) + } + if m.Charset != nil { + l = m.Charset.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamKeyRangeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if 
m.Charset != nil { + l = m.Charset.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamKeyRangeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BinlogTransaction != nil { + l = m.BinlogTransaction.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamTablesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.Charset != nil { + l = m.Charset.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamTablesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BinlogTransaction != nil { + l = m.BinlogTransaction.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Match) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Filter) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Filter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.FieldEventMode != 0 { + n += 1 + sovBinlogdata(uint64(m.FieldEventMode)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BinlogSource) Size() (n int) { + if m == nil { + return 0 + } + 
var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + sovBinlogdata(uint64(m.TabletType)) + } + if m.KeyRange != nil { + l = m.KeyRange.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.OnDdl != 0 { + n += 1 + sovBinlogdata(uint64(m.OnDdl)) + } + l = len(m.ExternalMysql) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + l = len(m.ExternalCluster) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RowChange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RowEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TableName) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.RowChanges) > 0 { + for _, e := range m.RowChanges { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FieldEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TableName) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n 
+} + +func (m *ShardGtid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Gtid) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.TablePKs) > 0 { + for _, e := range m.TablePKs { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VGtid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ShardGtids) > 0 { + for _, e := range m.ShardGtids { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *KeyspaceShard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Journal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovBinlogdata(uint64(m.Id)) + } + if m.MigrationType != 0 { + n += 1 + sovBinlogdata(uint64(m.MigrationType)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + l = len(m.LocalPosition) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.ShardGtids) > 0 { + for _, e := range m.ShardGtids { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if len(m.Participants) > 0 { + for _, e := range m.Participants { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if len(m.SourceWorkflows) > 0 { + for _, s := range m.SourceWorkflows { + l = len(s) + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovBinlogdata(uint64(m.Type)) + } + if m.Timestamp != 0 { + n += 1 + sovBinlogdata(uint64(m.Timestamp)) + } + l = len(m.Gtid) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Statement) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.RowEvent != nil { + l = m.RowEvent.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.FieldEvent != nil { + l = m.FieldEvent.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Vgtid != nil { + l = m.Vgtid.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Journal != nil { + l = m.Journal.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Dml) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.CurrentTime != 0 { + n += 2 + sovBinlogdata(uint64(m.CurrentTime)) + } + if m.LastPKEvent != nil { + l = m.LastPKEvent.Size() + n += 2 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MinimalTable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if len(m.PKColumns) > 0 { + l = 0 + for _, e := range m.PKColumns { + l += sovBinlogdata(uint64(e)) + } + n += 1 + sovBinlogdata(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MinimalSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tables) > 0 { + for _, e := range m.Tables { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*VStreamRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.TableLastPKs) > 0 { + for _, e := range m.TableLastPKs { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VStreamResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VStreamRowsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Lastpk != nil { + l = m.Lastpk.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VStreamRowsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if len(m.Pkfields) > 0 { + for _, e := range 
m.Pkfields { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + l = len(m.Gtid) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.Rows) > 0 { + for _, e := range m.Rows { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.Lastpk != nil { + l = m.Lastpk.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LastPKEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TableLastPK != nil { + l = m.TableLastPK.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Completed { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TableLastPK) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TableName) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Lastpk != nil { + l = m.Lastpk.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VStreamResultsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VStreamResultsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + l = len(m.Gtid) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } + if len(m.Rows) > 0 { + for _, e := range 
m.Rows { + l = e.Size() + n += 1 + l + sovBinlogdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovBinlogdata(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBinlogdata(x uint64) (n int) { + return sovBinlogdata(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Charset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Charset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Charset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Client", wireType) + } + m.Client = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Client |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Conn", wireType) + } + m.Conn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Conn |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + m.Server = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Server |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinlogTransaction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinlogTransaction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinlogTransaction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statements = append(m.Statements, &BinlogTransaction_Statement{}) + if err := 
m.Statements[len(m.Statements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EventToken == nil { + m.EventToken = &query.EventToken{} + } + if err := m.EventToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinlogTransaction_Statement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Statement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Statement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Category", wireType) + } + m.Category = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Category |= BinlogTransaction_Statement_Category(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Charset", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Charset == nil { + m.Charset = &Charset{} + } + if err := m.Charset.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Sql", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = append(m.Sql[:0], dAtA[iNdEx:postIndex]...) + if m.Sql == nil { + m.Sql = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamKeyRangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamKeyRangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamKeyRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &topodata.KeyRange{} + } + if err := m.KeyRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Charset", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Charset == nil { + m.Charset = &Charset{} + } + if err := m.Charset.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamKeyRangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamKeyRangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamKeyRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogTransaction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinlogTransaction == nil { + m.BinlogTransaction = &BinlogTransaction{} + } + if err := m.BinlogTransaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamTablesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamTablesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Charset", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Charset == nil { + m.Charset = &Charset{} + } + if err := m.Charset.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamTablesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamTablesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogTransaction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinlogTransaction == nil { + m.BinlogTransaction = &BinlogTransaction{} + } + if err := m.BinlogTransaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Match = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Filter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &Rule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldEventMode", wireType) + } + m.FieldEventMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FieldEventMode |= Filter_FieldEventMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinlogSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinlogSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinlogSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &topodata.KeyRange{} 
+ } + if err := m.KeyRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &Filter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + } + m.OnDdl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OnDdl |= OnDDLAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field ExternalMysql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalMysql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RowChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RowChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RowChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &query.Row{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &query.Row{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RowEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RowEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RowEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowChanges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RowChanges = append(m.RowChanges, &RowChange{}) + if err := m.RowChanges[len(m.RowChanges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FieldEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShardGtid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardGtid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardGtid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TablePKs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.TablePKs = append(m.TablePKs, &TableLastPK{}) + if err := m.TablePKs[len(m.TablePKs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VGtid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VGtid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VGtid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) + if err := 
m.ShardGtids[len(m.ShardGtids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyspaceShard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyspaceShard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyspaceShard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Journal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Journal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Journal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationType", wireType) + } + m.MigrationType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MigrationType |= MigrationType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } 
+ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LocalPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) + if err := m.ShardGtids[len(m.ShardGtids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Participants = append(m.Participants, &KeyspaceShard{}) + if err := m.Participants[len(m.Participants)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceWorkflows", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceWorkflows = append(m.SourceWorkflows, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= VEventType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statement", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statement = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowEvent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowEvent == nil { + m.RowEvent = &RowEvent{} + } + if err := m.RowEvent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldEvent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldEvent == nil { + m.FieldEvent = &FieldEvent{} + } + if err := m.FieldEvent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vgtid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vgtid == nil { + m.Vgtid = &VGtid{} + } + if err := m.Vgtid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Journal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Journal == nil { + m.Journal = &Journal{} + } + if err := m.Journal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dml", wireType) + } + var stringLen uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dml = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentTime", wireType) + } + m.CurrentTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastPKEvent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastPKEvent == nil { + m.LastPKEvent = &LastPKEvent{} + } + if err := m.LastPKEvent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MinimalTable) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MinimalTable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MinimalTable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PKColumns = append(m.PKColumns, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PKColumns) == 0 { + m.PKColumns = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PKColumns = append(m.PKColumns, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PKColumns", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MinimalSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MinimalSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MinimalSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, &MinimalTable{}) + if err := m.Tables[len(m.Tables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &query.VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &query.Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &Filter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableLastPKs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableLastPKs = append(m.TableLastPKs, &TableLastPK{}) + if err := m.TableLastPKs[len(m.TableLastPKs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &VEvent{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRowsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRowsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &query.VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &query.Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lastpk == nil { + m.Lastpk = &query.QueryResult{} + } + if err := m.Lastpk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRowsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRowsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pkfields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pkfields = append(m.Pkfields, &query.Field{}) + if err := m.Pkfields[len(m.Pkfields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rows", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rows = append(m.Rows, &query.Row{}) + if err := m.Rows[len(m.Rows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lastpk == nil { + m.Lastpk = &query.Row{} + } + if err := m.Lastpk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LastPKEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LastPKEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LastPKEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableLastPK", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableLastPK == nil { + m.TableLastPK = &TableLastPK{} + } + if err := m.TableLastPK.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Completed = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableLastPK) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableLastPK: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableLastPK: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lastpk == nil { + m.Lastpk = &query.QueryResult{} + } + if err := m.Lastpk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamResultsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamResultsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamResultsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &query.VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &query.Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 
4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamResultsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamResultsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamResultsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rows = append(m.Rows, &query.Row{}) + if err := m.Rows[len(m.Rows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinlogdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBinlogdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBinlogdata(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBinlogdata + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBinlogdata + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBinlogdata + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBinlogdata = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBinlogdata = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBinlogdata = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go b/go/vt/proto/binlogservice/binlogservice.pb.go index 0d2d06418fa..c0d419b0cb0 100644 --- a/go/vt/proto/binlogservice/binlogservice.pb.go +++ b/go/vt/proto/binlogservice/binlogservice.pb.go @@ -1,4 +1,4 @@ 
-// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: binlogservice.proto package binlogservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -30,7 +29,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor_4ccdea02fd9c8d58) } var fileDescriptor_4ccdea02fd9c8d58 = []byte{ - // 177 bytes of a gzipped FileDescriptorProto + // 194 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xca, 0xcc, 0xcb, 0xc9, 0x4f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x45, 0x11, 0x94, 0x12, 0x80, 0x70, 0x53, 0x12, 0x4b, 0x12, 0x21, 0x0a, 0x8c, 0x0e, 0x31, @@ -39,10 +38,11 @@ var fileDescriptor_4ccdea02fd9c8d58 = []byte{ 0xba, 0x50, 0xe5, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0xa4, 0x94, 0xf0, 0x29, 0x29, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x55, 0x62, 0x30, 0x60, 0x14, 0x0a, 0xe5, 0xe2, 0x81, 0xc8, 0x86, 0x24, 0x26, 0xe5, 0xa4, 0x16, 0x0b, 0xc9, 0x63, 0xea, 0x83, 0xc8, 0xc0, 0x0c, 0x56, 0xc0, 0xad, 0x00, - 0x61, 0xac, 0x93, 0x4e, 0x94, 0x56, 0x59, 0x66, 0x49, 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, - 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, 0x0f, 0xf6, 0xa4, 0x3e, 0x4a, 0x20, 0x24, 0xb1, - 0x81, 0x05, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xf4, 0x0a, 0x9c, 0x31, 0x01, 0x00, - 0x00, + 0x61, 0xac, 0x93, 0xcd, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, + 0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x56, 0x59, 0x66, 0x49, 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, + 0x3e, 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, 0x0f, 0xf6, 0xb4, 0x3e, 0x4a, 0xa0, 0x24, + 0xb1, 0x81, 0x05, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xda, 0xef, 0x13, 0x20, 
0x41, 0x01, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index d51f2962abb..0185fb41955 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -1,14 +1,15 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: logutil.proto package logutil import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - vttime "vitess.io/vitess/go/vt/proto/vttime" ) @@ -77,18 +78,26 @@ func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor_31f5dd3702a8edf9, []int{0} } - func (m *Event) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Event.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Event) XXX_Merge(src proto.Message) { xxx_messageInfo_Event.Merge(m, src) } func (m *Event) XXX_Size() int { - return xxx_messageInfo_Event.Size(m) + return m.Size() } func (m *Event) XXX_DiscardUnknown() { xxx_messageInfo_Event.DiscardUnknown(m) @@ -139,20 +148,409 @@ func init() { func init() { proto.RegisterFile("logutil.proto", fileDescriptor_31f5dd3702a8edf9) } var fileDescriptor_31f5dd3702a8edf9 = []byte{ - // 236 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8f, 0x5f, 0x4b, 0xc3, 0x30, - 0x14, 0xc5, 0xcd, 0xda, 0x38, 0x77, 0x37, 0x47, 0xb9, 0xf8, 0x10, 0x7c, 0x0a, 0x32, 0xa4, 0xf8, - 0xd0, 0xc0, 0x04, 0xdf, 0x55, 0xaa, 0x0c, 0x46, 0x0b, 0x57, 0x41, 0xf0, 0x4d, 0xe1, 
0x3a, 0x02, - 0xd9, 0x22, 0x2e, 0xcd, 0xb7, 0xf0, 0x3b, 0x4b, 0xd3, 0xfa, 0x76, 0xce, 0xef, 0x1c, 0xee, 0x1f, - 0x38, 0x77, 0x7e, 0xd7, 0x05, 0xeb, 0xaa, 0xef, 0x1f, 0x1f, 0x3c, 0x4e, 0x47, 0x7b, 0xb9, 0x88, - 0x21, 0xd8, 0x3d, 0x0f, 0xf8, 0xea, 0x57, 0x80, 0xac, 0x23, 0x1f, 0x02, 0x6a, 0xc8, 0x7b, 0xae, - 0x84, 0x16, 0xe5, 0x7c, 0xbd, 0xa8, 0xc6, 0xda, 0xab, 0xdd, 0x33, 0xa5, 0x04, 0x57, 0x20, 0x1d, - 0x47, 0x76, 0x6a, 0xa2, 0x45, 0xb9, 0x5c, 0x2f, 0xab, 0xff, 0x0d, 0xdb, 0x9e, 0xd2, 0x10, 0x22, - 0x42, 0xfe, 0x65, 0x1d, 0xab, 0x4c, 0x8b, 0x72, 0x46, 0x49, 0xf7, 0xcc, 0xd9, 0x03, 0xab, 0x5c, - 0x8b, 0x32, 0xa3, 0xa4, 0xf1, 0x02, 0x64, 0xfc, 0x70, 0x1d, 0x2b, 0x99, 0x8a, 0x83, 0xb9, 0xb9, - 0x03, 0x99, 0xa6, 0xe1, 0x19, 0xe4, 0x9b, 0xe6, 0xa9, 0x2d, 0x4e, 0x70, 0x0e, 0xd3, 0xb7, 0x7b, - 0x6a, 0x36, 0xcd, 0x73, 0x21, 0x70, 0x06, 0xb2, 0x26, 0x6a, 0xa9, 0x98, 0xf4, 0xfc, 0xb1, 0x6d, - 0x5e, 0xda, 0x6d, 0x5d, 0x64, 0x0f, 0xd7, 0xef, 0xab, 0x68, 0x03, 0x1f, 0x8f, 0x95, 0xf5, 0x66, - 0x50, 0x66, 0xe7, 0x4d, 0x0c, 0x26, 0xfd, 0x69, 0xc6, 0x53, 0x3f, 0x4f, 0x93, 0xbd, 0xfd, 0x0b, - 0x00, 0x00, 0xff, 0xff, 0xa4, 0x27, 0x83, 0x63, 0x1e, 0x01, 0x00, 0x00, + // 258 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8f, 0xcd, 0x4a, 0xc3, 0x40, + 0x14, 0x85, 0x7b, 0x9b, 0x8c, 0xb5, 0xb7, 0xb5, 0x84, 0xc1, 0x45, 0x70, 0x11, 0x06, 0xe9, 0x22, + 0xb8, 0xc8, 0x40, 0x85, 0xee, 0x55, 0xa2, 0x14, 0x4a, 0x02, 0xa3, 0x20, 0xb8, 0x53, 0xb8, 0x96, + 0x81, 0x69, 0x47, 0xec, 0x74, 0xde, 0xc2, 0xbd, 0x8f, 0xe4, 0xd2, 0x47, 0x90, 0xf8, 0x22, 0x92, + 0x49, 0xdc, 0x9d, 0xf3, 0x9d, 0xc3, 0xfd, 0xc1, 0x13, 0x63, 0x37, 0x07, 0xa7, 0x4d, 0xf1, 0xf6, + 0x6e, 0x9d, 0xe5, 0xa3, 0xde, 0x9e, 0x4d, 0xbd, 0x73, 0x7a, 0x4b, 0x1d, 0x3e, 0xff, 0x00, 0x64, + 0xa5, 0xa7, 0x9d, 0xe3, 0x02, 0xe3, 0x96, 0xa7, 0x20, 0x20, 0x9f, 0x2c, 0xa6, 0x45, 0x5f, 0x7b, + 0xd0, 0x5b, 0x52, 0x21, 0xe1, 0x73, 0x64, 0x86, 0x3c, 0x99, 0x74, 0x28, 0x20, 0x9f, 0x2d, 0x66, + 
0xc5, 0xff, 0x86, 0x75, 0x4b, 0x55, 0x17, 0x72, 0x8e, 0xf1, 0xab, 0x36, 0x94, 0x46, 0x02, 0xf2, + 0xb1, 0x0a, 0xba, 0x65, 0x46, 0xef, 0x28, 0x8d, 0x05, 0xe4, 0x91, 0x0a, 0x9a, 0x9f, 0x22, 0xf3, + 0xcf, 0xe6, 0x40, 0x29, 0x0b, 0xc5, 0xce, 0x5c, 0x2c, 0x91, 0x85, 0x69, 0xfc, 0x18, 0xe3, 0x55, + 0x75, 0x5b, 0x27, 0x03, 0x3e, 0xc1, 0xd1, 0xe3, 0x95, 0xaa, 0x56, 0xd5, 0x5d, 0x02, 0x7c, 0x8c, + 0xac, 0x54, 0xaa, 0x56, 0xc9, 0xb0, 0xe5, 0x37, 0x75, 0x75, 0x5f, 0xaf, 0xcb, 0x24, 0xba, 0x5e, + 0x7e, 0x35, 0x19, 0x7c, 0x37, 0x19, 0xfc, 0x34, 0x19, 0x7c, 0xfe, 0x66, 0x83, 0xa7, 0xb9, 0xd7, + 0x8e, 0xf6, 0xfb, 0x42, 0x5b, 0xd9, 0x29, 0xb9, 0xb1, 0xd2, 0x3b, 0x19, 0xfe, 0x96, 0xfd, 0xe9, + 0x2f, 0x47, 0xc1, 0x5e, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc4, 0x19, 0xa7, 0x2e, 0x01, + 0x00, 0x00, +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintLogutil(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x2a + } + if m.Line != 0 { + i = encodeVarintLogutil(dAtA, i, uint64(m.Line)) + i-- + dAtA[i] = 0x20 + } + if len(m.File) > 0 { + i -= len(m.File) + copy(dAtA[i:], m.File) + i = encodeVarintLogutil(dAtA, i, uint64(len(m.File))) + i-- + dAtA[i] = 0x1a + } + if m.Level != 0 { + i = encodeVarintLogutil(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintLogutil(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } + +func encodeVarintLogutil(dAtA []byte, offset int, v uint64) int { + offset -= sovLogutil(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Time != nil { + l = m.Time.Size() + n += 1 + l + sovLogutil(uint64(l)) + } + if m.Level != 0 { + n += 1 + sovLogutil(uint64(m.Level)) + } + l = len(m.File) + if l > 0 { + n += 1 + l + sovLogutil(uint64(l)) + } + if m.Line != 0 { + n += 1 + sovLogutil(uint64(m.Line)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovLogutil(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovLogutil(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLogutil(x uint64) (n int) { + return sovLogutil(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogutil + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogutil + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogutil + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogutil + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Time == nil { + m.Time = &vttime.Time{} + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + m.Level = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogutil + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Level |= Level(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogutil + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogutil + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogutil + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.File = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + m.Line = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogutil + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Line |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowLogutil + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogutil + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogutil + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogutil(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogutil + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogutil + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogutil(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogutil + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogutil + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogutil + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLogutil + } + iNdEx += length + 
case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLogutil + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLogutil + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLogutil = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogutil = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLogutil = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index 2b80a9971d9..27512f92d99 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: mysqlctl.proto package mysqlctl @@ -6,7 +6,9 @@ package mysqlctl import ( context "context" fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" @@ -38,18 +40,26 @@ func (*StartRequest) ProtoMessage() {} func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{0} } - func (m *StartRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StartRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StartRequest.Merge(m, src) } func (m *StartRequest) XXX_Size() int { - return 
xxx_messageInfo_StartRequest.Size(m) + return m.Size() } func (m *StartRequest) XXX_DiscardUnknown() { xxx_messageInfo_StartRequest.DiscardUnknown(m) @@ -76,18 +86,26 @@ func (*StartResponse) ProtoMessage() {} func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{1} } - func (m *StartResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StartResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StartResponse.Merge(m, src) } func (m *StartResponse) XXX_Size() int { - return xxx_messageInfo_StartResponse.Size(m) + return m.Size() } func (m *StartResponse) XXX_DiscardUnknown() { xxx_messageInfo_StartResponse.DiscardUnknown(m) @@ -108,18 +126,26 @@ func (*ShutdownRequest) ProtoMessage() {} func (*ShutdownRequest) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{2} } - func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutdownRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ShutdownRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ShutdownRequest.Merge(m, src) } func (m *ShutdownRequest) XXX_Size() int { - return 
xxx_messageInfo_ShutdownRequest.Size(m) + return m.Size() } func (m *ShutdownRequest) XXX_DiscardUnknown() { xxx_messageInfo_ShutdownRequest.DiscardUnknown(m) @@ -146,18 +172,26 @@ func (*ShutdownResponse) ProtoMessage() {} func (*ShutdownResponse) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{3} } - func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutdownResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ShutdownResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutdownResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ShutdownResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ShutdownResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ShutdownResponse.Merge(m, src) } func (m *ShutdownResponse) XXX_Size() int { - return xxx_messageInfo_ShutdownResponse.Size(m) + return m.Size() } func (m *ShutdownResponse) XXX_DiscardUnknown() { xxx_messageInfo_ShutdownResponse.DiscardUnknown(m) @@ -177,18 +211,26 @@ func (*RunMysqlUpgradeRequest) ProtoMessage() {} func (*RunMysqlUpgradeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{4} } - func (m *RunMysqlUpgradeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RunMysqlUpgradeRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RunMysqlUpgradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RunMysqlUpgradeRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RunMysqlUpgradeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RunMysqlUpgradeRequest) XXX_Merge(src proto.Message) { 
xxx_messageInfo_RunMysqlUpgradeRequest.Merge(m, src) } func (m *RunMysqlUpgradeRequest) XXX_Size() int { - return xxx_messageInfo_RunMysqlUpgradeRequest.Size(m) + return m.Size() } func (m *RunMysqlUpgradeRequest) XXX_DiscardUnknown() { xxx_messageInfo_RunMysqlUpgradeRequest.DiscardUnknown(m) @@ -208,18 +250,26 @@ func (*RunMysqlUpgradeResponse) ProtoMessage() {} func (*RunMysqlUpgradeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{5} } - func (m *RunMysqlUpgradeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RunMysqlUpgradeResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RunMysqlUpgradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RunMysqlUpgradeResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RunMysqlUpgradeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RunMysqlUpgradeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RunMysqlUpgradeResponse.Merge(m, src) } func (m *RunMysqlUpgradeResponse) XXX_Size() int { - return xxx_messageInfo_RunMysqlUpgradeResponse.Size(m) + return m.Size() } func (m *RunMysqlUpgradeResponse) XXX_DiscardUnknown() { xxx_messageInfo_RunMysqlUpgradeResponse.DiscardUnknown(m) @@ -239,18 +289,26 @@ func (*ReinitConfigRequest) ProtoMessage() {} func (*ReinitConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{6} } - func (m *ReinitConfigRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReinitConfigRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReinitConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReinitConfigRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReinitConfigRequest.Marshal(b, m, deterministic) + } 
else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReinitConfigRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReinitConfigRequest.Merge(m, src) } func (m *ReinitConfigRequest) XXX_Size() int { - return xxx_messageInfo_ReinitConfigRequest.Size(m) + return m.Size() } func (m *ReinitConfigRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReinitConfigRequest.DiscardUnknown(m) @@ -270,18 +328,26 @@ func (*ReinitConfigResponse) ProtoMessage() {} func (*ReinitConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{7} } - func (m *ReinitConfigResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReinitConfigResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReinitConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReinitConfigResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReinitConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReinitConfigResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReinitConfigResponse.Merge(m, src) } func (m *ReinitConfigResponse) XXX_Size() int { - return xxx_messageInfo_ReinitConfigResponse.Size(m) + return m.Size() } func (m *ReinitConfigResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReinitConfigResponse.DiscardUnknown(m) @@ -301,18 +367,26 @@ func (*RefreshConfigRequest) ProtoMessage() {} func (*RefreshConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{8} } - func (m *RefreshConfigRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RefreshConfigRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RefreshConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_RefreshConfigRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RefreshConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RefreshConfigRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefreshConfigRequest.Merge(m, src) } func (m *RefreshConfigRequest) XXX_Size() int { - return xxx_messageInfo_RefreshConfigRequest.Size(m) + return m.Size() } func (m *RefreshConfigRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefreshConfigRequest.DiscardUnknown(m) @@ -332,18 +406,26 @@ func (*RefreshConfigResponse) ProtoMessage() {} func (*RefreshConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor_cd8c110e42f9cbb9, []int{9} } - func (m *RefreshConfigResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RefreshConfigResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RefreshConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RefreshConfigResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RefreshConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RefreshConfigResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefreshConfigResponse.Merge(m, src) } func (m *RefreshConfigResponse) XXX_Size() int { - return xxx_messageInfo_RefreshConfigResponse.Size(m) + return m.Size() } func (m *RefreshConfigResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefreshConfigResponse.DiscardUnknown(m) @@ -351,6 +433,62 @@ func (m *RefreshConfigResponse) XXX_DiscardUnknown() { var xxx_messageInfo_RefreshConfigResponse proto.InternalMessageInfo +// BackupInfo is the read-only attributes of a mysqlctl/backupstorage.BackupHandle. 
+type BackupInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Directory string `protobuf:"bytes,2,opt,name=directory,proto3" json:"directory,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupInfo) Reset() { *m = BackupInfo{} } +func (m *BackupInfo) String() string { return proto.CompactTextString(m) } +func (*BackupInfo) ProtoMessage() {} +func (*BackupInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{10} +} +func (m *BackupInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BackupInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BackupInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupInfo.Merge(m, src) +} +func (m *BackupInfo) XXX_Size() int { + return m.Size() +} +func (m *BackupInfo) XXX_DiscardUnknown() { + xxx_messageInfo_BackupInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupInfo proto.InternalMessageInfo + +func (m *BackupInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BackupInfo) GetDirectory() string { + if m != nil { + return m.Directory + } + return "" +} + func init() { proto.RegisterType((*StartRequest)(nil), "mysqlctl.StartRequest") proto.RegisterType((*StartResponse)(nil), "mysqlctl.StartResponse") @@ -362,34 +500,39 @@ func init() { proto.RegisterType((*ReinitConfigResponse)(nil), "mysqlctl.ReinitConfigResponse") proto.RegisterType((*RefreshConfigRequest)(nil), "mysqlctl.RefreshConfigRequest") proto.RegisterType((*RefreshConfigResponse)(nil), "mysqlctl.RefreshConfigResponse") + proto.RegisterType((*BackupInfo)(nil), "mysqlctl.BackupInfo") } func init() { 
proto.RegisterFile("mysqlctl.proto", fileDescriptor_cd8c110e42f9cbb9) } var fileDescriptor_cd8c110e42f9cbb9 = []byte{ - // 339 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x4d, 0x4f, 0xfa, 0x30, - 0x1c, 0xc7, 0xff, 0x84, 0xfc, 0xcd, 0xfc, 0x09, 0xce, 0x54, 0x79, 0x6a, 0xa2, 0xe0, 0x12, 0x95, - 0x13, 0x4d, 0xf4, 0xa4, 0x37, 0x25, 0xf1, 0x66, 0x4c, 0x4a, 0x4c, 0x8c, 0x17, 0x32, 0xa5, 0x8c, - 0x26, 0xb8, 0x42, 0x5b, 0x20, 0xbe, 0x05, 0x5f, 0xb5, 0xb1, 0x6b, 0xc7, 0xc6, 0xc0, 0xdb, 0xfa, - 0x7d, 0x6a, 0xf6, 0xd9, 0xe0, 0xf0, 0xf3, 0x4b, 0xcd, 0xa7, 0x1f, 0x7a, 0xda, 0x9b, 0x49, 0xa1, - 0x05, 0xf2, 0xdc, 0x39, 0x20, 0x50, 0x19, 0xe8, 0x50, 0x6a, 0xca, 0xe6, 0x0b, 0xa6, 0x34, 0x6a, - 0xc3, 0x81, 0xf1, 0x46, 0xc3, 0x50, 0x46, 0xaa, 0x59, 0xea, 0x94, 0xbb, 0xfb, 0x14, 0x12, 0xe9, - 0x5e, 0x46, 0x2a, 0xf0, 0xa1, 0x6a, 0x0b, 0x6a, 0x26, 0x62, 0xc5, 0x82, 0x5b, 0xf0, 0x07, 0x93, - 0x85, 0x1e, 0x89, 0x55, 0xec, 0x46, 0x2e, 0xc1, 0x5f, 0x85, 0x5c, 0x0f, 0xc7, 0x42, 0x0e, 0x93, - 0x6a, 0xb3, 0xd4, 0x29, 0x75, 0x3d, 0x5a, 0xfd, 0x95, 0x1f, 0x85, 0x7c, 0x32, 0x62, 0x80, 0xe0, - 0x68, 0x5d, 0xb5, 0x73, 0x4d, 0xa8, 0xd3, 0x45, 0x6c, 0x02, 0x2f, 0xb3, 0x48, 0x86, 0x23, 0x66, - 0x57, 0x83, 0x16, 0x34, 0x0a, 0x8e, 0x2d, 0xd5, 0xe0, 0x98, 0x32, 0x1e, 0x73, 0xdd, 0x17, 0xf1, - 0x98, 0x47, 0xae, 0x51, 0x87, 0x93, 0xbc, 0x6c, 0xe3, 0x46, 0x1f, 0x4b, 0xa6, 0x26, 0xf9, 0x7c, - 0x03, 0x6a, 0x1b, 0x7a, 0x52, 0xb8, 0xfe, 0x2e, 0x83, 0x67, 0x2e, 0xee, 0xeb, 0x29, 0xba, 0x83, - 0xff, 0x86, 0x00, 0xaa, 0xf7, 0x52, 0xac, 0x59, 0x86, 0xb8, 0x51, 0xd0, 0xed, 0xbd, 0xff, 0x50, - 0x1f, 0x3c, 0xf7, 0xc6, 0xa8, 0x95, 0x89, 0xe5, 0x01, 0x62, 0xbc, 0xcd, 0x4a, 0x47, 0x5e, 0xc1, - 0xdf, 0x00, 0x81, 0x3a, 0xeb, 0xc2, 0x76, 0x7a, 0xf8, 0xfc, 0x8f, 0x44, 0xba, 0xfc, 0x0c, 0x95, - 0x2c, 0x30, 0x74, 0x9a, 0x29, 0x15, 0xf9, 0xe2, 0xb3, 0x5d, 0x76, 0x3a, 0x48, 0xa1, 0x9a, 0x23, - 0x8a, 0x72, 0x95, 0xe2, 0x27, 0xc0, 0xed, 0x9d, 0xbe, 0xdb, 0x7c, 
0xb8, 0x7a, 0xbb, 0x58, 0x72, - 0xcd, 0x94, 0xea, 0x71, 0x41, 0x92, 0x27, 0x12, 0x09, 0xb2, 0xd4, 0xc4, 0xfc, 0xdc, 0xc4, 0x0d, - 0xbc, 0xef, 0x99, 0xf3, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xe2, 0x15, 0x86, 0xfe, - 0x02, 0x00, 0x00, + // 405 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xdf, 0x6a, 0xda, 0x50, + 0x1c, 0x36, 0x73, 0x1b, 0xf1, 0x37, 0x5d, 0xc6, 0xd9, 0xd4, 0x18, 0xb6, 0xe8, 0x02, 0x1b, 0x5e, + 0x19, 0xd8, 0x2e, 0xc6, 0x7a, 0x51, 0xa8, 0x42, 0xa1, 0x17, 0xa5, 0x10, 0x29, 0x94, 0xde, 0x48, + 0x6a, 0x4e, 0x62, 0xa8, 0xe6, 0xc4, 0x73, 0x4e, 0x14, 0x5f, 0xa1, 0x4f, 0xd0, 0x47, 0xea, 0x65, + 0x1f, 0xa1, 0xd8, 0x17, 0x29, 0x3d, 0x26, 0x31, 0x31, 0xda, 0xbb, 0x73, 0xbe, 0x7f, 0x9c, 0x7c, + 0x1f, 0x81, 0xcf, 0xb3, 0x15, 0x9b, 0x4f, 0xc7, 0x7c, 0xda, 0x0b, 0x29, 0xe1, 0x04, 0xc9, 0xc9, + 0xdd, 0x30, 0xa1, 0x3a, 0xe4, 0x36, 0xe5, 0x16, 0x9e, 0x47, 0x98, 0x71, 0xd4, 0x86, 0x4f, 0x82, + 0x73, 0x46, 0x36, 0xf5, 0x98, 0x2a, 0x75, 0xca, 0xdd, 0x8a, 0x05, 0x1b, 0xe8, 0x84, 0x7a, 0xcc, + 0x50, 0xa0, 0x16, 0x1b, 0x58, 0x48, 0x02, 0x86, 0x8d, 0xff, 0xa0, 0x0c, 0x27, 0x11, 0x77, 0xc8, + 0x32, 0x48, 0x42, 0x7e, 0x83, 0xb2, 0xb4, 0x7d, 0x3e, 0x72, 0x09, 0x1d, 0x6d, 0xac, 0xaa, 0xd4, + 0x91, 0xba, 0xb2, 0x55, 0x7b, 0x85, 0x4f, 0x09, 0x3d, 0x17, 0xa0, 0x81, 0xe0, 0xcb, 0xd6, 0x1a, + 0xc7, 0xa9, 0xd0, 0xb0, 0xa2, 0x40, 0x08, 0x2e, 0x43, 0x8f, 0xda, 0x0e, 0x8e, 0x53, 0x8d, 0x16, + 0x34, 0x0b, 0x4c, 0x6c, 0xaa, 0xc3, 0x57, 0x0b, 0xfb, 0x81, 0xcf, 0x07, 0x24, 0x70, 0x7d, 0x2f, + 0x71, 0x34, 0xe0, 0x5b, 0x1e, 0x8e, 0xe5, 0x02, 0x77, 0x29, 0x66, 0x93, 0xbc, 0xbe, 0x09, 0xf5, + 0x1d, 0x3c, 0x36, 0x1c, 0x03, 0xf4, 0xed, 0xf1, 0x6d, 0x14, 0x9e, 0x05, 0x2e, 0x41, 0x08, 0xde, + 0x07, 0xf6, 0x0c, 0x8b, 0x6f, 0xaa, 0x58, 0xe2, 0x8c, 0xbe, 0x43, 0xc5, 0xf1, 0x29, 0x1e, 0x73, + 0x42, 0x57, 0xea, 0x3b, 0x41, 0x6c, 0x81, 0x3f, 0x77, 0x65, 0x90, 0xc5, 0xc3, 0x07, 0x7c, 0x8a, + 0x8e, 0xe0, 0x83, 0x68, 0x10, 0x35, 
0x7a, 0xe9, 0x2c, 0xd9, 0x0d, 0xb4, 0x66, 0x01, 0x8f, 0x9f, + 0x51, 0x42, 0x03, 0x90, 0x93, 0xc6, 0x50, 0x2b, 0x23, 0xcb, 0x0f, 0xa0, 0x69, 0xfb, 0xa8, 0x34, + 0xe4, 0x0a, 0x94, 0x9d, 0x22, 0x51, 0x67, 0x6b, 0xd8, 0xdf, 0xbe, 0xf6, 0xf3, 0x0d, 0x45, 0x9a, + 0x7c, 0x01, 0xd5, 0x6c, 0xe1, 0xe8, 0x47, 0xc6, 0x54, 0xdc, 0x47, 0xd3, 0x0f, 0xd1, 0x69, 0xa0, + 0x05, 0xb5, 0xdc, 0x22, 0x28, 0x67, 0x29, 0x4e, 0xa8, 0xb5, 0x0f, 0xf2, 0x49, 0x66, 0xff, 0xdf, + 0xc3, 0x5a, 0x97, 0x1e, 0xd7, 0xba, 0xf4, 0xb4, 0xd6, 0xa5, 0xfb, 0x67, 0xbd, 0x74, 0xfd, 0x6b, + 0xe1, 0x73, 0xcc, 0x58, 0xcf, 0x27, 0xe6, 0xe6, 0x64, 0x7a, 0xc4, 0x5c, 0x70, 0x53, 0xfc, 0x2c, + 0x66, 0x12, 0x78, 0xf3, 0x51, 0xdc, 0xff, 0xbe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xef, 0x36, + 0xdc, 0x4e, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -615,3 +758,1294 @@ var _MysqlCtl_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "mysqlctl.proto", } + +func (m *StartRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MysqldArgs) > 0 { + for iNdEx := len(m.MysqldArgs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MysqldArgs[iNdEx]) + copy(dAtA[i:], m.MysqldArgs[iNdEx]) + i = encodeVarintMysqlctl(dAtA, i, uint64(len(m.MysqldArgs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StartResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ShutdownRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShutdownRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShutdownRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WaitForMysqld { + i-- + if m.WaitForMysqld { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ShutdownResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShutdownResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShutdownResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RunMysqlUpgradeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunMysqlUpgradeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunMysqlUpgradeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RunMysqlUpgradeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunMysqlUpgradeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunMysqlUpgradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReinitConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReinitConfigRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReinitConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReinitConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ReinitConfigResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReinitConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RefreshConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RefreshConfigRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RefreshConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RefreshConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RefreshConfigResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RefreshConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *BackupInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Directory) > 0 { + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarintMysqlctl(dAtA, i, uint64(len(m.Directory))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMysqlctl(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMysqlctl(dAtA []byte, offset int, v uint64) int { + offset -= sovMysqlctl(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *StartRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MysqldArgs) > 0 { + for _, s := range m.MysqldArgs { + l = len(s) + n += 1 + l + sovMysqlctl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShutdownRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WaitForMysqld { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShutdownResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RunMysqlUpgradeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RunMysqlUpgradeResponse) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReinitConfigRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReinitConfigResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RefreshConfigRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RefreshConfigResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BackupInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMysqlctl(uint64(l)) + } + l = len(m.Directory) + if l > 0 { + n += 1 + l + sovMysqlctl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMysqlctl(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMysqlctl(x uint64) (n int) { + return sovMysqlctl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *StartRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MysqldArgs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMysqlctl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMysqlctl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MysqldArgs = append(m.MysqldArgs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShutdownRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitForMysqld", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WaitForMysqld = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShutdownResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShutdownResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShutdownResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunMysqlUpgradeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunMysqlUpgradeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunMysqlUpgradeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunMysqlUpgradeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunMysqlUpgradeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunMysqlUpgradeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReinitConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReinitConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReinitConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReinitConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReinitConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReinitConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMysqlctl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMysqlctl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMysqlctl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthMysqlctl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Directory = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMysqlctl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMysqlctl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMysqlctl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMysqlctl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMysqlctl + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMysqlctl + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMysqlctl + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
ErrInvalidLengthMysqlctl = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMysqlctl = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMysqlctl = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index b8a794d4d1d..bec6552eb8c 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -1,14 +1,16 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: query.proto package query import ( + encoding_binary "encoding/binary" fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - topodata "vitess.io/vitess/go/vt/proto/topodata" vtrpc "vitess.io/vitess/go/vt/proto/vtrpc" ) @@ -463,6 +465,43 @@ func (ExecuteOptions_TransactionIsolation) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{6, 2} } +type ExecuteOptions_PlannerVersion int32 + +const ( + ExecuteOptions_DEFAULT_PLANNER ExecuteOptions_PlannerVersion = 0 + ExecuteOptions_V3 ExecuteOptions_PlannerVersion = 1 + ExecuteOptions_Gen4 ExecuteOptions_PlannerVersion = 2 + ExecuteOptions_Gen4Greedy ExecuteOptions_PlannerVersion = 3 + ExecuteOptions_Gen4Left2Right ExecuteOptions_PlannerVersion = 4 + ExecuteOptions_Gen4WithFallback ExecuteOptions_PlannerVersion = 5 +) + +var ExecuteOptions_PlannerVersion_name = map[int32]string{ + 0: "DEFAULT_PLANNER", + 1: "V3", + 2: "Gen4", + 3: "Gen4Greedy", + 4: "Gen4Left2Right", + 5: "Gen4WithFallback", +} + +var ExecuteOptions_PlannerVersion_value = map[string]int32{ + "DEFAULT_PLANNER": 0, + "V3": 1, + "Gen4": 2, + "Gen4Greedy": 3, + "Gen4Left2Right": 4, + "Gen4WithFallback": 5, +} + +func (x ExecuteOptions_PlannerVersion) String() string { + return proto.EnumName(ExecuteOptions_PlannerVersion_name, int32(x)) +} + +func (ExecuteOptions_PlannerVersion) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_5c6ac9b241082464, []int{6, 3} +} + // The category of one statement. type StreamEvent_Statement_Category int32 @@ -512,18 +551,26 @@ func (*Target) ProtoMessage() {} func (*Target) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{0} } - func (m *Target) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Target.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Target) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Target.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Target.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Target) XXX_Merge(src proto.Message) { xxx_messageInfo_Target.Merge(m, src) } func (m *Target) XXX_Size() int { - return xxx_messageInfo_Target.Size(m) + return m.Size() } func (m *Target) XXX_DiscardUnknown() { xxx_messageInfo_Target.DiscardUnknown(m) @@ -581,18 +628,26 @@ func (*VTGateCallerID) ProtoMessage() {} func (*VTGateCallerID) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{1} } - func (m *VTGateCallerID) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VTGateCallerID.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VTGateCallerID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VTGateCallerID.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VTGateCallerID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VTGateCallerID) XXX_Merge(src proto.Message) { xxx_messageInfo_VTGateCallerID.Merge(m, src) } func (m *VTGateCallerID) XXX_Size() int { - return xxx_messageInfo_VTGateCallerID.Size(m) + return m.Size() } func (m *VTGateCallerID) XXX_DiscardUnknown() { 
xxx_messageInfo_VTGateCallerID.DiscardUnknown(m) @@ -638,18 +693,26 @@ func (*EventToken) ProtoMessage() {} func (*EventToken) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{2} } - func (m *EventToken) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EventToken.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *EventToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EventToken.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_EventToken.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *EventToken) XXX_Merge(src proto.Message) { xxx_messageInfo_EventToken.Merge(m, src) } func (m *EventToken) XXX_Size() int { - return xxx_messageInfo_EventToken.Size(m) + return m.Size() } func (m *EventToken) XXX_DiscardUnknown() { xxx_messageInfo_EventToken.DiscardUnknown(m) @@ -693,18 +756,26 @@ func (*Value) ProtoMessage() {} func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{3} } - func (m *Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Value.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Value.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Value) XXX_Merge(src proto.Message) { xxx_messageInfo_Value.Merge(m, src) } func (m *Value) XXX_Size() int { - return xxx_messageInfo_Value.Size(m) + return m.Size() } func (m *Value) XXX_DiscardUnknown() { xxx_messageInfo_Value.DiscardUnknown(m) @@ -743,18 +814,26 @@ func (*BindVariable) ProtoMessage() {} func (*BindVariable) Descriptor() ([]byte, []int) { return 
fileDescriptor_5c6ac9b241082464, []int{4} } - func (m *BindVariable) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindVariable.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BindVariable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindVariable.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BindVariable.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BindVariable) XXX_Merge(src proto.Message) { xxx_messageInfo_BindVariable.Merge(m, src) } func (m *BindVariable) XXX_Size() int { - return xxx_messageInfo_BindVariable.Size(m) + return m.Size() } func (m *BindVariable) XXX_DiscardUnknown() { xxx_messageInfo_BindVariable.DiscardUnknown(m) @@ -801,18 +880,26 @@ func (*BoundQuery) ProtoMessage() {} func (*BoundQuery) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{5} } - func (m *BoundQuery) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoundQuery.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BoundQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoundQuery.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BoundQuery.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BoundQuery) XXX_Merge(src proto.Message) { xxx_messageInfo_BoundQuery.Merge(m, src) } func (m *BoundQuery) XXX_Size() int { - return xxx_messageInfo_BoundQuery.Size(m) + return m.Size() } func (m *BoundQuery) XXX_DiscardUnknown() { xxx_messageInfo_BoundQuery.DiscardUnknown(m) @@ -858,7 +945,14 @@ type ExecuteOptions struct { TransactionIsolation ExecuteOptions_TransactionIsolation 
`protobuf:"varint,9,opt,name=transaction_isolation,json=transactionIsolation,proto3,enum=query.ExecuteOptions_TransactionIsolation" json:"transaction_isolation,omitempty"` // skip_query_plan_cache specifies if the query plan should be cached by vitess. // By default all query plans are cached. - SkipQueryPlanCache bool `protobuf:"varint,10,opt,name=skip_query_plan_cache,json=skipQueryPlanCache,proto3" json:"skip_query_plan_cache,omitempty"` + SkipQueryPlanCache bool `protobuf:"varint,10,opt,name=skip_query_plan_cache,json=skipQueryPlanCache,proto3" json:"skip_query_plan_cache,omitempty"` + // PlannerVersion specifies which planner to use. + // If DEFAULT is chosen, whatever vtgate was started with will be used + PlannerVersion ExecuteOptions_PlannerVersion `protobuf:"varint,11,opt,name=planner_version,json=plannerVersion,proto3,enum=query.ExecuteOptions_PlannerVersion" json:"planner_version,omitempty"` + // has_created_temp_tables signals whether plans created in this session should be cached or not + // if the user has created temp tables, Vitess will not reuse plans created for this session in other sessions. + // The current session can still use other sessions cached plans. 
+ HasCreatedTempTables bool `protobuf:"varint,12,opt,name=has_created_temp_tables,json=hasCreatedTempTables,proto3" json:"has_created_temp_tables,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -870,18 +964,26 @@ func (*ExecuteOptions) ProtoMessage() {} func (*ExecuteOptions) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{6} } - func (m *ExecuteOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteOptions.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteOptions.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteOptions) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteOptions.Merge(m, src) } func (m *ExecuteOptions) XXX_Size() int { - return xxx_messageInfo_ExecuteOptions.Size(m) + return m.Size() } func (m *ExecuteOptions) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteOptions.DiscardUnknown(m) @@ -931,6 +1033,20 @@ func (m *ExecuteOptions) GetSkipQueryPlanCache() bool { return false } +func (m *ExecuteOptions) GetPlannerVersion() ExecuteOptions_PlannerVersion { + if m != nil { + return m.PlannerVersion + } + return ExecuteOptions_DEFAULT_PLANNER +} + +func (m *ExecuteOptions) GetHasCreatedTempTables() bool { + if m != nil { + return m.HasCreatedTempTables + } + return false +} + // Field describes a single column returned by a query type Field struct { // name of the field as returned by mysql C API @@ -965,18 +1081,26 @@ func (*Field) ProtoMessage() {} func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{7} } - func (m *Field) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_Field.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Field.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Field) XXX_Merge(src proto.Message) { xxx_messageInfo_Field.Merge(m, src) } func (m *Field) XXX_Size() int { - return xxx_messageInfo_Field.Size(m) + return m.Size() } func (m *Field) XXX_DiscardUnknown() { xxx_messageInfo_Field.DiscardUnknown(m) @@ -1081,18 +1205,26 @@ func (*Row) ProtoMessage() {} func (*Row) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{8} } - func (m *Row) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Row.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Row.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Row.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Row) XXX_Merge(src proto.Message) { xxx_messageInfo_Row.Merge(m, src) } func (m *Row) XXX_Size() int { - return xxx_messageInfo_Row.Size(m) + return m.Size() } func (m *Row) XXX_DiscardUnknown() { xxx_messageInfo_Row.DiscardUnknown(m) @@ -1139,18 +1271,26 @@ func (*QueryResult) ProtoMessage() {} func (*QueryResult) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{9} } - func (m *QueryResult) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryResult.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) + if deterministic { + return 
xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *QueryResult) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryResult.Merge(m, src) } func (m *QueryResult) XXX_Size() int { - return xxx_messageInfo_QueryResult.Size(m) + return m.Size() } func (m *QueryResult) XXX_DiscardUnknown() { xxx_messageInfo_QueryResult.DiscardUnknown(m) @@ -1202,18 +1342,26 @@ func (*QueryWarning) ProtoMessage() {} func (*QueryWarning) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{10} } - func (m *QueryWarning) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryWarning.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *QueryWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryWarning.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_QueryWarning.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *QueryWarning) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryWarning.Merge(m, src) } func (m *QueryWarning) XXX_Size() int { - return xxx_messageInfo_QueryWarning.Size(m) + return m.Size() } func (m *QueryWarning) XXX_DiscardUnknown() { xxx_messageInfo_QueryWarning.DiscardUnknown(m) @@ -1254,18 +1402,26 @@ func (*StreamEvent) ProtoMessage() {} func (*StreamEvent) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{11} } - func (m *StreamEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamEvent.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamEvent.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamEvent.Marshal(b, m, deterministic) + } else { 
+ b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamEvent) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamEvent.Merge(m, src) } func (m *StreamEvent) XXX_Size() int { - return xxx_messageInfo_StreamEvent.Size(m) + return m.Size() } func (m *StreamEvent) XXX_DiscardUnknown() { xxx_messageInfo_StreamEvent.DiscardUnknown(m) @@ -1308,18 +1464,26 @@ func (*StreamEvent_Statement) ProtoMessage() {} func (*StreamEvent_Statement) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{11, 0} } - func (m *StreamEvent_Statement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamEvent_Statement.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamEvent_Statement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamEvent_Statement.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamEvent_Statement.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamEvent_Statement) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamEvent_Statement.Merge(m, src) } func (m *StreamEvent_Statement) XXX_Size() int { - return xxx_messageInfo_StreamEvent_Statement.Size(m) + return m.Size() } func (m *StreamEvent_Statement) XXX_DiscardUnknown() { xxx_messageInfo_StreamEvent_Statement.DiscardUnknown(m) @@ -1382,18 +1546,26 @@ func (*ExecuteRequest) ProtoMessage() {} func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{12} } - func (m *ExecuteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteRequest.Marshal(b, m, deterministic) + if deterministic { + 
return xxx_messageInfo_ExecuteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteRequest.Merge(m, src) } func (m *ExecuteRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteRequest.Size(m) + return m.Size() } func (m *ExecuteRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteRequest.DiscardUnknown(m) @@ -1464,18 +1636,26 @@ func (*ExecuteResponse) ProtoMessage() {} func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{13} } - func (m *ExecuteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteResponse.Merge(m, src) } func (m *ExecuteResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteResponse.Size(m) + return m.Size() } func (m *ExecuteResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteResponse.DiscardUnknown(m) @@ -1509,18 +1689,26 @@ func (*ResultWithError) ProtoMessage() {} func (*ResultWithError) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{14} } - func (m *ResultWithError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResultWithError.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ResultWithError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResultWithError.Marshal(b, m, deterministic) + if 
deterministic { + return xxx_messageInfo_ResultWithError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ResultWithError) XXX_Merge(src proto.Message) { xxx_messageInfo_ResultWithError.Merge(m, src) } func (m *ResultWithError) XXX_Size() int { - return xxx_messageInfo_ResultWithError.Size(m) + return m.Size() } func (m *ResultWithError) XXX_DiscardUnknown() { xxx_messageInfo_ResultWithError.DiscardUnknown(m) @@ -1562,18 +1750,26 @@ func (*ExecuteBatchRequest) ProtoMessage() {} func (*ExecuteBatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{15} } - func (m *ExecuteBatchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteBatchRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteBatchRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteBatchRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteBatchRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteBatchRequest.Merge(m, src) } func (m *ExecuteBatchRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteBatchRequest.Size(m) + return m.Size() } func (m *ExecuteBatchRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteBatchRequest.DiscardUnknown(m) @@ -1644,18 +1840,26 @@ func (*ExecuteBatchResponse) ProtoMessage() {} func (*ExecuteBatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{16} } - func (m *ExecuteBatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteBatchResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteBatchResponse) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - return xxx_messageInfo_ExecuteBatchResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteBatchResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteBatchResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteBatchResponse.Merge(m, src) } func (m *ExecuteBatchResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteBatchResponse.Size(m) + return m.Size() } func (m *ExecuteBatchResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteBatchResponse.DiscardUnknown(m) @@ -1689,18 +1893,26 @@ func (*StreamExecuteRequest) ProtoMessage() {} func (*StreamExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{17} } - func (m *StreamExecuteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamExecuteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamExecuteRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamExecuteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamExecuteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamExecuteRequest.Merge(m, src) } func (m *StreamExecuteRequest) XXX_Size() int { - return xxx_messageInfo_StreamExecuteRequest.Size(m) + return m.Size() } func (m *StreamExecuteRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamExecuteRequest.DiscardUnknown(m) @@ -1764,18 +1976,26 @@ func (*StreamExecuteResponse) ProtoMessage() {} func (*StreamExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{18} } - func (m *StreamExecuteResponse) XXX_Unmarshal(b []byte) error { 
- return xxx_messageInfo_StreamExecuteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamExecuteResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamExecuteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamExecuteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamExecuteResponse.Merge(m, src) } func (m *StreamExecuteResponse) XXX_Size() int { - return xxx_messageInfo_StreamExecuteResponse.Size(m) + return m.Size() } func (m *StreamExecuteResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamExecuteResponse.DiscardUnknown(m) @@ -1807,18 +2027,26 @@ func (*BeginRequest) ProtoMessage() {} func (*BeginRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{19} } - func (m *BeginRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BeginRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BeginRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BeginRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BeginRequest.Merge(m, src) } func (m *BeginRequest) XXX_Size() int { - return xxx_messageInfo_BeginRequest.Size(m) + return m.Size() } func (m *BeginRequest) XXX_DiscardUnknown() { xxx_messageInfo_BeginRequest.DiscardUnknown(m) @@ -1869,18 +2097,26 @@ func (*BeginResponse) ProtoMessage() {} func (*BeginResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{20} } - func (m 
*BeginResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BeginResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BeginResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BeginResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_BeginResponse.Merge(m, src) } func (m *BeginResponse) XXX_Size() int { - return xxx_messageInfo_BeginResponse.Size(m) + return m.Size() } func (m *BeginResponse) XXX_DiscardUnknown() { xxx_messageInfo_BeginResponse.DiscardUnknown(m) @@ -1919,18 +2155,26 @@ func (*CommitRequest) ProtoMessage() {} func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{21} } - func (m *CommitRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CommitRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CommitRequest.Merge(m, src) } func (m *CommitRequest) XXX_Size() int { - return xxx_messageInfo_CommitRequest.Size(m) + return m.Size() } func (m *CommitRequest) XXX_DiscardUnknown() { xxx_messageInfo_CommitRequest.DiscardUnknown(m) @@ -1980,18 +2224,26 @@ func (*CommitResponse) ProtoMessage() {} func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{22} } - func (m *CommitResponse) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CommitResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CommitResponse.Merge(m, src) } func (m *CommitResponse) XXX_Size() int { - return xxx_messageInfo_CommitResponse.Size(m) + return m.Size() } func (m *CommitResponse) XXX_DiscardUnknown() { xxx_messageInfo_CommitResponse.DiscardUnknown(m) @@ -2023,18 +2275,26 @@ func (*RollbackRequest) ProtoMessage() {} func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{23} } - func (m *RollbackRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RollbackRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RollbackRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RollbackRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RollbackRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RollbackRequest.Merge(m, src) } func (m *RollbackRequest) XXX_Size() int { - return xxx_messageInfo_RollbackRequest.Size(m) + return m.Size() } func (m *RollbackRequest) XXX_DiscardUnknown() { xxx_messageInfo_RollbackRequest.DiscardUnknown(m) @@ -2084,18 +2344,26 @@ func (*RollbackResponse) ProtoMessage() {} func (*RollbackResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{24} } - func (m 
*RollbackResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RollbackResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RollbackResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RollbackResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RollbackResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RollbackResponse.Merge(m, src) } func (m *RollbackResponse) XXX_Size() int { - return xxx_messageInfo_RollbackResponse.Size(m) + return m.Size() } func (m *RollbackResponse) XXX_DiscardUnknown() { xxx_messageInfo_RollbackResponse.DiscardUnknown(m) @@ -2128,18 +2396,26 @@ func (*PrepareRequest) ProtoMessage() {} func (*PrepareRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{25} } - func (m *PrepareRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrepareRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PrepareRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrepareRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PrepareRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PrepareRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PrepareRequest.Merge(m, src) } func (m *PrepareRequest) XXX_Size() int { - return xxx_messageInfo_PrepareRequest.Size(m) + return m.Size() } func (m *PrepareRequest) XXX_DiscardUnknown() { xxx_messageInfo_PrepareRequest.DiscardUnknown(m) @@ -2195,18 +2471,26 @@ func (*PrepareResponse) ProtoMessage() {} func (*PrepareResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, 
[]int{26} } - func (m *PrepareResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrepareResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PrepareResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrepareResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PrepareResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PrepareResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PrepareResponse.Merge(m, src) } func (m *PrepareResponse) XXX_Size() int { - return xxx_messageInfo_PrepareResponse.Size(m) + return m.Size() } func (m *PrepareResponse) XXX_DiscardUnknown() { xxx_messageInfo_PrepareResponse.DiscardUnknown(m) @@ -2231,18 +2515,26 @@ func (*CommitPreparedRequest) ProtoMessage() {} func (*CommitPreparedRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{27} } - func (m *CommitPreparedRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitPreparedRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CommitPreparedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitPreparedRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CommitPreparedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CommitPreparedRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CommitPreparedRequest.Merge(m, src) } func (m *CommitPreparedRequest) XXX_Size() int { - return xxx_messageInfo_CommitPreparedRequest.Size(m) + return m.Size() } func (m *CommitPreparedRequest) XXX_DiscardUnknown() { xxx_messageInfo_CommitPreparedRequest.DiscardUnknown(m) @@ -2291,18 +2583,26 @@ func (*CommitPreparedResponse) 
ProtoMessage() {} func (*CommitPreparedResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{28} } - func (m *CommitPreparedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitPreparedResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CommitPreparedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitPreparedResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CommitPreparedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CommitPreparedResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CommitPreparedResponse.Merge(m, src) } func (m *CommitPreparedResponse) XXX_Size() int { - return xxx_messageInfo_CommitPreparedResponse.Size(m) + return m.Size() } func (m *CommitPreparedResponse) XXX_DiscardUnknown() { xxx_messageInfo_CommitPreparedResponse.DiscardUnknown(m) @@ -2328,18 +2628,26 @@ func (*RollbackPreparedRequest) ProtoMessage() {} func (*RollbackPreparedRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{29} } - func (m *RollbackPreparedRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RollbackPreparedRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RollbackPreparedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RollbackPreparedRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RollbackPreparedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RollbackPreparedRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RollbackPreparedRequest.Merge(m, src) } func (m *RollbackPreparedRequest) XXX_Size() int { - return 
xxx_messageInfo_RollbackPreparedRequest.Size(m) + return m.Size() } func (m *RollbackPreparedRequest) XXX_DiscardUnknown() { xxx_messageInfo_RollbackPreparedRequest.DiscardUnknown(m) @@ -2395,18 +2703,26 @@ func (*RollbackPreparedResponse) ProtoMessage() {} func (*RollbackPreparedResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{30} } - func (m *RollbackPreparedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RollbackPreparedResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RollbackPreparedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RollbackPreparedResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RollbackPreparedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RollbackPreparedResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RollbackPreparedResponse.Merge(m, src) } func (m *RollbackPreparedResponse) XXX_Size() int { - return xxx_messageInfo_RollbackPreparedResponse.Size(m) + return m.Size() } func (m *RollbackPreparedResponse) XXX_DiscardUnknown() { xxx_messageInfo_RollbackPreparedResponse.DiscardUnknown(m) @@ -2432,18 +2748,26 @@ func (*CreateTransactionRequest) ProtoMessage() {} func (*CreateTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{31} } - func (m *CreateTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateTransactionRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CreateTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateTransactionRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CreateTransactionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CreateTransactionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateTransactionRequest.Merge(m, src) } func (m *CreateTransactionRequest) XXX_Size() int { - return xxx_messageInfo_CreateTransactionRequest.Size(m) + return m.Size() } func (m *CreateTransactionRequest) XXX_DiscardUnknown() { xxx_messageInfo_CreateTransactionRequest.DiscardUnknown(m) @@ -2499,18 +2823,26 @@ func (*CreateTransactionResponse) ProtoMessage() {} func (*CreateTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{32} } - func (m *CreateTransactionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateTransactionResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CreateTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateTransactionResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CreateTransactionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CreateTransactionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateTransactionResponse.Merge(m, src) } func (m *CreateTransactionResponse) XXX_Size() int { - return xxx_messageInfo_CreateTransactionResponse.Size(m) + return m.Size() } func (m *CreateTransactionResponse) XXX_DiscardUnknown() { xxx_messageInfo_CreateTransactionResponse.DiscardUnknown(m) @@ -2536,18 +2868,26 @@ func (*StartCommitRequest) ProtoMessage() {} func (*StartCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{33} } - func (m *StartCommitRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartCommitRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartCommitRequest) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - return xxx_messageInfo_StartCommitRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartCommitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StartCommitRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StartCommitRequest.Merge(m, src) } func (m *StartCommitRequest) XXX_Size() int { - return xxx_messageInfo_StartCommitRequest.Size(m) + return m.Size() } func (m *StartCommitRequest) XXX_DiscardUnknown() { xxx_messageInfo_StartCommitRequest.DiscardUnknown(m) @@ -2603,18 +2943,26 @@ func (*StartCommitResponse) ProtoMessage() {} func (*StartCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{34} } - func (m *StartCommitResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartCommitResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartCommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartCommitResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartCommitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StartCommitResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StartCommitResponse.Merge(m, src) } func (m *StartCommitResponse) XXX_Size() int { - return xxx_messageInfo_StartCommitResponse.Size(m) + return m.Size() } func (m *StartCommitResponse) XXX_DiscardUnknown() { xxx_messageInfo_StartCommitResponse.DiscardUnknown(m) @@ -2640,18 +2988,26 @@ func (*SetRollbackRequest) ProtoMessage() {} func (*SetRollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{35} } - func (m *SetRollbackRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_SetRollbackRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetRollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetRollbackRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetRollbackRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetRollbackRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SetRollbackRequest.Merge(m, src) } func (m *SetRollbackRequest) XXX_Size() int { - return xxx_messageInfo_SetRollbackRequest.Size(m) + return m.Size() } func (m *SetRollbackRequest) XXX_DiscardUnknown() { xxx_messageInfo_SetRollbackRequest.DiscardUnknown(m) @@ -2707,18 +3063,26 @@ func (*SetRollbackResponse) ProtoMessage() {} func (*SetRollbackResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{36} } - func (m *SetRollbackResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetRollbackResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetRollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetRollbackResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetRollbackResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetRollbackResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SetRollbackResponse.Merge(m, src) } func (m *SetRollbackResponse) XXX_Size() int { - return xxx_messageInfo_SetRollbackResponse.Size(m) + return m.Size() } func (m *SetRollbackResponse) XXX_DiscardUnknown() { xxx_messageInfo_SetRollbackResponse.DiscardUnknown(m) @@ -2743,18 +3107,26 @@ func (*ConcludeTransactionRequest) ProtoMessage() {} func (*ConcludeTransactionRequest) Descriptor() ([]byte, 
[]int) { return fileDescriptor_5c6ac9b241082464, []int{37} } - func (m *ConcludeTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConcludeTransactionRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ConcludeTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConcludeTransactionRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ConcludeTransactionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ConcludeTransactionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ConcludeTransactionRequest.Merge(m, src) } func (m *ConcludeTransactionRequest) XXX_Size() int { - return xxx_messageInfo_ConcludeTransactionRequest.Size(m) + return m.Size() } func (m *ConcludeTransactionRequest) XXX_DiscardUnknown() { xxx_messageInfo_ConcludeTransactionRequest.DiscardUnknown(m) @@ -2803,18 +3175,26 @@ func (*ConcludeTransactionResponse) ProtoMessage() {} func (*ConcludeTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{38} } - func (m *ConcludeTransactionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConcludeTransactionResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ConcludeTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConcludeTransactionResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ConcludeTransactionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ConcludeTransactionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ConcludeTransactionResponse.Merge(m, src) } func (m *ConcludeTransactionResponse) XXX_Size() int { - return 
xxx_messageInfo_ConcludeTransactionResponse.Size(m) + return m.Size() } func (m *ConcludeTransactionResponse) XXX_DiscardUnknown() { xxx_messageInfo_ConcludeTransactionResponse.DiscardUnknown(m) @@ -2839,18 +3219,26 @@ func (*ReadTransactionRequest) ProtoMessage() {} func (*ReadTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{39} } - func (m *ReadTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadTransactionRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReadTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadTransactionRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReadTransactionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReadTransactionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReadTransactionRequest.Merge(m, src) } func (m *ReadTransactionRequest) XXX_Size() int { - return xxx_messageInfo_ReadTransactionRequest.Size(m) + return m.Size() } func (m *ReadTransactionRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReadTransactionRequest.DiscardUnknown(m) @@ -2900,18 +3288,26 @@ func (*ReadTransactionResponse) ProtoMessage() {} func (*ReadTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{40} } - func (m *ReadTransactionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadTransactionResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReadTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadTransactionResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReadTransactionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil + } } func (m *ReadTransactionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReadTransactionResponse.Merge(m, src) } func (m *ReadTransactionResponse) XXX_Size() int { - return xxx_messageInfo_ReadTransactionResponse.Size(m) + return m.Size() } func (m *ReadTransactionResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReadTransactionResponse.DiscardUnknown(m) @@ -2946,18 +3342,26 @@ func (*BeginExecuteRequest) ProtoMessage() {} func (*BeginExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{41} } - func (m *BeginExecuteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginExecuteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BeginExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginExecuteRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BeginExecuteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BeginExecuteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BeginExecuteRequest.Merge(m, src) } func (m *BeginExecuteRequest) XXX_Size() int { - return xxx_messageInfo_BeginExecuteRequest.Size(m) + return m.Size() } func (m *BeginExecuteRequest) XXX_DiscardUnknown() { xxx_messageInfo_BeginExecuteRequest.DiscardUnknown(m) @@ -3035,18 +3439,26 @@ func (*BeginExecuteResponse) ProtoMessage() {} func (*BeginExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{42} } - func (m *BeginExecuteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginExecuteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BeginExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginExecuteResponse.Marshal(b, m, deterministic) + if deterministic { + 
return xxx_messageInfo_BeginExecuteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BeginExecuteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_BeginExecuteResponse.Merge(m, src) } func (m *BeginExecuteResponse) XXX_Size() int { - return xxx_messageInfo_BeginExecuteResponse.Size(m) + return m.Size() } func (m *BeginExecuteResponse) XXX_DiscardUnknown() { xxx_messageInfo_BeginExecuteResponse.DiscardUnknown(m) @@ -3101,18 +3513,26 @@ func (*BeginExecuteBatchRequest) ProtoMessage() {} func (*BeginExecuteBatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{43} } - func (m *BeginExecuteBatchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginExecuteBatchRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BeginExecuteBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginExecuteBatchRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BeginExecuteBatchRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BeginExecuteBatchRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BeginExecuteBatchRequest.Merge(m, src) } func (m *BeginExecuteBatchRequest) XXX_Size() int { - return xxx_messageInfo_BeginExecuteBatchRequest.Size(m) + return m.Size() } func (m *BeginExecuteBatchRequest) XXX_DiscardUnknown() { xxx_messageInfo_BeginExecuteBatchRequest.DiscardUnknown(m) @@ -3183,18 +3603,26 @@ func (*BeginExecuteBatchResponse) ProtoMessage() {} func (*BeginExecuteBatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{44} } - func (m *BeginExecuteBatchResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_BeginExecuteBatchResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BeginExecuteBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginExecuteBatchResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BeginExecuteBatchResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BeginExecuteBatchResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_BeginExecuteBatchResponse.Merge(m, src) } func (m *BeginExecuteBatchResponse) XXX_Size() int { - return xxx_messageInfo_BeginExecuteBatchResponse.Size(m) + return m.Size() } func (m *BeginExecuteBatchResponse) XXX_DiscardUnknown() { xxx_messageInfo_BeginExecuteBatchResponse.DiscardUnknown(m) @@ -3248,18 +3676,26 @@ func (*MessageStreamRequest) ProtoMessage() {} func (*MessageStreamRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{45} } - func (m *MessageStreamRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageStreamRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MessageStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageStreamRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MessageStreamRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MessageStreamRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_MessageStreamRequest.Merge(m, src) } func (m *MessageStreamRequest) XXX_Size() int { - return xxx_messageInfo_MessageStreamRequest.Size(m) + return m.Size() } func (m *MessageStreamRequest) XXX_DiscardUnknown() { xxx_messageInfo_MessageStreamRequest.DiscardUnknown(m) @@ -3309,18 +3745,26 @@ func (*MessageStreamResponse) 
ProtoMessage() {} func (*MessageStreamResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{46} } - func (m *MessageStreamResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageStreamResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MessageStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageStreamResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MessageStreamResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MessageStreamResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MessageStreamResponse.Merge(m, src) } func (m *MessageStreamResponse) XXX_Size() int { - return xxx_messageInfo_MessageStreamResponse.Size(m) + return m.Size() } func (m *MessageStreamResponse) XXX_DiscardUnknown() { xxx_messageInfo_MessageStreamResponse.DiscardUnknown(m) @@ -3354,18 +3798,26 @@ func (*MessageAckRequest) ProtoMessage() {} func (*MessageAckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{47} } - func (m *MessageAckRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageAckRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MessageAckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageAckRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MessageAckRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MessageAckRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_MessageAckRequest.Merge(m, src) } func (m *MessageAckRequest) XXX_Size() int { - return xxx_messageInfo_MessageAckRequest.Size(m) + return m.Size() } func (m *MessageAckRequest) 
XXX_DiscardUnknown() { xxx_messageInfo_MessageAckRequest.DiscardUnknown(m) @@ -3425,18 +3877,26 @@ func (*MessageAckResponse) ProtoMessage() {} func (*MessageAckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{48} } - func (m *MessageAckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageAckResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MessageAckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageAckResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MessageAckResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MessageAckResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MessageAckResponse.Merge(m, src) } func (m *MessageAckResponse) XXX_Size() int { - return xxx_messageInfo_MessageAckResponse.Size(m) + return m.Size() } func (m *MessageAckResponse) XXX_DiscardUnknown() { xxx_messageInfo_MessageAckResponse.DiscardUnknown(m) @@ -3471,18 +3931,26 @@ func (*ReserveExecuteRequest) ProtoMessage() {} func (*ReserveExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{49} } - func (m *ReserveExecuteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReserveExecuteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReserveExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReserveExecuteRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReserveExecuteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReserveExecuteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReserveExecuteRequest.Merge(m, src) } func (m 
*ReserveExecuteRequest) XXX_Size() int { - return xxx_messageInfo_ReserveExecuteRequest.Size(m) + return m.Size() } func (m *ReserveExecuteRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReserveExecuteRequest.DiscardUnknown(m) @@ -3557,18 +4025,26 @@ func (*ReserveExecuteResponse) ProtoMessage() {} func (*ReserveExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{50} } - func (m *ReserveExecuteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReserveExecuteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReserveExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReserveExecuteResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReserveExecuteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReserveExecuteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReserveExecuteResponse.Merge(m, src) } func (m *ReserveExecuteResponse) XXX_Size() int { - return xxx_messageInfo_ReserveExecuteResponse.Size(m) + return m.Size() } func (m *ReserveExecuteResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReserveExecuteResponse.DiscardUnknown(m) @@ -3623,18 +4099,26 @@ func (*ReserveBeginExecuteRequest) ProtoMessage() {} func (*ReserveBeginExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{51} } - func (m *ReserveBeginExecuteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReserveBeginExecuteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReserveBeginExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReserveBeginExecuteRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReserveBeginExecuteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + 
n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReserveBeginExecuteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReserveBeginExecuteRequest.Merge(m, src) } func (m *ReserveBeginExecuteRequest) XXX_Size() int { - return xxx_messageInfo_ReserveBeginExecuteRequest.Size(m) + return m.Size() } func (m *ReserveBeginExecuteRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReserveBeginExecuteRequest.DiscardUnknown(m) @@ -3706,18 +4190,26 @@ func (*ReserveBeginExecuteResponse) ProtoMessage() {} func (*ReserveBeginExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{52} } - func (m *ReserveBeginExecuteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReserveBeginExecuteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReserveBeginExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReserveBeginExecuteResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReserveBeginExecuteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReserveBeginExecuteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReserveBeginExecuteResponse.Merge(m, src) } func (m *ReserveBeginExecuteResponse) XXX_Size() int { - return xxx_messageInfo_ReserveBeginExecuteResponse.Size(m) + return m.Size() } func (m *ReserveBeginExecuteResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReserveBeginExecuteResponse.DiscardUnknown(m) @@ -3778,18 +4270,26 @@ func (*ReleaseRequest) ProtoMessage() {} func (*ReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{53} } - func (m *ReleaseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReleaseRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReleaseRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReleaseRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReleaseRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReleaseRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReleaseRequest.Merge(m, src) } func (m *ReleaseRequest) XXX_Size() int { - return xxx_messageInfo_ReleaseRequest.Size(m) + return m.Size() } func (m *ReleaseRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReleaseRequest.DiscardUnknown(m) @@ -3845,18 +4345,26 @@ func (*ReleaseResponse) ProtoMessage() {} func (*ReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{54} } - func (m *ReleaseResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReleaseResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReleaseResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReleaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReleaseResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReleaseResponse.Merge(m, src) } func (m *ReleaseResponse) XXX_Size() int { - return xxx_messageInfo_ReleaseResponse.Size(m) + return m.Size() } func (m *ReleaseResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReleaseResponse.DiscardUnknown(m) @@ -3877,18 +4385,26 @@ func (*StreamHealthRequest) ProtoMessage() {} func (*StreamHealthRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{55} } - func (m *StreamHealthRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamHealthRequest.Unmarshal(m, b) + return 
m.Unmarshal(b) } func (m *StreamHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamHealthRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamHealthRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamHealthRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamHealthRequest.Merge(m, src) } func (m *StreamHealthRequest) XXX_Size() int { - return xxx_messageInfo_StreamHealthRequest.Size(m) + return m.Size() } func (m *StreamHealthRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamHealthRequest.DiscardUnknown(m) @@ -3937,18 +4453,26 @@ func (*RealtimeStats) ProtoMessage() {} func (*RealtimeStats) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{56} } - func (m *RealtimeStats) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RealtimeStats.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RealtimeStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RealtimeStats.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RealtimeStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RealtimeStats) XXX_Merge(src proto.Message) { xxx_messageInfo_RealtimeStats.Merge(m, src) } func (m *RealtimeStats) XXX_Size() int { - return xxx_messageInfo_RealtimeStats.Size(m) + return m.Size() } func (m *RealtimeStats) XXX_DiscardUnknown() { xxx_messageInfo_RealtimeStats.DiscardUnknown(m) @@ -4026,18 +4550,26 @@ func (*AggregateStats) ProtoMessage() {} func (*AggregateStats) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{57} } - func (m *AggregateStats) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_AggregateStats.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *AggregateStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AggregateStats.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_AggregateStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *AggregateStats) XXX_Merge(src proto.Message) { xxx_messageInfo_AggregateStats.Merge(m, src) } func (m *AggregateStats) XXX_Size() int { - return xxx_messageInfo_AggregateStats.Size(m) + return m.Size() } func (m *AggregateStats) XXX_DiscardUnknown() { xxx_messageInfo_AggregateStats.DiscardUnknown(m) @@ -4132,18 +4664,26 @@ func (*StreamHealthResponse) ProtoMessage() {} func (*StreamHealthResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5c6ac9b241082464, []int{58} } - func (m *StreamHealthResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamHealthResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamHealthResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamHealthResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamHealthResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamHealthResponse.Merge(m, src) } func (m *StreamHealthResponse) XXX_Size() int { - return xxx_messageInfo_StreamHealthResponse.Size(m) + return m.Size() } func (m *StreamHealthResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamHealthResponse.DiscardUnknown(m) @@ -4203,18 +4743,26 @@ func (*TransactionMetadata) ProtoMessage() {} func (*TransactionMetadata) Descriptor() ([]byte, []int) { return 
fileDescriptor_5c6ac9b241082464, []int{59} } - func (m *TransactionMetadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TransactionMetadata.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *TransactionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TransactionMetadata.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_TransactionMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *TransactionMetadata) XXX_Merge(src proto.Message) { xxx_messageInfo_TransactionMetadata.Merge(m, src) } func (m *TransactionMetadata) XXX_Size() int { - return xxx_messageInfo_TransactionMetadata.Size(m) + return m.Size() } func (m *TransactionMetadata) XXX_DiscardUnknown() { xxx_messageInfo_TransactionMetadata.DiscardUnknown(m) @@ -4258,6 +4806,7 @@ func init() { proto.RegisterEnum("query.ExecuteOptions_IncludedFields", ExecuteOptions_IncludedFields_name, ExecuteOptions_IncludedFields_value) proto.RegisterEnum("query.ExecuteOptions_Workload", ExecuteOptions_Workload_name, ExecuteOptions_Workload_value) proto.RegisterEnum("query.ExecuteOptions_TransactionIsolation", ExecuteOptions_TransactionIsolation_name, ExecuteOptions_TransactionIsolation_value) + proto.RegisterEnum("query.ExecuteOptions_PlannerVersion", ExecuteOptions_PlannerVersion_name, ExecuteOptions_PlannerVersion_value) proto.RegisterEnum("query.StreamEvent_Statement_Category", StreamEvent_Statement_Category_name, StreamEvent_Statement_Category_value) proto.RegisterType((*Target)(nil), "query.Target") proto.RegisterType((*VTGateCallerID)(nil), "query.VTGateCallerID") @@ -4326,203 +4875,15226 @@ func init() { func init() { proto.RegisterFile("query.proto", fileDescriptor_5c6ac9b241082464) } var fileDescriptor_5c6ac9b241082464 = []byte{ - // 3158 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4b, 0x70, 0x1b, 0x47, - 0x7a, 0xd6, 0xe0, 0x45, 0xe0, 0x07, 0x01, 0x36, 0x9b, 0xa4, 0x04, 0x51, 0x7e, 0xd0, 0x63, 0xcb, - 0x66, 0x98, 0x84, 0x92, 0x28, 0x59, 0x51, 0x6c, 0x27, 0xd1, 0x10, 0x1c, 0xca, 0x90, 0x80, 0x01, - 0xd4, 0x18, 0x48, 0x96, 0x2a, 0x55, 0x53, 0x43, 0xa0, 0x05, 0x4e, 0x71, 0x80, 0x81, 0x66, 0x86, - 0x94, 0x78, 0x53, 0xe2, 0x38, 0xce, 0x3b, 0xce, 0x3b, 0x8e, 0x2b, 0xae, 0x54, 0xe5, 0x90, 0xca, - 0x65, 0xcf, 0x3e, 0xef, 0xc1, 0x87, 0x3d, 0x6c, 0xd5, 0x1e, 0x77, 0xf7, 0xb0, 0xbb, 0x87, 0xad, - 0xdd, 0x93, 0x6b, 0x6b, 0x0f, 0x7b, 0xd8, 0xc3, 0xd6, 0x56, 0x3f, 0x66, 0x00, 0x90, 0xb0, 0x44, - 0xcb, 0xeb, 0xda, 0x92, 0xac, 0x5b, 0xff, 0x8f, 0x7e, 0x7c, 0x5f, 0xff, 0xf3, 0xf7, 0x63, 0x1a, - 0xf2, 0x77, 0x77, 0xa9, 0xbf, 0xbf, 0x3a, 0xf0, 0xbd, 0xd0, 0xc3, 0x69, 0x2e, 0x2c, 0x16, 0x43, - 0x6f, 0xe0, 0x75, 0xec, 0xd0, 0x16, 0xea, 0xc5, 0xfc, 0x5e, 0xe8, 0x0f, 0xda, 0x42, 0x50, 0xdf, - 0x53, 0x20, 0x63, 0xda, 0x7e, 0x97, 0x86, 0x78, 0x11, 0xb2, 0x3b, 0x74, 0x3f, 0x18, 0xd8, 0x6d, - 0x5a, 0x52, 0x96, 0x94, 0xe5, 0x1c, 0x89, 0x65, 0x3c, 0x0f, 0xe9, 0x60, 0xdb, 0xf6, 0x3b, 0xa5, - 0x04, 0x37, 0x08, 0x01, 0xbf, 0x0e, 0xf9, 0xd0, 0xde, 0x72, 0x69, 0x68, 0x85, 0xfb, 0x03, 0x5a, - 0x4a, 0x2e, 0x29, 0xcb, 0xc5, 0xb5, 0xf9, 0xd5, 0xb8, 0x3f, 0x93, 0x1b, 0xcd, 0xfd, 0x01, 0x25, - 0x10, 0xc6, 0x65, 0x8c, 0x21, 0xd5, 0xa6, 0xae, 0x5b, 0x4a, 0xf1, 0xb6, 0x78, 0x59, 0xdd, 0x80, - 0xe2, 0x0d, 0xf3, 0x8a, 0x1d, 0xd2, 0xb2, 0xed, 0xba, 0xd4, 0xaf, 0x6c, 0xb0, 0xe1, 0xec, 0x06, - 0xd4, 0xef, 0xdb, 0xbd, 0x78, 0x38, 0x91, 0x8c, 0x8f, 0x43, 0xa6, 0xeb, 0x7b, 0xbb, 0x83, 0xa0, - 0x94, 0x58, 0x4a, 0x2e, 0xe7, 0x88, 0x94, 0xd4, 0x3f, 0x05, 0xd0, 0xf7, 0x68, 0x3f, 0x34, 0xbd, - 0x1d, 0xda, 0xc7, 0xcf, 0x41, 0x2e, 0x74, 0x7a, 0x34, 0x08, 0xed, 0xde, 0x80, 0x37, 0x91, 0x24, - 0x43, 0xc5, 0xe7, 0x40, 0x5a, 0x84, 0xec, 0xc0, 0x0b, 0x9c, 0xd0, 0xf1, 0xfa, 0x1c, 0x4f, 0x8e, - 0xc4, 0xb2, 0xfa, 0xc7, 0x90, 0xbe, 0x61, 0xbb, 0xbb, 0x14, 0xbf, 0x08, 
0x29, 0x0e, 0x58, 0xe1, - 0x80, 0xf3, 0xab, 0x82, 0x74, 0x8e, 0x93, 0x1b, 0x58, 0xdb, 0x7b, 0xcc, 0x93, 0xb7, 0x3d, 0x4d, - 0x84, 0xa0, 0xee, 0xc0, 0xf4, 0xba, 0xd3, 0xef, 0xdc, 0xb0, 0x7d, 0x87, 0x91, 0xf1, 0x98, 0xcd, - 0xe0, 0x57, 0x20, 0xc3, 0x0b, 0x41, 0x29, 0xb9, 0x94, 0x5c, 0xce, 0xaf, 0x4d, 0xcb, 0x8a, 0x7c, - 0x6c, 0x44, 0xda, 0xd4, 0x6f, 0x2a, 0x00, 0xeb, 0xde, 0x6e, 0xbf, 0x73, 0x9d, 0x19, 0x31, 0x82, - 0x64, 0x70, 0xd7, 0x95, 0x44, 0xb2, 0x22, 0xbe, 0x06, 0xc5, 0x2d, 0xa7, 0xdf, 0xb1, 0xf6, 0xe4, - 0x70, 0x04, 0x97, 0xf9, 0xb5, 0x57, 0x64, 0x73, 0xc3, 0xca, 0xab, 0xa3, 0xa3, 0x0e, 0xf4, 0x7e, - 0xe8, 0xef, 0x93, 0xc2, 0xd6, 0xa8, 0x6e, 0xb1, 0x05, 0xf8, 0xb0, 0x13, 0xeb, 0x74, 0x87, 0xee, - 0x47, 0x9d, 0xee, 0xd0, 0x7d, 0xfc, 0x3b, 0xa3, 0x88, 0xf2, 0x6b, 0x73, 0x51, 0x5f, 0x23, 0x75, - 0x25, 0xcc, 0x37, 0x12, 0x97, 0x14, 0xf5, 0x93, 0x34, 0x14, 0xf5, 0xfb, 0xb4, 0xbd, 0x1b, 0xd2, - 0xfa, 0x80, 0xcd, 0x41, 0x80, 0x6b, 0x30, 0xe3, 0xf4, 0xdb, 0xee, 0x6e, 0x87, 0x76, 0xac, 0x3b, - 0x0e, 0x75, 0x3b, 0x01, 0x8f, 0xa3, 0x62, 0x3c, 0xee, 0x71, 0xff, 0xd5, 0x8a, 0x74, 0xde, 0xe4, - 0xbe, 0xa4, 0xe8, 0x8c, 0xc9, 0x78, 0x05, 0x66, 0xdb, 0xae, 0x43, 0xfb, 0xa1, 0x75, 0x87, 0xe1, - 0xb5, 0x7c, 0xef, 0x5e, 0x50, 0x4a, 0x2f, 0x29, 0xcb, 0x59, 0x32, 0x23, 0x0c, 0x9b, 0x4c, 0x4f, - 0xbc, 0x7b, 0x01, 0x7e, 0x03, 0xb2, 0xf7, 0x3c, 0x7f, 0xc7, 0xf5, 0xec, 0x4e, 0x29, 0xc3, 0xfb, - 0x7c, 0x61, 0x72, 0x9f, 0x37, 0xa5, 0x17, 0x89, 0xfd, 0xf1, 0x32, 0xa0, 0xe0, 0xae, 0x6b, 0x05, - 0xd4, 0xa5, 0xed, 0xd0, 0x72, 0x9d, 0x9e, 0x13, 0x96, 0xb2, 0x3c, 0x24, 0x8b, 0xc1, 0x5d, 0xb7, - 0xc9, 0xd5, 0x55, 0xa6, 0xc5, 0x16, 0x2c, 0x84, 0xbe, 0xdd, 0x0f, 0xec, 0x36, 0x6b, 0xcc, 0x72, - 0x02, 0xcf, 0xb5, 0x79, 0x38, 0xe6, 0x78, 0x97, 0x2b, 0x93, 0xbb, 0x34, 0x87, 0x55, 0x2a, 0x51, - 0x0d, 0x32, 0x1f, 0x4e, 0xd0, 0xe2, 0x73, 0xb0, 0x10, 0xec, 0x38, 0x03, 0x8b, 0xb7, 0x63, 0x0d, - 0x5c, 0xbb, 0x6f, 0xb5, 0xed, 0xf6, 0x36, 0x2d, 0x01, 0x87, 0x8d, 0x99, 0x91, 0xcf, 0x7b, 0xc3, - 0xb5, 0xfb, 
0x65, 0x66, 0x51, 0xdf, 0x84, 0xe2, 0x38, 0x8f, 0x78, 0x16, 0x0a, 0xe6, 0xad, 0x86, - 0x6e, 0x69, 0xc6, 0x86, 0x65, 0x68, 0x35, 0x1d, 0x1d, 0xc3, 0x05, 0xc8, 0x71, 0x55, 0xdd, 0xa8, - 0xde, 0x42, 0x0a, 0x9e, 0x82, 0xa4, 0x56, 0xad, 0xa2, 0x84, 0x7a, 0x09, 0xb2, 0x11, 0x21, 0x78, - 0x06, 0xf2, 0x2d, 0xa3, 0xd9, 0xd0, 0xcb, 0x95, 0xcd, 0x8a, 0xbe, 0x81, 0x8e, 0xe1, 0x2c, 0xa4, - 0xea, 0x55, 0xb3, 0x81, 0x14, 0x51, 0xd2, 0x1a, 0x28, 0xc1, 0x6a, 0x6e, 0xac, 0x6b, 0x28, 0xa9, - 0xfe, 0x9f, 0x02, 0xf3, 0x93, 0x80, 0xe1, 0x3c, 0x4c, 0x6d, 0xe8, 0x9b, 0x5a, 0xab, 0x6a, 0xa2, - 0x63, 0x78, 0x0e, 0x66, 0x88, 0xde, 0xd0, 0x35, 0x53, 0x5b, 0xaf, 0xea, 0x16, 0xd1, 0xb5, 0x0d, - 0xa4, 0x60, 0x0c, 0x45, 0x56, 0xb2, 0xca, 0xf5, 0x5a, 0xad, 0x62, 0x9a, 0xfa, 0x06, 0x4a, 0xe0, - 0x79, 0x40, 0x5c, 0xd7, 0x32, 0x86, 0xda, 0x24, 0x46, 0x30, 0xdd, 0xd4, 0x49, 0x45, 0xab, 0x56, - 0x6e, 0xb3, 0x06, 0x50, 0x0a, 0xbf, 0x04, 0xcf, 0x97, 0xeb, 0x46, 0xb3, 0xd2, 0x34, 0x75, 0xc3, - 0xb4, 0x9a, 0x86, 0xd6, 0x68, 0xbe, 0x5d, 0x37, 0x79, 0xcb, 0x02, 0x5c, 0x1a, 0x17, 0x01, 0xb4, - 0x96, 0x59, 0x17, 0xed, 0xa0, 0xcc, 0xd5, 0x54, 0x56, 0x41, 0x89, 0xab, 0xa9, 0x6c, 0x02, 0x25, - 0xaf, 0xa6, 0xb2, 0x49, 0x94, 0x52, 0x3f, 0x49, 0x40, 0x9a, 0x73, 0xc5, 0xd2, 0xdd, 0x48, 0x12, - 0xe3, 0xe5, 0xf8, 0xd3, 0x4f, 0x3c, 0xe4, 0xd3, 0xe7, 0x19, 0x53, 0x26, 0x21, 0x21, 0xe0, 0x53, - 0x90, 0xf3, 0xfc, 0xae, 0x25, 0x2c, 0x22, 0x7d, 0x66, 0x3d, 0xbf, 0xcb, 0xf3, 0x2c, 0x4b, 0x5d, - 0x2c, 0xeb, 0x6e, 0xd9, 0x01, 0xe5, 0x11, 0x9c, 0x23, 0xb1, 0x8c, 0x4f, 0x02, 0xf3, 0xb3, 0xf8, - 0x38, 0x32, 0xdc, 0x36, 0xe5, 0xf9, 0x5d, 0x83, 0x0d, 0xe5, 0x65, 0x28, 0xb4, 0x3d, 0x77, 0xb7, - 0xd7, 0xb7, 0x5c, 0xda, 0xef, 0x86, 0xdb, 0xa5, 0xa9, 0x25, 0x65, 0xb9, 0x40, 0xa6, 0x85, 0xb2, - 0xca, 0x75, 0xb8, 0x04, 0x53, 0xed, 0x6d, 0xdb, 0x0f, 0xa8, 0x88, 0xda, 0x02, 0x89, 0x44, 0xde, - 0x2b, 0x6d, 0x3b, 0x3d, 0xdb, 0x0d, 0x78, 0x84, 0x16, 0x48, 0x2c, 0x33, 0x10, 0x77, 0x5c, 0xbb, - 0x1b, 0xf0, 0xc8, 0x2a, 0x10, 0x21, 0xe0, 0x17, 
0x21, 0x2f, 0x3b, 0xe4, 0x14, 0xe4, 0xf9, 0x70, - 0x40, 0xa8, 0x18, 0x03, 0xea, 0x1f, 0x40, 0x92, 0x78, 0xf7, 0x58, 0x9f, 0x62, 0x44, 0x41, 0x49, - 0x59, 0x4a, 0x2e, 0x63, 0x12, 0x89, 0x2c, 0xfd, 0xcb, 0x0c, 0x28, 0x12, 0x63, 0x94, 0xf3, 0x3e, - 0x52, 0x20, 0xcf, 0x23, 0x97, 0xd0, 0x60, 0xd7, 0x0d, 0x59, 0xa6, 0x94, 0x29, 0x42, 0x19, 0xcb, - 0x94, 0x7c, 0x5e, 0x88, 0xb4, 0x31, 0x02, 0xd8, 0x57, 0x6f, 0xd9, 0x77, 0xee, 0xd0, 0x76, 0x48, - 0xc5, 0x82, 0x90, 0x22, 0xd3, 0x4c, 0xa9, 0x49, 0x1d, 0x63, 0xde, 0xe9, 0x07, 0xd4, 0x0f, 0x2d, - 0xa7, 0xc3, 0xe7, 0x24, 0x45, 0xb2, 0x42, 0x51, 0xe9, 0xe0, 0x17, 0x20, 0xc5, 0xf3, 0x46, 0x8a, - 0xf7, 0x02, 0xb2, 0x17, 0xe2, 0xdd, 0x23, 0x5c, 0x7f, 0x35, 0x95, 0x4d, 0xa3, 0x8c, 0xfa, 0x16, - 0x4c, 0xf3, 0xc1, 0xdd, 0xb4, 0xfd, 0xbe, 0xd3, 0xef, 0xf2, 0x65, 0xd0, 0xeb, 0x88, 0xb8, 0x28, - 0x10, 0x5e, 0x66, 0x98, 0x7b, 0x34, 0x08, 0xec, 0x2e, 0x95, 0xcb, 0x52, 0x24, 0xaa, 0xff, 0x93, - 0x84, 0x7c, 0x33, 0xf4, 0xa9, 0xdd, 0xe3, 0x2b, 0x1c, 0x7e, 0x0b, 0x20, 0x08, 0xed, 0x90, 0xf6, - 0x68, 0x3f, 0x8c, 0xf0, 0x3d, 0x27, 0x7b, 0x1e, 0xf1, 0x5b, 0x6d, 0x46, 0x4e, 0x64, 0xc4, 0x1f, - 0xaf, 0x41, 0x9e, 0x32, 0xb3, 0x15, 0xb2, 0x95, 0x52, 0x66, 0xe3, 0xd9, 0x28, 0xb5, 0xc4, 0x4b, - 0x28, 0x01, 0x1a, 0x97, 0x17, 0x3f, 0x4e, 0x40, 0x2e, 0x6e, 0x0d, 0x6b, 0x90, 0x6d, 0xdb, 0x21, - 0xed, 0x7a, 0xfe, 0xbe, 0x5c, 0xc0, 0x4e, 0x3f, 0xac, 0xf7, 0xd5, 0xb2, 0x74, 0x26, 0x71, 0x35, - 0xfc, 0x3c, 0x88, 0x5d, 0x81, 0x08, 0x4b, 0x81, 0x37, 0xc7, 0x35, 0x3c, 0x30, 0xdf, 0x00, 0x3c, - 0xf0, 0x9d, 0x9e, 0xed, 0xef, 0x5b, 0x3b, 0x74, 0x3f, 0x4a, 0xf6, 0xc9, 0x09, 0x33, 0x89, 0xa4, - 0xdf, 0x35, 0xba, 0x2f, 0xd3, 0xd3, 0xa5, 0xf1, 0xba, 0x32, 0x5a, 0x0e, 0xcf, 0xcf, 0x48, 0x4d, - 0xbe, 0x7c, 0x06, 0xd1, 0x42, 0x99, 0xe6, 0x81, 0xc5, 0x8a, 0xea, 0x6b, 0x90, 0x8d, 0x06, 0x8f, - 0x73, 0x90, 0xd6, 0x7d, 0xdf, 0xf3, 0xd1, 0x31, 0x9e, 0xa5, 0x6a, 0x55, 0x91, 0xe8, 0x36, 0x36, - 0x58, 0xa2, 0xfb, 0x51, 0x22, 0x5e, 0xad, 0x08, 0xbd, 0xbb, 0x4b, 0x83, 0x10, 0xff, 
0x09, 0xcc, - 0x51, 0x1e, 0x42, 0xce, 0x1e, 0xb5, 0xda, 0x7c, 0x6b, 0xc3, 0x02, 0x48, 0xe1, 0x7c, 0xcf, 0xac, - 0x8a, 0x9d, 0x58, 0xb4, 0xe5, 0x21, 0xb3, 0xb1, 0xaf, 0x54, 0x75, 0xb0, 0x0e, 0x73, 0x4e, 0xaf, - 0x47, 0x3b, 0x8e, 0x1d, 0x8e, 0x36, 0x20, 0x26, 0x6c, 0x21, 0x5a, 0xf9, 0xc7, 0x76, 0x4e, 0x64, - 0x36, 0xae, 0x11, 0x37, 0x73, 0x1a, 0x32, 0x21, 0xdf, 0xe5, 0xf1, 0xd8, 0xcd, 0xaf, 0x15, 0xa2, - 0x8c, 0xc3, 0x95, 0x44, 0x1a, 0xf1, 0x6b, 0x20, 0xf6, 0x8c, 0x3c, 0xb7, 0x0c, 0x03, 0x62, 0xb8, - 0x15, 0x20, 0xc2, 0x8e, 0x4f, 0x43, 0x71, 0x6c, 0x91, 0xea, 0x70, 0xc2, 0x92, 0xa4, 0x30, 0xba, - 0xe2, 0x74, 0xf0, 0x19, 0x98, 0xf2, 0xc4, 0x02, 0xc5, 0xb3, 0xce, 0x70, 0xc4, 0xe3, 0xab, 0x17, - 0x89, 0xbc, 0x58, 0x6e, 0xf0, 0x69, 0x40, 0xfd, 0x3d, 0xda, 0x61, 0x8d, 0x4e, 0xf1, 0x46, 0x21, - 0x52, 0x55, 0x3a, 0xea, 0x1f, 0xc1, 0x4c, 0x4c, 0x71, 0x30, 0xf0, 0xfa, 0x01, 0xc5, 0x2b, 0x90, - 0xf1, 0xf9, 0xf7, 0x2e, 0x69, 0xc5, 0xb2, 0x8f, 0x91, 0x4c, 0x40, 0xa4, 0x87, 0xda, 0x81, 0x19, - 0xa1, 0xb9, 0xe9, 0x84, 0xdb, 0x7c, 0x26, 0xf1, 0x69, 0x48, 0x53, 0x56, 0x38, 0x30, 0x29, 0xa4, - 0x51, 0xe6, 0x76, 0x22, 0xac, 0x23, 0xbd, 0x24, 0x1e, 0xd9, 0xcb, 0xcf, 0x12, 0x30, 0x27, 0x47, - 0xb9, 0x6e, 0x87, 0xed, 0xed, 0x27, 0x34, 0x1a, 0x7e, 0x17, 0xa6, 0x98, 0xde, 0x89, 0xbf, 0x9c, - 0x09, 0xf1, 0x10, 0x79, 0xb0, 0x88, 0xb0, 0x03, 0x6b, 0x64, 0xfa, 0xe5, 0x2e, 0xaa, 0x60, 0x07, - 0x23, 0x4b, 0xf8, 0x84, 0xc0, 0xc9, 0x3c, 0x22, 0x70, 0xa6, 0x8e, 0x12, 0x38, 0xea, 0x06, 0xcc, - 0x8f, 0x33, 0x2e, 0x83, 0xe3, 0xf7, 0x60, 0x4a, 0x4c, 0x4a, 0x94, 0x23, 0x27, 0xcd, 0x5b, 0xe4, - 0xa2, 0x7e, 0x9a, 0x80, 0x79, 0x99, 0xbe, 0xbe, 0x1e, 0xdf, 0xf1, 0x08, 0xcf, 0xe9, 0x23, 0x7d, - 0xa0, 0x47, 0x9b, 0x3f, 0xb5, 0x0c, 0x0b, 0x07, 0x78, 0x7c, 0x8c, 0x8f, 0xf5, 0x33, 0x05, 0xa6, - 0xd7, 0x69, 0xd7, 0xe9, 0x3f, 0xa1, 0xb3, 0x30, 0x42, 0x6e, 0xea, 0x48, 0x41, 0x3c, 0x80, 0x82, - 0xc4, 0x2b, 0xd9, 0x3a, 0xcc, 0xb6, 0x32, 0xe9, 0x6b, 0xb9, 0x04, 0xd3, 0xf2, 0x1c, 0x6e, 0xbb, - 0x8e, 0x1d, 0xc4, 0x78, 
0x0e, 0x1c, 0xc4, 0x35, 0x66, 0x24, 0xf2, 0xc8, 0xce, 0x05, 0xf5, 0xc7, - 0x0a, 0x14, 0xca, 0x5e, 0xaf, 0xe7, 0x84, 0x4f, 0x28, 0xc7, 0x87, 0x19, 0x4a, 0x4d, 0x8a, 0xc7, - 0x73, 0x50, 0x8c, 0x60, 0x4a, 0x6a, 0x0f, 0xac, 0x34, 0xca, 0xa1, 0x95, 0xe6, 0x27, 0x0a, 0xcc, - 0x10, 0xcf, 0x75, 0xb7, 0xec, 0xf6, 0xce, 0xd3, 0x4d, 0xce, 0x79, 0x40, 0x43, 0xa0, 0x47, 0xa5, - 0xe7, 0x97, 0x0a, 0x14, 0x1b, 0x3e, 0x1d, 0xd8, 0x3e, 0x7d, 0xaa, 0xd9, 0x61, 0xdb, 0xf4, 0x4e, - 0x28, 0x37, 0x38, 0x39, 0xc2, 0xcb, 0xea, 0x2c, 0xcc, 0xc4, 0xd8, 0x05, 0x61, 0xea, 0xf7, 0x14, - 0x58, 0x10, 0x21, 0x26, 0x2d, 0x9d, 0x27, 0x94, 0x96, 0x08, 0x6f, 0x6a, 0x04, 0x6f, 0x09, 0x8e, - 0x1f, 0xc4, 0x26, 0x61, 0xbf, 0x9b, 0x80, 0x13, 0x51, 0xf0, 0x3c, 0xe1, 0xc0, 0xbf, 0x44, 0x3c, - 0x2c, 0x42, 0xe9, 0x30, 0x09, 0x92, 0xa1, 0x0f, 0x12, 0x50, 0x2a, 0xfb, 0xd4, 0x0e, 0xe9, 0xc8, - 0x3e, 0xe8, 0xe9, 0x89, 0x0d, 0x7c, 0x0e, 0xa6, 0x07, 0xb6, 0x1f, 0x3a, 0x6d, 0x67, 0x60, 0xb3, - 0xa3, 0x68, 0x9a, 0x6f, 0xb3, 0x0e, 0x34, 0x30, 0xe6, 0xa2, 0x9e, 0x82, 0x93, 0x13, 0x18, 0x91, - 0x7c, 0xfd, 0x4a, 0x01, 0xdc, 0x0c, 0x6d, 0x3f, 0xfc, 0x1a, 0xac, 0x4b, 0x13, 0x83, 0x69, 0x01, - 0xe6, 0xc6, 0xf0, 0x8f, 0xf2, 0x42, 0xc3, 0xaf, 0xc5, 0x92, 0xf4, 0xb9, 0xbc, 0x8c, 0xe2, 0x97, - 0xbc, 0xfc, 0x40, 0x81, 0xc5, 0xb2, 0x27, 0x6e, 0x27, 0x9f, 0xca, 0x2f, 0x4c, 0x7d, 0x1e, 0x4e, - 0x4d, 0x04, 0x28, 0x09, 0xf8, 0xbe, 0x02, 0xc7, 0x09, 0xb5, 0x3b, 0x4f, 0x27, 0xf8, 0xeb, 0x70, - 0xe2, 0x10, 0x38, 0xb9, 0x47, 0xb9, 0x08, 0xd9, 0x1e, 0x0d, 0x6d, 0xb6, 0xc3, 0x95, 0x90, 0x16, - 0xa3, 0x76, 0x87, 0xde, 0x35, 0xe9, 0x41, 0x62, 0x5f, 0xf5, 0x87, 0x09, 0x98, 0xe3, 0xfb, 0xec, - 0x67, 0x87, 0xbc, 0x23, 0xdd, 0xc2, 0x64, 0x0e, 0x6e, 0xfe, 0x98, 0xc3, 0xc0, 0xa7, 0x56, 0x74, - 0x3b, 0x30, 0xc5, 0x7f, 0xc2, 0xc1, 0xc0, 0xa7, 0xd7, 0x85, 0x46, 0xfd, 0x96, 0x02, 0xf3, 0xe3, - 0x14, 0xc7, 0x27, 0x9a, 0xdf, 0xf4, 0x6d, 0xcb, 0x84, 0x94, 0x92, 0x3c, 0xca, 0x21, 0x29, 0x75, - 0xe4, 0x43, 0xd2, 0xb7, 0x13, 0x50, 0x1a, 0x05, 0xf3, 0xec, 
0x4e, 0x67, 0xfc, 0x4e, 0xe7, 0x8b, - 0xde, 0xf2, 0xa9, 0xdf, 0x51, 0xe0, 0xe4, 0x04, 0x42, 0xbf, 0x58, 0x88, 0x8c, 0xdc, 0xec, 0x24, - 0x1e, 0x79, 0xb3, 0xf3, 0xd5, 0x07, 0xc9, 0x77, 0x15, 0x98, 0xaf, 0x89, 0xbb, 0x7a, 0x71, 0xf3, - 0xf1, 0xe4, 0xe6, 0x60, 0x7e, 0x1d, 0x9f, 0x1a, 0xfe, 0xad, 0x52, 0xcb, 0xb0, 0x70, 0x00, 0xda, - 0x63, 0xdc, 0xe6, 0xfc, 0x42, 0x81, 0x59, 0xd9, 0x8a, 0xf6, 0xc4, 0x6e, 0x5f, 0x26, 0xb0, 0x83, - 0x5f, 0x80, 0xa4, 0xd3, 0x89, 0xf6, 0xbd, 0xe3, 0x3f, 0xe3, 0x99, 0x41, 0xbd, 0x0c, 0x78, 0x14, - 0xf7, 0x63, 0x50, 0xf7, 0xd3, 0x04, 0x2c, 0x10, 0x91, 0x7d, 0x9f, 0xfd, 0x5f, 0xf8, 0xb2, 0xff, - 0x17, 0x1e, 0xbe, 0x70, 0x7d, 0xca, 0x37, 0x53, 0xe3, 0x54, 0x7f, 0x75, 0x4b, 0xd7, 0x81, 0x85, - 0x36, 0x79, 0x68, 0xa1, 0x7d, 0xfc, 0x7c, 0xf4, 0x69, 0x02, 0x16, 0x25, 0x90, 0x67, 0x7b, 0x9d, - 0xa3, 0x47, 0x44, 0xe6, 0x50, 0x44, 0xfc, 0x5c, 0x81, 0x53, 0x13, 0x89, 0xfc, 0xad, 0xef, 0x68, - 0x0e, 0x44, 0x4f, 0xea, 0x91, 0xd1, 0x93, 0x3e, 0x72, 0xf4, 0xbc, 0x9f, 0x80, 0x22, 0xa1, 0x2e, - 0xb5, 0x83, 0xa7, 0xfc, 0x76, 0xef, 0x00, 0x87, 0xe9, 0x43, 0xf7, 0x9c, 0xb3, 0x30, 0x13, 0x13, - 0x21, 0x0f, 0x5c, 0xfc, 0x80, 0xce, 0xd6, 0xc1, 0xb7, 0xa9, 0xed, 0x86, 0xd1, 0x4e, 0x50, 0xfd, - 0xdf, 0x04, 0x14, 0x08, 0xd3, 0x38, 0x3d, 0xda, 0x0c, 0xed, 0x30, 0xc0, 0x2f, 0xc1, 0xf4, 0x36, - 0x77, 0xb1, 0x86, 0x11, 0x92, 0x23, 0x79, 0xa1, 0x13, 0x7f, 0x1f, 0xd7, 0x60, 0x21, 0xa0, 0x6d, - 0xaf, 0xdf, 0x09, 0xac, 0x2d, 0xba, 0xed, 0xf4, 0x3b, 0x56, 0xcf, 0x0e, 0x42, 0xea, 0x73, 0x5a, - 0x0a, 0x64, 0x4e, 0x1a, 0xd7, 0xb9, 0xad, 0xc6, 0x4d, 0xf8, 0x2c, 0xcc, 0x6f, 0x39, 0x7d, 0xd7, - 0xeb, 0x5a, 0x03, 0xd7, 0xde, 0xa7, 0x7e, 0x60, 0xb5, 0xbd, 0xdd, 0xbe, 0xe0, 0x23, 0x4d, 0xb0, - 0xb0, 0x35, 0x84, 0xa9, 0xcc, 0x2c, 0xf8, 0x36, 0xac, 0x4c, 0xec, 0xc5, 0xba, 0xe3, 0xb8, 0x21, - 0xf5, 0x69, 0xc7, 0xf2, 0xe9, 0xc0, 0x75, 0xda, 0xe2, 0xa1, 0x91, 0x20, 0xea, 0xd5, 0x09, 0x5d, - 0x6f, 0x4a, 0x77, 0x32, 0xf4, 0xc6, 0xa7, 0x20, 0xd7, 0x1e, 0xec, 0x5a, 0xbb, 0xfc, 0xd1, 0x02, - 
0xe3, 0x4f, 0x21, 0xd9, 0xf6, 0x60, 0xb7, 0xc5, 0x64, 0x8c, 0x20, 0x79, 0x77, 0x20, 0x92, 0xb3, - 0x42, 0x58, 0x51, 0xfd, 0x4c, 0x81, 0xa2, 0xd6, 0xed, 0xfa, 0xb4, 0x6b, 0x87, 0x92, 0xa6, 0xb3, - 0x30, 0x2f, 0x28, 0xd9, 0xb7, 0x64, 0xb8, 0x0a, 0x3c, 0x8a, 0xc0, 0x23, 0x6d, 0x22, 0x56, 0x05, - 0x9e, 0x0b, 0x70, 0x7c, 0xb7, 0x3f, 0xb1, 0x4e, 0x82, 0xd7, 0x99, 0x8f, 0xad, 0xa3, 0xb5, 0xfe, - 0x10, 0x4e, 0x4e, 0x66, 0xa1, 0xe7, 0x88, 0xc7, 0x7e, 0x05, 0x72, 0x7c, 0x02, 0xe8, 0x9a, 0xd3, - 0x7f, 0x48, 0x55, 0xfb, 0x3e, 0xe7, 0xeb, 0x73, 0xaa, 0xda, 0xf7, 0xd5, 0xff, 0x8f, 0xff, 0x29, - 0x46, 0xe1, 0x12, 0x27, 0x8e, 0x28, 0x90, 0x95, 0x87, 0x05, 0x72, 0x09, 0xa6, 0x58, 0x30, 0x3a, - 0xfd, 0x2e, 0x07, 0x97, 0x25, 0x91, 0x88, 0x9b, 0xf0, 0xaa, 0xc4, 0x4e, 0xef, 0x87, 0xd4, 0xef, - 0xdb, 0xae, 0xbb, 0x6f, 0x89, 0xeb, 0xc7, 0x7e, 0x48, 0x3b, 0xd6, 0xf0, 0xf1, 0xa3, 0x48, 0x1f, - 0x2f, 0x0b, 0x6f, 0x3d, 0x76, 0x26, 0xb1, 0xaf, 0x19, 0x3f, 0x8b, 0x7c, 0x13, 0x8a, 0xbe, 0x0c, - 0x62, 0x2b, 0x60, 0xd3, 0x23, 0x53, 0xee, 0x7c, 0xf4, 0x6a, 0x62, 0x34, 0xc2, 0x49, 0xc1, 0x1f, - 0x0b, 0xf8, 0xc7, 0x4e, 0x38, 0x57, 0x53, 0xd9, 0x0c, 0x9a, 0x52, 0xbf, 0xa1, 0xc0, 0xdc, 0x84, - 0xb3, 0x7b, 0x7c, 0x31, 0xa0, 0x8c, 0xdc, 0x3b, 0xfe, 0x3e, 0xa4, 0xf9, 0x83, 0x16, 0xf9, 0x86, - 0xea, 0xc4, 0xe1, 0xa3, 0x3f, 0x7f, 0x7c, 0x42, 0x84, 0x17, 0xfb, 0x16, 0x39, 0xa6, 0x36, 0xbf, - 0x78, 0x8c, 0x32, 0x6a, 0x9e, 0xe9, 0xc4, 0x5d, 0xe4, 0xe1, 0x9b, 0xcc, 0xd4, 0x23, 0x6f, 0x32, - 0x57, 0xfe, 0x39, 0x09, 0xb9, 0xda, 0x7e, 0xf3, 0xae, 0xbb, 0xe9, 0xda, 0x5d, 0xfe, 0x3a, 0xa4, - 0xd6, 0x30, 0x6f, 0xa1, 0x63, 0x78, 0x16, 0x0a, 0x46, 0xdd, 0xb4, 0x8c, 0x56, 0xb5, 0x6a, 0x6d, - 0x56, 0xb5, 0x2b, 0x48, 0xc1, 0x08, 0xa6, 0x1b, 0xa4, 0x62, 0x5d, 0xd3, 0x6f, 0x09, 0x4d, 0x02, - 0xcf, 0xc1, 0x4c, 0xcb, 0xa8, 0x5c, 0x6f, 0xe9, 0x43, 0x65, 0x0a, 0x2f, 0xc0, 0x6c, 0xad, 0x55, - 0x35, 0x2b, 0x8d, 0xea, 0x88, 0x3a, 0x8b, 0x0b, 0x90, 0x5b, 0xaf, 0xd6, 0xd7, 0x85, 0x88, 0x58, - 0xfb, 0x2d, 0xa3, 0x59, 0xb9, 0x62, 
0xe8, 0x1b, 0x42, 0xb5, 0xc4, 0x54, 0xb7, 0x75, 0x52, 0xdf, - 0xac, 0x44, 0x5d, 0x5e, 0xc6, 0x08, 0xf2, 0xeb, 0x15, 0x43, 0x23, 0xb2, 0x95, 0x07, 0x0a, 0x2e, - 0x42, 0x4e, 0x37, 0x5a, 0x35, 0x29, 0x27, 0x70, 0x09, 0xe6, 0xb4, 0x96, 0x59, 0xb7, 0x2a, 0x46, - 0x99, 0xe8, 0x35, 0xdd, 0x30, 0xa5, 0x25, 0x85, 0xe7, 0xa0, 0x68, 0x56, 0x6a, 0x7a, 0xd3, 0xd4, - 0x6a, 0x0d, 0xa9, 0x64, 0xa3, 0xc8, 0x36, 0xf5, 0xc8, 0x07, 0xe1, 0x45, 0x58, 0x30, 0xea, 0x96, - 0x7c, 0x8a, 0x67, 0xdd, 0xd0, 0xaa, 0x2d, 0x5d, 0xda, 0x96, 0xf0, 0x09, 0xc0, 0x75, 0xc3, 0x6a, - 0x35, 0x36, 0x34, 0x53, 0xb7, 0x8c, 0xfa, 0x4d, 0x69, 0xb8, 0x8c, 0x8b, 0x90, 0x1d, 0x8e, 0xe0, - 0x01, 0x63, 0xa1, 0xd0, 0xd0, 0x88, 0x39, 0x04, 0xfb, 0xe0, 0x01, 0x23, 0x0b, 0xae, 0x90, 0x7a, - 0xab, 0x31, 0x74, 0x9b, 0x85, 0xbc, 0x24, 0x4b, 0xaa, 0x52, 0x4c, 0xb5, 0x5e, 0x31, 0xca, 0xf1, - 0xf8, 0x1e, 0x64, 0x17, 0x13, 0x48, 0x59, 0xd9, 0x81, 0x14, 0x9f, 0x8e, 0x2c, 0xa4, 0x8c, 0xba, - 0xa1, 0xa3, 0x63, 0x78, 0x06, 0xa0, 0xd2, 0xac, 0x18, 0xa6, 0x7e, 0x85, 0x68, 0x55, 0x06, 0x9b, - 0x2b, 0x22, 0x02, 0x19, 0xda, 0x69, 0x98, 0xaa, 0x34, 0x37, 0xab, 0x75, 0xcd, 0x94, 0x30, 0x2b, - 0xcd, 0xeb, 0xad, 0xba, 0xc9, 0x8c, 0x08, 0xe7, 0x21, 0x53, 0x69, 0x9a, 0xfa, 0x3b, 0x26, 0xc3, - 0xc5, 0x6d, 0x82, 0x55, 0xf4, 0xe0, 0xf2, 0xca, 0x87, 0x49, 0x48, 0xf1, 0x57, 0xcd, 0x05, 0xc8, - 0xf1, 0xd9, 0x36, 0x6f, 0x35, 0x58, 0x97, 0x39, 0x48, 0x55, 0x0c, 0xf3, 0x12, 0xfa, 0xb3, 0x04, - 0x06, 0x48, 0xb7, 0x78, 0xf9, 0xcf, 0x33, 0xac, 0x5c, 0x31, 0xcc, 0x73, 0x17, 0xd1, 0xbb, 0x09, - 0xd6, 0x6c, 0x4b, 0x08, 0x7f, 0x11, 0x19, 0xd6, 0x2e, 0xa0, 0xf7, 0x62, 0xc3, 0xda, 0x05, 0xf4, - 0x97, 0x91, 0xe1, 0xfc, 0x1a, 0x7a, 0x3f, 0x36, 0x9c, 0x5f, 0x43, 0x7f, 0x15, 0x19, 0x2e, 0x5e, - 0x40, 0x7f, 0x1d, 0x1b, 0x2e, 0x5e, 0x40, 0x7f, 0x93, 0x61, 0x58, 0x38, 0x92, 0xf3, 0x6b, 0xe8, - 0x6f, 0xb3, 0xb1, 0x74, 0xf1, 0x02, 0xfa, 0xbb, 0x2c, 0x9b, 0xff, 0x78, 0x56, 0xd1, 0xdf, 0x23, - 0x36, 0x4c, 0x36, 0x41, 0xe8, 0x1f, 0x78, 0x91, 0x99, 0xd0, 0x3f, 0x22, 
0x86, 0x91, 0x69, 0xb9, - 0xf8, 0x01, 0xb7, 0xdc, 0xd2, 0x35, 0x82, 0xfe, 0x29, 0x23, 0x5e, 0x5e, 0x96, 0x2b, 0x35, 0xad, - 0x8a, 0x30, 0xaf, 0xc1, 0x58, 0xf9, 0x97, 0xb3, 0xac, 0xc8, 0xc2, 0x13, 0xfd, 0x6b, 0x83, 0x75, - 0x78, 0x43, 0x23, 0xe5, 0xb7, 0x35, 0x82, 0xfe, 0xed, 0x2c, 0xeb, 0xf0, 0x86, 0x46, 0x24, 0x5f, - 0xff, 0xde, 0x60, 0x8e, 0xdc, 0xf4, 0x1f, 0x67, 0xd9, 0xa0, 0xa5, 0xfe, 0x3f, 0x1b, 0x38, 0x0b, - 0xc9, 0xf5, 0x8a, 0x89, 0x3e, 0xe4, 0xbd, 0xb1, 0x10, 0x45, 0xff, 0x85, 0x98, 0xb2, 0xa9, 0x9b, - 0xe8, 0x23, 0xa6, 0x4c, 0x9b, 0xad, 0x46, 0x55, 0x47, 0xcf, 0xb1, 0xc1, 0x5d, 0xd1, 0xeb, 0x35, - 0xdd, 0x24, 0xb7, 0xd0, 0x7f, 0x73, 0xf7, 0xab, 0xcd, 0xba, 0x81, 0x3e, 0x46, 0xb8, 0x08, 0xa0, - 0xbf, 0xd3, 0x20, 0x7a, 0xb3, 0x59, 0xa9, 0x1b, 0xe8, 0xc5, 0x95, 0x4d, 0x40, 0x07, 0xd3, 0x01, - 0x03, 0xd0, 0x32, 0xae, 0x19, 0xf5, 0x9b, 0x06, 0x3a, 0xc6, 0x84, 0x06, 0xd1, 0x1b, 0x1a, 0xd1, - 0x91, 0x82, 0x01, 0x32, 0xf2, 0x3d, 0x67, 0x02, 0x4f, 0x43, 0x96, 0xd4, 0xab, 0xd5, 0x75, 0xad, - 0x7c, 0x0d, 0x25, 0xd7, 0x5f, 0x87, 0x19, 0xc7, 0x5b, 0xdd, 0x73, 0x42, 0x1a, 0x04, 0xe2, 0xdd, - 0xfc, 0x6d, 0x55, 0x4a, 0x8e, 0x77, 0x46, 0x94, 0xce, 0x74, 0xbd, 0x33, 0x7b, 0xe1, 0x19, 0x6e, - 0x3d, 0xc3, 0x33, 0xc6, 0x56, 0x86, 0x0b, 0xe7, 0x7f, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x1b, - 0xa7, 0xfb, 0x95, 0x2f, 0x00, 0x00, + // 3303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x90, 0x1b, 0x49, + 0x56, 0xee, 0x2a, 0xfd, 0xb4, 0xf4, 0xd4, 0x52, 0x67, 0x67, 0x77, 0xdb, 0x9a, 0xf6, 0x8c, 0xa7, + 0xb7, 0x76, 0x67, 0xd7, 0x18, 0x68, 0x7b, 0xda, 0x5e, 0x63, 0x66, 0x17, 0x98, 0x6a, 0x75, 0xb5, + 0x47, 0xb6, 0x54, 0x92, 0x53, 0x25, 0x7b, 0x3d, 0x41, 0x44, 0x45, 0x59, 0x4a, 0xab, 0x2b, 0xba, + 0x54, 0xa5, 0xae, 0xaa, 0x6e, 0x8f, 0x6e, 0x86, 0x65, 0x59, 0xfe, 0x59, 0xfe, 0x77, 0xd9, 0x60, + 0x83, 0x08, 0x0e, 0x04, 0x17, 0x22, 0xb8, 0x71, 0xe6, 0x30, 0x41, 0x70, 0x20, 0xe0, 0x08, 0x1c, + 0x58, 0x86, 0x20, 0xe0, 
0xb4, 0x41, 0x70, 0xe0, 0xc0, 0x81, 0x20, 0xf2, 0xa7, 0x4a, 0x52, 0xb7, + 0xc6, 0xee, 0xf5, 0x32, 0xb1, 0x61, 0x8f, 0x6f, 0xf9, 0x7e, 0xf2, 0xe7, 0x7d, 0xf9, 0xf2, 0xbd, + 0x57, 0xa9, 0x14, 0x94, 0x0e, 0x8f, 0x68, 0x38, 0xde, 0x1a, 0x85, 0x41, 0x1c, 0xe0, 0x1c, 0x27, + 0x36, 0x2a, 0x71, 0x30, 0x0a, 0xfa, 0x4e, 0xec, 0x08, 0xf6, 0x46, 0xe9, 0x38, 0x0e, 0x47, 0x3d, + 0x41, 0x68, 0x5f, 0x53, 0x20, 0x6f, 0x39, 0xe1, 0x80, 0xc6, 0x78, 0x03, 0x0a, 0x07, 0x74, 0x1c, + 0x8d, 0x9c, 0x1e, 0xad, 0x2a, 0x9b, 0xca, 0xa5, 0x22, 0x49, 0x69, 0xbc, 0x06, 0xb9, 0x68, 0xdf, + 0x09, 0xfb, 0x55, 0x95, 0x0b, 0x04, 0x81, 0xbf, 0x08, 0xa5, 0xd8, 0x79, 0xe8, 0xd1, 0xd8, 0x8e, + 0xc7, 0x23, 0x5a, 0xcd, 0x6c, 0x2a, 0x97, 0x2a, 0xdb, 0x6b, 0x5b, 0xe9, 0x7c, 0x16, 0x17, 0x5a, + 0xe3, 0x11, 0x25, 0x10, 0xa7, 0x6d, 0x8c, 0x21, 0xdb, 0xa3, 0x9e, 0x57, 0xcd, 0xf2, 0xb1, 0x78, + 0x5b, 0xdb, 0x85, 0xca, 0x3d, 0xeb, 0x96, 0x13, 0xd3, 0x9a, 0xe3, 0x79, 0x34, 0xac, 0xef, 0xb2, + 0xe5, 0x1c, 0x45, 0x34, 0xf4, 0x9d, 0x61, 0xba, 0x9c, 0x84, 0xc6, 0xe7, 0x20, 0x3f, 0x08, 0x83, + 0xa3, 0x51, 0x54, 0x55, 0x37, 0x33, 0x97, 0x8a, 0x44, 0x52, 0xda, 0xcf, 0x02, 0x18, 0xc7, 0xd4, + 0x8f, 0xad, 0xe0, 0x80, 0xfa, 0xf8, 0x75, 0x28, 0xc6, 0xee, 0x90, 0x46, 0xb1, 0x33, 0x1c, 0xf1, + 0x21, 0x32, 0x64, 0xc2, 0xf8, 0x18, 0x93, 0x36, 0xa0, 0x30, 0x0a, 0x22, 0x37, 0x76, 0x03, 0x9f, + 0xdb, 0x53, 0x24, 0x29, 0xad, 0xfd, 0x34, 0xe4, 0xee, 0x39, 0xde, 0x11, 0xc5, 0x6f, 0x42, 0x96, + 0x1b, 0xac, 0x70, 0x83, 0x4b, 0x5b, 0x02, 0x74, 0x6e, 0x27, 0x17, 0xb0, 0xb1, 0x8f, 0x99, 0x26, + 0x1f, 0x7b, 0x89, 0x08, 0x42, 0x3b, 0x80, 0xa5, 0x1d, 0xd7, 0xef, 0xdf, 0x73, 0x42, 0x97, 0x81, + 0xf1, 0x9c, 0xc3, 0xe0, 0xcf, 0x41, 0x9e, 0x37, 0xa2, 0x6a, 0x66, 0x33, 0x73, 0xa9, 0xb4, 0xbd, + 0x24, 0x3b, 0xf2, 0xb5, 0x11, 0x29, 0xd3, 0xfe, 0x4a, 0x01, 0xd8, 0x09, 0x8e, 0xfc, 0xfe, 0x5d, + 0x26, 0xc4, 0x08, 0x32, 0xd1, 0xa1, 0x27, 0x81, 0x64, 0x4d, 0x7c, 0x07, 0x2a, 0x0f, 0x5d, 0xbf, + 0x6f, 0x1f, 0xcb, 0xe5, 0x08, 0x2c, 0x4b, 0xdb, 0x9f, 0x93, 
0xc3, 0x4d, 0x3a, 0x6f, 0x4d, 0xaf, + 0x3a, 0x32, 0xfc, 0x38, 0x1c, 0x93, 0xf2, 0xc3, 0x69, 0xde, 0x46, 0x17, 0xf0, 0x69, 0x25, 0x36, + 0xe9, 0x01, 0x1d, 0x27, 0x93, 0x1e, 0xd0, 0x31, 0xfe, 0x91, 0x69, 0x8b, 0x4a, 0xdb, 0xab, 0xc9, + 0x5c, 0x53, 0x7d, 0xa5, 0x99, 0xef, 0xa8, 0x37, 0x15, 0xed, 0x2f, 0x16, 0xa1, 0x62, 0x7c, 0x40, + 0x7b, 0x47, 0x31, 0x6d, 0x8d, 0xd8, 0x1e, 0x44, 0xb8, 0x09, 0xcb, 0xae, 0xdf, 0xf3, 0x8e, 0xfa, + 0xb4, 0x6f, 0x3f, 0x72, 0xa9, 0xd7, 0x8f, 0xb8, 0x1f, 0x55, 0xd2, 0x75, 0xcf, 0xea, 0x6f, 0xd5, + 0xa5, 0xf2, 0x1e, 0xd7, 0x25, 0x15, 0x77, 0x86, 0xc6, 0x97, 0x61, 0xa5, 0xe7, 0xb9, 0xd4, 0x8f, + 0xed, 0x47, 0xcc, 0x5e, 0x3b, 0x0c, 0x1e, 0x47, 0xd5, 0xdc, 0xa6, 0x72, 0xa9, 0x40, 0x96, 0x85, + 0x60, 0x8f, 0xf1, 0x49, 0xf0, 0x38, 0xc2, 0xef, 0x40, 0xe1, 0x71, 0x10, 0x1e, 0x78, 0x81, 0xd3, + 0xaf, 0xe6, 0xf9, 0x9c, 0x17, 0xe7, 0xcf, 0x79, 0x5f, 0x6a, 0x91, 0x54, 0x1f, 0x5f, 0x02, 0x14, + 0x1d, 0x7a, 0x76, 0x44, 0x3d, 0xda, 0x8b, 0x6d, 0xcf, 0x1d, 0xba, 0x71, 0xb5, 0xc0, 0x5d, 0xb2, + 0x12, 0x1d, 0x7a, 0x1d, 0xce, 0x6e, 0x30, 0x2e, 0xb6, 0x61, 0x3d, 0x0e, 0x1d, 0x3f, 0x72, 0x7a, + 0x6c, 0x30, 0xdb, 0x8d, 0x02, 0xcf, 0xe1, 0xee, 0x58, 0xe4, 0x53, 0x5e, 0x9e, 0x3f, 0xa5, 0x35, + 0xe9, 0x52, 0x4f, 0x7a, 0x90, 0xb5, 0x78, 0x0e, 0x17, 0xbf, 0x0d, 0xeb, 0xd1, 0x81, 0x3b, 0xb2, + 0xf9, 0x38, 0xf6, 0xc8, 0x73, 0x7c, 0xbb, 0xe7, 0xf4, 0xf6, 0x69, 0x15, 0xb8, 0xd9, 0x98, 0x09, + 0xf9, 0xbe, 0xb7, 0x3d, 0xc7, 0xaf, 0x31, 0x09, 0x03, 0x9d, 0xe9, 0xf9, 0x34, 0xb4, 0x8f, 0x69, + 0x18, 0xb1, 0xd5, 0x94, 0x9e, 0x06, 0x7a, 0x5b, 0x28, 0xdf, 0x13, 0xba, 0xa4, 0x32, 0x9a, 0xa1, + 0xf1, 0x17, 0xe1, 0xfc, 0xbe, 0x13, 0xd9, 0xbd, 0x90, 0x3a, 0x31, 0xed, 0xdb, 0x31, 0x1d, 0x8e, + 0xec, 0x58, 0xf8, 0xe0, 0x12, 0x5f, 0xc3, 0xda, 0xbe, 0x13, 0xd5, 0x84, 0xd4, 0xa2, 0xc3, 0x11, + 0x8f, 0x23, 0x91, 0xf6, 0x25, 0xa8, 0xcc, 0xee, 0x26, 0x5e, 0x81, 0xb2, 0xf5, 0xa0, 0x6d, 0xd8, + 0xba, 0xb9, 0x6b, 0x9b, 0x7a, 0xd3, 0x40, 0x0b, 0xb8, 0x0c, 0x45, 0xce, 0x6a, 0x99, 0x8d, 0x07, + 
0x48, 0xc1, 0x8b, 0x90, 0xd1, 0x1b, 0x0d, 0xa4, 0x6a, 0x37, 0xa1, 0x90, 0x6c, 0x0b, 0x5e, 0x86, + 0x52, 0xd7, 0xec, 0xb4, 0x8d, 0x5a, 0x7d, 0xaf, 0x6e, 0xec, 0xa2, 0x05, 0x5c, 0x80, 0x6c, 0xab, + 0x61, 0xb5, 0x91, 0x22, 0x5a, 0x7a, 0x1b, 0xa9, 0xac, 0xe7, 0xee, 0x8e, 0x8e, 0x32, 0xda, 0x9f, + 0x2a, 0xb0, 0x36, 0x0f, 0x5e, 0x5c, 0x82, 0xc5, 0x5d, 0x63, 0x4f, 0xef, 0x36, 0x2c, 0xb4, 0x80, + 0x57, 0x61, 0x99, 0x18, 0x6d, 0x43, 0xb7, 0xf4, 0x9d, 0x86, 0x61, 0x13, 0x43, 0xdf, 0x45, 0x0a, + 0xc6, 0x50, 0x61, 0x2d, 0xbb, 0xd6, 0x6a, 0x36, 0xeb, 0x96, 0x65, 0xec, 0x22, 0x15, 0xaf, 0x01, + 0xe2, 0xbc, 0xae, 0x39, 0xe1, 0x66, 0x30, 0x82, 0xa5, 0x8e, 0x41, 0xea, 0x7a, 0xa3, 0xfe, 0x3e, + 0x1b, 0x00, 0x65, 0xf1, 0x67, 0xe0, 0x8d, 0x5a, 0xcb, 0xec, 0xd4, 0x3b, 0x96, 0x61, 0x5a, 0x76, + 0xc7, 0xd4, 0xdb, 0x9d, 0xf7, 0x5a, 0x16, 0x1f, 0x59, 0x18, 0x97, 0xc3, 0x15, 0x00, 0xbd, 0x6b, + 0xb5, 0xc4, 0x38, 0x28, 0xaf, 0x1d, 0x42, 0x65, 0x16, 0x79, 0xb6, 0x2a, 0xb9, 0x44, 0xbb, 0xdd, + 0xd0, 0x4d, 0xd3, 0x20, 0x68, 0x01, 0xe7, 0x41, 0xbd, 0x77, 0x4d, 0xd8, 0x7a, 0x8b, 0xfa, 0xd7, + 0x91, 0xca, 0x06, 0x62, 0xad, 0x5b, 0x21, 0xa5, 0xfd, 0x31, 0xca, 0xb0, 0x75, 0x33, 0xba, 0x41, + 0x1f, 0xc5, 0xdb, 0xc4, 0x1d, 0xec, 0xc7, 0x28, 0xcb, 0xd6, 0xcd, 0x78, 0xf7, 0xdd, 0x78, 0x7f, + 0xcf, 0xf1, 0xbc, 0x87, 0x4e, 0xef, 0x00, 0xe5, 0x6e, 0x67, 0x0b, 0x0a, 0x52, 0x6f, 0x67, 0x0b, + 0x2a, 0xca, 0xdc, 0xce, 0x16, 0x32, 0x28, 0xab, 0xfd, 0xa5, 0x0a, 0x39, 0xbe, 0x3d, 0x2c, 0xce, + 0x4f, 0x45, 0x6f, 0xde, 0x4e, 0x63, 0x9e, 0xfa, 0x94, 0x98, 0xc7, 0x5d, 0x41, 0x46, 0x5f, 0x41, + 0xe0, 0x0b, 0x50, 0x0c, 0xc2, 0x81, 0x70, 0x12, 0x99, 0x37, 0x0a, 0x41, 0x38, 0xe0, 0x8e, 0xc1, + 0x62, 0x36, 0x4b, 0x37, 0x0f, 0x9d, 0x88, 0xf2, 0xa3, 0x5b, 0x24, 0x29, 0x8d, 0x5f, 0x03, 0xa6, + 0x67, 0xf3, 0x75, 0xe4, 0xb9, 0x6c, 0x31, 0x08, 0x07, 0x26, 0x5b, 0xca, 0x67, 0xa1, 0xdc, 0x0b, + 0xbc, 0xa3, 0xa1, 0x6f, 0x7b, 0xd4, 0x1f, 0xc4, 0xfb, 0xd5, 0xc5, 0x4d, 0xe5, 0x52, 0x99, 0x2c, + 0x09, 0x66, 0x83, 0xf3, 0x70, 0x15, 
0x16, 0x7b, 0xfb, 0x4e, 0x18, 0x51, 0x71, 0x5c, 0xcb, 0x24, + 0x21, 0xf9, 0xac, 0xb4, 0xe7, 0x0e, 0x1d, 0x2f, 0xe2, 0x47, 0xb3, 0x4c, 0x52, 0x9a, 0x19, 0xf1, + 0xc8, 0x73, 0x06, 0x11, 0x3f, 0x52, 0x65, 0x22, 0x08, 0xfc, 0x26, 0x94, 0xe4, 0x84, 0x1c, 0x82, + 0x12, 0x5f, 0x0e, 0x08, 0x16, 0x43, 0x40, 0xfb, 0x09, 0xc8, 0x90, 0xe0, 0x31, 0x9b, 0x53, 0xac, + 0x28, 0xaa, 0x2a, 0x9b, 0x99, 0x4b, 0x98, 0x24, 0x24, 0xcb, 0x7b, 0x32, 0xf4, 0x8b, 0x8c, 0x90, + 0x04, 0xfb, 0x6f, 0x2b, 0x50, 0xe2, 0x47, 0x96, 0xd0, 0xe8, 0xc8, 0x8b, 0x59, 0x8a, 0x90, 0xb1, + 0x51, 0x99, 0x49, 0x11, 0x7c, 0x5f, 0x88, 0x94, 0x31, 0x00, 0x58, 0xb8, 0xb3, 0x9d, 0x47, 0x8f, + 0x68, 0x2f, 0xa6, 0x22, 0x13, 0x66, 0xc9, 0x12, 0x63, 0xea, 0x92, 0xc7, 0x90, 0x77, 0xfd, 0x88, + 0x86, 0xb1, 0xed, 0xf6, 0xf9, 0x9e, 0x64, 0x49, 0x41, 0x30, 0xea, 0x7d, 0x7c, 0x11, 0xb2, 0x3c, + 0x60, 0x66, 0xf9, 0x2c, 0x20, 0x67, 0x21, 0xc1, 0x63, 0xc2, 0xf9, 0xb7, 0xb3, 0x85, 0x1c, 0xca, + 0x6b, 0x5f, 0x86, 0x25, 0xbe, 0xb8, 0xfb, 0x4e, 0xe8, 0xbb, 0xfe, 0x80, 0xe7, 0xff, 0xa0, 0x2f, + 0xfc, 0xa2, 0x4c, 0x78, 0x9b, 0xd9, 0x3c, 0xa4, 0x51, 0xe4, 0x0c, 0xa8, 0xcc, 0xc7, 0x09, 0xa9, + 0xfd, 0x71, 0x06, 0x4a, 0x9d, 0x38, 0xa4, 0xce, 0x90, 0xa7, 0x76, 0xfc, 0x65, 0x80, 0x28, 0x76, + 0x62, 0x3a, 0xa4, 0x7e, 0x9c, 0xd8, 0xf7, 0xba, 0x9c, 0x79, 0x4a, 0x6f, 0xab, 0x93, 0x28, 0x91, + 0x29, 0x7d, 0xbc, 0x0d, 0x25, 0xca, 0xc4, 0x76, 0xcc, 0x4a, 0x04, 0x99, 0x86, 0x56, 0x92, 0x28, + 0x96, 0xd6, 0x0e, 0x04, 0x68, 0xda, 0xde, 0xf8, 0x8e, 0x0a, 0xc5, 0x74, 0x34, 0xac, 0x43, 0xa1, + 0xe7, 0xc4, 0x74, 0x10, 0x84, 0x63, 0x99, 0xb9, 0xdf, 0x7a, 0xda, 0xec, 0x5b, 0x35, 0xa9, 0x4c, + 0xd2, 0x6e, 0xf8, 0x0d, 0x10, 0xe5, 0x90, 0x70, 0x4b, 0x61, 0x6f, 0x91, 0x73, 0xb8, 0x63, 0xbe, + 0x03, 0x78, 0x14, 0xba, 0x43, 0x27, 0x1c, 0xdb, 0x07, 0x74, 0x9c, 0x64, 0xb9, 0xcc, 0x9c, 0x9d, + 0x44, 0x52, 0xef, 0x0e, 0x1d, 0xcb, 0x88, 0x78, 0x73, 0xb6, 0xaf, 0xf4, 0x96, 0xd3, 0xfb, 0x33, + 0xd5, 0x93, 0xd7, 0x0d, 0x51, 0x52, 0x21, 0xe4, 0xb8, 0x63, 0xb1, 0xa6, 
0xf6, 0x05, 0x28, 0x24, + 0x8b, 0xc7, 0x45, 0xc8, 0x19, 0x61, 0x18, 0x84, 0x68, 0x81, 0x07, 0xc6, 0x66, 0x43, 0xc4, 0xd6, + 0xdd, 0x5d, 0x16, 0x5b, 0xff, 0x45, 0x4d, 0xd3, 0x34, 0xa1, 0x87, 0x47, 0x34, 0x8a, 0xf1, 0xcf, + 0xc0, 0x2a, 0xe5, 0x2e, 0xe4, 0x1e, 0x53, 0xbb, 0xc7, 0x6b, 0x3a, 0xe6, 0x40, 0x0a, 0xc7, 0x7b, + 0x79, 0x4b, 0x94, 0xa0, 0x49, 0xad, 0x47, 0x56, 0x52, 0x5d, 0xc9, 0xea, 0x63, 0x03, 0x56, 0xdd, + 0xe1, 0x90, 0xf6, 0x5d, 0x27, 0x9e, 0x1e, 0x40, 0x6c, 0xd8, 0x7a, 0x52, 0xf2, 0xcc, 0x94, 0x8c, + 0x64, 0x25, 0xed, 0x91, 0x0e, 0xf3, 0x16, 0xe4, 0x63, 0x5e, 0xde, 0x72, 0xdf, 0x2d, 0x6d, 0x97, + 0x93, 0x88, 0xc3, 0x99, 0x44, 0x0a, 0xf1, 0x17, 0x40, 0x14, 0xcb, 0x3c, 0xb6, 0x4c, 0x1c, 0x62, + 0x52, 0x03, 0x11, 0x21, 0xc7, 0x6f, 0x41, 0x65, 0x26, 0x3b, 0xf7, 0x39, 0x60, 0x19, 0x52, 0x9e, + 0x4e, 0xb5, 0x7d, 0x7c, 0x05, 0x16, 0x03, 0x91, 0x0b, 0x79, 0xd4, 0x99, 0xac, 0x78, 0x36, 0x51, + 0x92, 0x44, 0x8b, 0xc5, 0x86, 0x90, 0x46, 0x34, 0x3c, 0xa6, 0x7d, 0x36, 0xe8, 0x22, 0x1f, 0x14, + 0x12, 0x56, 0xbd, 0xaf, 0xfd, 0x14, 0x2c, 0xa7, 0x10, 0x47, 0xa3, 0xc0, 0x8f, 0x28, 0xbe, 0x0c, + 0xf9, 0x90, 0x9f, 0x77, 0x09, 0x2b, 0x96, 0x73, 0x4c, 0x45, 0x02, 0x22, 0x35, 0xb4, 0x3e, 0x2c, + 0x0b, 0x0e, 0x8b, 0xdf, 0x7c, 0x27, 0xf1, 0x5b, 0x90, 0xa3, 0xac, 0x71, 0x62, 0x53, 0x48, 0xbb, + 0xc6, 0xe5, 0x44, 0x48, 0xa7, 0x66, 0x51, 0x9f, 0x39, 0xcb, 0x7f, 0xaa, 0xb0, 0x2a, 0x57, 0xb9, + 0xe3, 0xc4, 0xbd, 0xfd, 0x17, 0xd4, 0x1b, 0x7e, 0x14, 0x16, 0x19, 0xdf, 0x4d, 0x4f, 0xce, 0x1c, + 0x7f, 0x48, 0x34, 0x98, 0x47, 0x38, 0x91, 0x3d, 0xb5, 0xfd, 0xb2, 0x7c, 0x2c, 0x3b, 0xd1, 0x54, + 0xd5, 0x30, 0xc7, 0x71, 0xf2, 0xcf, 0x70, 0x9c, 0xc5, 0xb3, 0x38, 0x8e, 0xb6, 0x0b, 0x6b, 0xb3, + 0x88, 0x4b, 0xe7, 0xf8, 0x31, 0x58, 0x14, 0x9b, 0x92, 0xc4, 0xc8, 0x79, 0xfb, 0x96, 0xa8, 0x68, + 0x1f, 0xaa, 0xb0, 0x26, 0xc3, 0xd7, 0xa7, 0xe3, 0x1c, 0x4f, 0xe1, 0x9c, 0x3b, 0xd3, 0x01, 0x3d, + 0xdb, 0xfe, 0x69, 0x35, 0x58, 0x3f, 0x81, 0xe3, 0x73, 0x1c, 0xd6, 0xef, 0x29, 0xb0, 0xb4, 0x43, + 0x07, 0xae, 
0xff, 0x82, 0xee, 0xc2, 0x14, 0xb8, 0xd9, 0x33, 0x39, 0xf1, 0x08, 0xca, 0xd2, 0x5e, + 0x89, 0xd6, 0x69, 0xb4, 0x95, 0x79, 0xa7, 0xe5, 0x26, 0x2c, 0xc9, 0x0b, 0x08, 0xc7, 0x73, 0x9d, + 0x28, 0xb5, 0xe7, 0xc4, 0x0d, 0x84, 0xce, 0x84, 0x44, 0xde, 0x55, 0x70, 0x42, 0xfb, 0x37, 0x05, + 0xca, 0xb5, 0x60, 0x38, 0x74, 0xe3, 0x17, 0x14, 0xe3, 0xd3, 0x08, 0x65, 0xe7, 0xf9, 0xe3, 0xdb, + 0x50, 0x49, 0xcc, 0x94, 0xd0, 0x9e, 0xc8, 0x34, 0xca, 0xa9, 0x4c, 0xf3, 0xef, 0x0a, 0x2c, 0x93, + 0x40, 0x54, 0xf8, 0x2f, 0x37, 0x38, 0xd7, 0x00, 0x4d, 0x0c, 0x3d, 0x2b, 0x3c, 0xff, 0xa3, 0x40, + 0xa5, 0x1d, 0xd2, 0x91, 0x13, 0xd2, 0x97, 0x1a, 0x1d, 0x56, 0xa6, 0xf7, 0x63, 0x59, 0xe0, 0x14, + 0x09, 0x6f, 0x6b, 0x2b, 0xb0, 0x9c, 0xda, 0x2e, 0x00, 0xd3, 0xfe, 0x51, 0x81, 0x75, 0xe1, 0x62, + 0x52, 0xd2, 0x7f, 0x41, 0x61, 0x49, 0xec, 0xcd, 0x4e, 0xd9, 0x5b, 0x85, 0x73, 0x27, 0x6d, 0x93, + 0x66, 0x7f, 0x55, 0x85, 0xf3, 0x89, 0xf3, 0xbc, 0xe0, 0x86, 0xff, 0x00, 0xfe, 0xb0, 0x01, 0xd5, + 0xd3, 0x20, 0x48, 0x84, 0xbe, 0xa1, 0x42, 0x55, 0x5c, 0xe2, 0x4c, 0xd5, 0x41, 0x2f, 0x8f, 0x6f, + 0xe0, 0xb7, 0x61, 0x69, 0xe4, 0x84, 0xb1, 0xdb, 0x73, 0x47, 0x0e, 0xfb, 0x14, 0xcd, 0xf1, 0x32, + 0xeb, 0xc4, 0x00, 0x33, 0x2a, 0xda, 0x05, 0x78, 0x6d, 0x0e, 0x22, 0x12, 0xaf, 0xff, 0x55, 0x00, + 0x77, 0x62, 0x27, 0x8c, 0x3f, 0x05, 0x79, 0x69, 0xae, 0x33, 0xad, 0xc3, 0xea, 0x8c, 0xfd, 0xd3, + 0xb8, 0xd0, 0xf8, 0x53, 0x91, 0x92, 0x3e, 0x16, 0x97, 0x69, 0xfb, 0x25, 0x2e, 0xff, 0xac, 0xc0, + 0x46, 0x2d, 0x10, 0x17, 0xa2, 0x2f, 0xe5, 0x09, 0xd3, 0xde, 0x80, 0x0b, 0x73, 0x0d, 0x94, 0x00, + 0xfc, 0x93, 0x02, 0xe7, 0x08, 0x75, 0xfa, 0x2f, 0xa7, 0xf1, 0x77, 0xe1, 0xfc, 0x29, 0xe3, 0x64, + 0x8d, 0x72, 0x03, 0x0a, 0x43, 0x1a, 0x3b, 0xac, 0xc2, 0x95, 0x26, 0x6d, 0x24, 0xe3, 0x4e, 0xb4, + 0x9b, 0x52, 0x83, 0xa4, 0xba, 0xda, 0x77, 0x55, 0x58, 0xe5, 0x75, 0xf6, 0xab, 0x8f, 0xbc, 0x33, + 0xdd, 0xc2, 0xe4, 0x4f, 0x16, 0x7f, 0x4c, 0x61, 0x14, 0x52, 0x3b, 0xb9, 0x1d, 0x58, 0xe4, 0xbf, + 0x3e, 0xc2, 0x28, 0xa4, 0x77, 0x05, 0x47, 0xfb, 
0x1b, 0x05, 0xd6, 0x66, 0x21, 0x4e, 0xbf, 0x68, + 0xfe, 0xbf, 0x6f, 0x5b, 0xe6, 0x84, 0x94, 0xcc, 0x59, 0x3e, 0x92, 0xb2, 0x67, 0xfe, 0x48, 0xfa, + 0x5b, 0x15, 0xaa, 0xd3, 0xc6, 0xbc, 0xba, 0xd3, 0x99, 0xbd, 0xd3, 0xf9, 0x7e, 0x6f, 0xf9, 0xb4, + 0xbf, 0x57, 0xe0, 0xb5, 0x39, 0x80, 0x7e, 0x7f, 0x2e, 0x32, 0x75, 0xb3, 0xa3, 0x3e, 0xf3, 0x66, + 0xe7, 0x93, 0x77, 0x92, 0x7f, 0x50, 0x60, 0xad, 0x29, 0xee, 0xea, 0xc5, 0xcd, 0xc7, 0x8b, 0x1b, + 0x83, 0xf9, 0x75, 0x7c, 0x76, 0xf2, 0x6b, 0x95, 0x56, 0x83, 0xf5, 0x13, 0xa6, 0x3d, 0xc7, 0x6d, + 0xce, 0x7f, 0x2b, 0xb0, 0x22, 0x47, 0xd1, 0x5f, 0xd8, 0xf2, 0x65, 0x0e, 0x3a, 0xf8, 0x22, 0x64, + 0xdc, 0x7e, 0x52, 0xf7, 0xce, 0xbe, 0x42, 0x60, 0x02, 0xed, 0x5d, 0xc0, 0xd3, 0x76, 0x3f, 0x07, + 0x74, 0xff, 0xa1, 0xc2, 0x3a, 0x11, 0xd1, 0xf7, 0xd5, 0xef, 0x0b, 0x3f, 0xe8, 0xef, 0x0b, 0x4f, + 0x4f, 0x5c, 0x1f, 0xf2, 0x62, 0x6a, 0x16, 0xea, 0x4f, 0x2e, 0x75, 0x9d, 0x48, 0xb4, 0x99, 0x53, + 0x89, 0xf6, 0xf9, 0xe3, 0xd1, 0x87, 0x2a, 0x6c, 0x48, 0x43, 0x5e, 0xd5, 0x3a, 0x67, 0xf7, 0x88, + 0xfc, 0x29, 0x8f, 0xf8, 0x2f, 0x05, 0x2e, 0xcc, 0x05, 0xf2, 0x87, 0x5e, 0xd1, 0x9c, 0xf0, 0x9e, + 0xec, 0x33, 0xbd, 0x27, 0x77, 0x66, 0xef, 0xf9, 0xba, 0x0a, 0x15, 0x42, 0x3d, 0xea, 0x44, 0x2f, + 0xf9, 0xed, 0xde, 0x09, 0x0c, 0x73, 0xa7, 0xee, 0x39, 0x57, 0x60, 0x39, 0x05, 0x42, 0x7e, 0x70, + 0xf1, 0x0f, 0x74, 0x96, 0x07, 0xdf, 0xa3, 0x8e, 0x17, 0x27, 0x95, 0xa0, 0xf6, 0x27, 0x2a, 0x94, + 0x09, 0xe3, 0xb8, 0x43, 0xda, 0x89, 0x9d, 0x38, 0xc2, 0x9f, 0x81, 0xa5, 0x7d, 0xae, 0x62, 0x4f, + 0x3c, 0xa4, 0x48, 0x4a, 0x82, 0x27, 0x7e, 0x7d, 0xdc, 0x86, 0xf5, 0x88, 0xf6, 0x02, 0xbf, 0x1f, + 0xd9, 0x0f, 0xe9, 0xbe, 0xeb, 0xf7, 0xed, 0xa1, 0x13, 0xc5, 0x34, 0xe4, 0xb0, 0x94, 0xc9, 0xaa, + 0x14, 0xee, 0x70, 0x59, 0x93, 0x8b, 0xf0, 0x55, 0x58, 0x7b, 0xe8, 0xfa, 0x5e, 0x30, 0xb0, 0x47, + 0x9e, 0x33, 0xa6, 0x61, 0x64, 0xf7, 0x82, 0x23, 0x5f, 0xe0, 0x91, 0x23, 0x58, 0xc8, 0xda, 0x42, + 0x54, 0x63, 0x12, 0xfc, 0x3e, 0x5c, 0x9e, 0x3b, 0x8b, 0xfd, 0xc8, 0xf5, 0x62, 0x1a, 
0xd2, 0xbe, + 0x1d, 0xd2, 0x91, 0xe7, 0xf6, 0xc4, 0x0b, 0x2b, 0x01, 0xd4, 0xe7, 0xe7, 0x4c, 0xbd, 0x27, 0xd5, + 0xc9, 0x44, 0x1b, 0x5f, 0x80, 0x62, 0x6f, 0x74, 0x64, 0x1f, 0xf1, 0x47, 0x0b, 0x0c, 0x3f, 0x85, + 0x14, 0x7a, 0xa3, 0xa3, 0x2e, 0xa3, 0x31, 0x82, 0xcc, 0xe1, 0x48, 0x04, 0x67, 0x85, 0xb0, 0xa6, + 0xf6, 0x3d, 0x05, 0x2a, 0xfa, 0x60, 0x10, 0xd2, 0x81, 0x13, 0x4b, 0x98, 0xae, 0xc2, 0x9a, 0x80, + 0x64, 0x6c, 0x4b, 0x77, 0x15, 0xf6, 0x28, 0xc2, 0x1e, 0x29, 0x13, 0xbe, 0x2a, 0xec, 0xb9, 0x0e, + 0xe7, 0x8e, 0xfc, 0xb9, 0x7d, 0x54, 0xde, 0x67, 0x2d, 0x95, 0x4e, 0xf7, 0xfa, 0x49, 0x78, 0x6d, + 0x3e, 0x0a, 0x43, 0x57, 0xbc, 0x72, 0x2c, 0x93, 0x73, 0x73, 0x8c, 0x6e, 0xba, 0xfe, 0x53, 0xba, + 0x3a, 0x1f, 0x70, 0xbc, 0x3e, 0xa6, 0xab, 0xf3, 0x81, 0xf6, 0x67, 0xe9, 0x6f, 0x8a, 0x89, 0xbb, + 0xa4, 0x81, 0x23, 0x71, 0x64, 0xe5, 0x69, 0x8e, 0x5c, 0x85, 0x45, 0xe6, 0x8c, 0xae, 0x3f, 0xe0, + 0xc6, 0x15, 0x48, 0x42, 0xe2, 0x0e, 0x7c, 0x5e, 0xda, 0x4e, 0x3f, 0x88, 0x69, 0xe8, 0x3b, 0x9e, + 0x37, 0xb6, 0xc5, 0xf5, 0xa3, 0xcf, 0x1f, 0x94, 0xa5, 0xaf, 0x3e, 0x45, 0xf8, 0xf8, 0xac, 0xd0, + 0x36, 0x52, 0x65, 0x92, 0xea, 0x5a, 0xe9, 0x7b, 0xd0, 0x2f, 0x41, 0x25, 0x94, 0x4e, 0x6c, 0x47, + 0x6c, 0x7b, 0x64, 0xc8, 0x5d, 0x4b, 0x5e, 0x4d, 0x4c, 0x7b, 0x38, 0x29, 0x87, 0x33, 0x0e, 0xff, + 0xdc, 0x01, 0xe7, 0x76, 0xb6, 0x90, 0x47, 0x8b, 0xda, 0x9f, 0x2b, 0xb0, 0x3a, 0xe7, 0xdb, 0x3d, + 0xbd, 0x18, 0x50, 0xa6, 0xee, 0x1d, 0x7f, 0x1c, 0x72, 0xfc, 0x41, 0x8b, 0x7c, 0x43, 0x75, 0xfe, + 0xf4, 0xa7, 0x3f, 0x7f, 0x7c, 0x42, 0x84, 0x16, 0x3b, 0x8b, 0xdc, 0x26, 0xf9, 0xda, 0x4e, 0x42, + 0x52, 0x62, 0x3c, 0xf9, 0xc4, 0xee, 0xd4, 0x4d, 0x66, 0xf6, 0x99, 0x37, 0x99, 0x97, 0x7f, 0x3b, + 0x03, 0xc5, 0xe6, 0xb8, 0x73, 0xe8, 0xed, 0x79, 0xce, 0x80, 0xbf, 0x0e, 0x69, 0xb6, 0xad, 0x07, + 0x68, 0x01, 0xaf, 0x40, 0xd9, 0x6c, 0x59, 0xb6, 0xd9, 0x6d, 0x34, 0xec, 0xbd, 0x86, 0x7e, 0x0b, + 0x29, 0x18, 0xc1, 0x52, 0x9b, 0xd4, 0xed, 0x3b, 0xc6, 0x03, 0xc1, 0x51, 0xf1, 0x2a, 0x2c, 0x77, + 0xcd, 0xfa, 0xdd, 0xae, 
0x31, 0x61, 0x66, 0xf1, 0x3a, 0xac, 0x34, 0xbb, 0x0d, 0xab, 0xde, 0x6e, + 0x4c, 0xb1, 0x0b, 0xb8, 0x0c, 0xc5, 0x9d, 0x46, 0x6b, 0x47, 0x90, 0x88, 0x8d, 0xdf, 0x35, 0x3b, + 0xf5, 0x5b, 0xa6, 0xb1, 0x2b, 0x58, 0x9b, 0x8c, 0xf5, 0xbe, 0x41, 0x5a, 0x7b, 0xf5, 0x64, 0xca, + 0x77, 0x31, 0x82, 0xd2, 0x4e, 0xdd, 0xd4, 0x89, 0x1c, 0xe5, 0x89, 0x82, 0x2b, 0x50, 0x34, 0xcc, + 0x6e, 0x53, 0xd2, 0x2a, 0xae, 0xc2, 0xaa, 0xde, 0xb5, 0x5a, 0x76, 0xdd, 0xac, 0x11, 0xa3, 0x69, + 0x98, 0x96, 0x94, 0x64, 0xf1, 0x2a, 0x54, 0xac, 0x7a, 0xd3, 0xe8, 0x58, 0x7a, 0xb3, 0x2d, 0x99, + 0x6c, 0x15, 0x85, 0x8e, 0x91, 0xe8, 0x20, 0xbc, 0x01, 0xeb, 0x66, 0xcb, 0x4e, 0x9e, 0xd6, 0xdd, + 0xd3, 0x1b, 0x5d, 0x43, 0xca, 0x36, 0xf1, 0x79, 0xc0, 0x2d, 0xd3, 0xee, 0xb6, 0x77, 0x75, 0xcb, + 0xb0, 0xcd, 0xd6, 0x7d, 0x29, 0x78, 0x17, 0x57, 0xa0, 0x30, 0x59, 0xc1, 0x13, 0x86, 0x42, 0xb9, + 0xad, 0x13, 0x6b, 0x62, 0xec, 0x93, 0x27, 0x0c, 0x2c, 0xb8, 0x45, 0x5a, 0xdd, 0xf6, 0x44, 0x6d, + 0x05, 0x4a, 0x12, 0x2c, 0xc9, 0xca, 0x32, 0xd6, 0x4e, 0xdd, 0xac, 0xa5, 0xeb, 0x7b, 0x52, 0xd8, + 0x50, 0x91, 0x72, 0xf9, 0x00, 0xb2, 0x7c, 0x3b, 0x0a, 0x90, 0x35, 0x5b, 0xa6, 0x81, 0x16, 0xf0, + 0x32, 0x40, 0xbd, 0x53, 0x37, 0x2d, 0xe3, 0x16, 0xd1, 0x1b, 0xcc, 0x6c, 0xce, 0x48, 0x00, 0x64, + 0xd6, 0x2e, 0xc1, 0x62, 0xbd, 0xb3, 0xd7, 0x68, 0xe9, 0x96, 0x34, 0xb3, 0xde, 0xb9, 0xdb, 0x6d, + 0x59, 0x4c, 0x88, 0x70, 0x09, 0xf2, 0xf5, 0x8e, 0x65, 0x7c, 0xc5, 0x62, 0x76, 0x71, 0x99, 0x40, + 0x15, 0x3d, 0x79, 0xf7, 0xf2, 0xb7, 0x32, 0x90, 0xe5, 0xcf, 0xb9, 0xcb, 0x50, 0xe4, 0xbb, 0x6d, + 0x3d, 0x68, 0xb3, 0x29, 0x8b, 0x90, 0xad, 0x9b, 0xd6, 0x4d, 0xf4, 0x73, 0x2a, 0x06, 0xc8, 0x75, + 0x79, 0xfb, 0xe7, 0xf3, 0xac, 0x5d, 0x37, 0xad, 0xb7, 0x6f, 0xa0, 0xaf, 0xaa, 0x6c, 0xd8, 0xae, + 0x20, 0x7e, 0x21, 0x11, 0x6c, 0x5f, 0x47, 0x5f, 0x4b, 0x05, 0xdb, 0xd7, 0xd1, 0x2f, 0x26, 0x82, + 0x6b, 0xdb, 0xe8, 0xeb, 0xa9, 0xe0, 0xda, 0x36, 0xfa, 0xa5, 0x44, 0x70, 0xe3, 0x3a, 0xfa, 0xe5, + 0x54, 0x70, 0xe3, 0x3a, 0xfa, 0x95, 0x3c, 0xb3, 0x85, 0x5b, 
0x72, 0x6d, 0x1b, 0xfd, 0x6a, 0x21, + 0xa5, 0x6e, 0x5c, 0x47, 0xbf, 0x56, 0x60, 0xfb, 0x9f, 0xee, 0x2a, 0xfa, 0x75, 0xc4, 0x96, 0xc9, + 0x36, 0x08, 0xfd, 0x06, 0x6f, 0x32, 0x11, 0xfa, 0x4d, 0xc4, 0x6c, 0x64, 0x5c, 0x4e, 0x7e, 0x83, + 0x4b, 0x1e, 0x18, 0x3a, 0x41, 0xbf, 0x95, 0x17, 0x8f, 0x3d, 0x6b, 0xf5, 0xa6, 0xde, 0x40, 0x98, + 0xf7, 0x60, 0xa8, 0xfc, 0xce, 0x55, 0xd6, 0x64, 0xee, 0x89, 0x7e, 0xb7, 0xcd, 0x26, 0xbc, 0xa7, + 0x93, 0xda, 0x7b, 0x3a, 0x41, 0xbf, 0x77, 0x95, 0x4d, 0x78, 0x4f, 0x27, 0x12, 0xaf, 0xdf, 0x6f, + 0x33, 0x45, 0x2e, 0xfa, 0x83, 0xab, 0x6c, 0xd1, 0x92, 0xff, 0xcd, 0x36, 0x2e, 0x40, 0x66, 0xa7, + 0x6e, 0xa1, 0x6f, 0xf1, 0xd9, 0x98, 0x8b, 0xa2, 0x3f, 0x44, 0x8c, 0xd9, 0x31, 0x2c, 0xf4, 0x6d, + 0xc6, 0xcc, 0x59, 0xdd, 0x76, 0xc3, 0x40, 0xaf, 0xb3, 0xc5, 0xdd, 0x32, 0x5a, 0x4d, 0xc3, 0x22, + 0x0f, 0xd0, 0x1f, 0x71, 0xf5, 0xdb, 0x9d, 0x96, 0x89, 0xbe, 0x83, 0x70, 0x05, 0xc0, 0xf8, 0x4a, + 0x9b, 0x18, 0x9d, 0x4e, 0xbd, 0x65, 0xa2, 0x37, 0x2f, 0xef, 0x01, 0x3a, 0x19, 0x0e, 0x98, 0x01, + 0x5d, 0xf3, 0x8e, 0xd9, 0xba, 0x6f, 0xa2, 0x05, 0x46, 0xb4, 0x89, 0xd1, 0xd6, 0x89, 0x81, 0x14, + 0x0c, 0x90, 0x97, 0x4f, 0x48, 0x55, 0xbc, 0x04, 0x05, 0xd2, 0x6a, 0x34, 0x76, 0xf4, 0xda, 0x1d, + 0x94, 0xd9, 0x31, 0xfe, 0xfa, 0xa3, 0x8b, 0xca, 0xdf, 0x7d, 0x74, 0x51, 0xf9, 0xee, 0x47, 0x17, + 0x95, 0x6f, 0xfe, 0xeb, 0xc5, 0x05, 0x58, 0x76, 0x83, 0xad, 0x63, 0x37, 0xa6, 0x51, 0x24, 0xfe, + 0x40, 0xf0, 0xbe, 0x26, 0x29, 0x37, 0xb8, 0x22, 0x5a, 0x57, 0x06, 0xc1, 0x95, 0xe3, 0xf8, 0x0a, + 0x97, 0x5e, 0xe1, 0x11, 0xe4, 0x61, 0x9e, 0x13, 0xd7, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, + 0x64, 0x72, 0x89, 0x9e, 0x30, 0x00, 0x00, +} + +func (m *Target) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Target) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
+func (m *Target) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x22 + } + if m.TabletType != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VTGateCallerID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VTGateCallerID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VTGateCallerID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BindVariable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BindVariable) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BindVariable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoundQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoundQuery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoundQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.BindVariables) > 0 { + for k := range m.BindVariables { + v := m.BindVariables[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintQuery(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintQuery(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarintQuery(dAtA, i, 
uint64(len(m.Sql))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.HasCreatedTempTables { + i-- + if m.HasCreatedTempTables { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.PlannerVersion != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.PlannerVersion)) + i-- + dAtA[i] = 0x58 + } + if m.SkipQueryPlanCache { + i-- + if m.SkipQueryPlanCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.TransactionIsolation != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionIsolation)) + i-- + dAtA[i] = 0x48 + } + if m.SqlSelectLimit != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.SqlSelectLimit)) + i-- + dAtA[i] = 0x40 + } + if m.Workload != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Workload)) + i-- + dAtA[i] = 0x30 + } + if m.ClientFoundRows { + i-- + if m.ClientFoundRows { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.IncludedFields != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.IncludedFields)) + i-- + dAtA[i] = 0x20 + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ColumnType) > 0 { + i -= len(m.ColumnType) + copy(dAtA[i:], m.ColumnType) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ColumnType))) + i-- + dAtA[i] = 0x5a + } + if m.Flags != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x50 + } + if m.Decimals != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Decimals)) + i-- + dAtA[i] = 0x48 + } + if m.Charset != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Charset)) + i-- + dAtA[i] = 0x40 + } + if m.ColumnLength != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ColumnLength)) + i-- + dAtA[i] = 0x38 + } + if len(m.OrgName) > 0 { + i -= len(m.OrgName) + copy(dAtA[i:], m.OrgName) + i = encodeVarintQuery(dAtA, i, uint64(len(m.OrgName))) + i-- + dAtA[i] = 0x32 + } + if len(m.Database) > 0 { + i -= len(m.Database) + copy(dAtA[i:], m.Database) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Database))) + i-- + dAtA[i] = 0x2a + } + if len(m.OrgTable) > 0 { + i -= len(m.OrgTable) + copy(dAtA[i:], m.OrgTable) + i = encodeVarintQuery(dAtA, i, uint64(len(m.OrgTable))) + i-- + dAtA[i] = 0x22 + } + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Table))) + i-- + dAtA[i] = 0x1a + } + if m.Type != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Row) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Row) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Row) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + i -= len(m.Values) + copy(dAtA[i:], m.Values) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Values))) + i-- + dAtA[i] = 0x12 + } + if len(m.Lengths) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.Lengths)*10) + for _, num := range m.Lengths { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintQuery(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } + +func (m *QueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Rows) > 0 { + for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rows[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.InsertId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.InsertId)) + i-- + dAtA[i] = 0x18 + } + if m.RowsAffected != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RowsAffected)) + i-- + dAtA[i] = 0x10 + } + if len(m.Fields) > 0 { + for 
iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StreamEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.EventToken != nil { + { + size, err := m.EventToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Statements) > 0 { + for iNdEx := len(m.Statements) 
- 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Statements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StreamEvent_Statement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamEvent_Statement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamEvent_Statement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0x2a + } + if len(m.PrimaryKeyValues) > 0 { + for iNdEx := len(m.PrimaryKeyValues) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PrimaryKeyValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.PrimaryKeyFields) > 0 { + for iNdEx := len(m.PrimaryKeyFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PrimaryKeyFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarintQuery(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0x12 + } + if m.Category != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Category)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExecuteRequest) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ReservedId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x38 + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x28 + } + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResultWithError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResultWithError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResultWithError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteBatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ExecuteBatchRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteBatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x30 + } + if m.AsTransaction { + i-- + if m.AsTransaction { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Queries) > 0 { + for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteBatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *ExecuteBatchResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteBatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StreamExecuteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamExecuteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamExecuteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x30 + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamExecuteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamExecuteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamExecuteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BeginRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BeginRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BeginRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if 
m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BeginResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BeginResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BeginResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CommitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ReservedId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} 
+ +func (m *RollbackRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollbackRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollbackResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollbackResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + 
if m.ReservedId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PrepareRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrepareRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrepareRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x2a + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PrepareResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrepareResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrepareResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *CommitPreparedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitPreparedRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitPreparedRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommitPreparedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *CommitPreparedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitPreparedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RollbackPreparedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollbackPreparedRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackPreparedRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x2a + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*RollbackPreparedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollbackPreparedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackPreparedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *CreateTransactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTransactionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateTransactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Participants) > 0 { + for iNdEx := len(m.Participants) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Participants[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := 
m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateTransactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTransactionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateTransactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StartCommitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartCommitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartCommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x2a + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := 
m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartCommitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartCommitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartCommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *SetRollbackRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetRollbackRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetRollbackRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, 
uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x2a + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetRollbackResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetRollbackResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetRollbackResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ConcludeTransactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConcludeTransactionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConcludeTransactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var 
l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConcludeTransactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConcludeTransactionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConcludeTransactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReadTransactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadTransactionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *ReadTransactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadTransactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadTransactionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadTransactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BeginExecuteRequest) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BeginExecuteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BeginExecuteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PreQueries) > 0 { + for iNdEx := len(m.PreQueries) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PreQueries[iNdEx]) + copy(dAtA[i:], m.PreQueries[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PreQueries[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.ReservedId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x30 + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] 
= 0xa + } + return len(dAtA) - i, nil +} + +func (m *BeginExecuteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BeginExecuteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BeginExecuteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x18 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BeginExecuteBatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BeginExecuteBatchRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BeginExecuteBatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.AsTransaction { + i-- + if m.AsTransaction { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Queries) > 0 { + for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BeginExecuteBatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BeginExecuteBatchResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BeginExecuteBatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) 
+ } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x18 + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MessageStreamRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MessageStreamRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MessageStreamRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MessageStreamResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MessageStreamResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MessageStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MessageAckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MessageAckRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MessageAckRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ids) > 0 { + for iNdEx := len(m.Ids) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MessageAckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MessageAckResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MessageAckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReserveExecuteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ReserveExecuteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReserveExecuteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PreQueries) > 0 { + for iNdEx := len(m.PreQueries) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PreQueries[iNdEx]) + copy(dAtA[i:], m.PreQueries[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PreQueries[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x28 + } + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReserveExecuteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReserveExecuteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReserveExecuteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ReservedId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x18 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReserveBeginExecuteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReserveBeginExecuteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReserveBeginExecuteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PreQueries) > 0 { + for iNdEx := len(m.PreQueries) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PreQueries[iNdEx]) + 
copy(dAtA[i:], m.PreQueries[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PreQueries[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReserveBeginExecuteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReserveBeginExecuteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReserveBeginExecuteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ReservedId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x20 + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x18 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReleaseRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReleaseRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReleaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ReservedId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x28 + } + if m.TransactionId != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + { + size, err := m.ImmediateCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + { + size, err := m.EffectiveCallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReleaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReleaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReleaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StreamHealthRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamHealthRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamHealthRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RealtimeStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RealtimeStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RealtimeStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Qps != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Qps)))) + i-- + dAtA[i] = 0x31 + } + if m.CpuUsage != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CpuUsage)))) + i-- + dAtA[i] = 0x29 + } + if m.SecondsBehindMasterFilteredReplication != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.SecondsBehindMasterFilteredReplication)) + i-- + dAtA[i] = 0x20 + } + if m.BinlogPlayersCount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.BinlogPlayersCount)) + i-- + dAtA[i] = 0x18 + } + if m.SecondsBehindMaster != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.SecondsBehindMaster)) + i-- + dAtA[i] = 0x10 + } + if len(m.HealthError) > 0 { + i -= len(m.HealthError) + copy(dAtA[i:], m.HealthError) + i = encodeVarintQuery(dAtA, i, uint64(len(m.HealthError))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AggregateStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregateStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregateStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SecondsBehindMasterMax != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.SecondsBehindMasterMax)) + i-- + dAtA[i] = 0x20 + } + if m.SecondsBehindMasterMin != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.SecondsBehindMasterMin)) + i-- + dAtA[i] = 0x18 + } + if m.UnhealthyTabletCount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.UnhealthyTabletCount)) + i-- + 
dAtA[i] = 0x10 + } + if m.HealthyTabletCount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.HealthyTabletCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StreamHealthResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamHealthResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamHealthResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.RealtimeStats != nil { + { + size, err := m.RealtimeStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TabletExternallyReparentedTimestamp != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TabletExternallyReparentedTimestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Serving { + i-- + if m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TransactionMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransactionMetadata) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TransactionMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Participants) > 0 { + for iNdEx := len(m.Participants) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Participants[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.TimeCreated != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TimeCreated)) + i-- + dAtA[i] = 0x18 + } + if m.State != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Target) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + sovQuery(uint64(m.TabletType)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VTGateCallerID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EventToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovQuery(uint64(m.Timestamp)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovQuery(uint64(m.Type)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BindVariable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovQuery(uint64(m.Type)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BoundQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sql) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.BindVariables) > 0 { + for k, v := range m.BindVariables { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovQuery(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovQuery(uint64(len(k))) + l + n += mapEntrySize + 1 + sovQuery(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IncludedFields != 0 { + n += 1 + sovQuery(uint64(m.IncludedFields)) + } + if m.ClientFoundRows { + n += 2 + } + if m.Workload != 0 { + n += 1 + sovQuery(uint64(m.Workload)) + } + if 
m.SqlSelectLimit != 0 { + n += 1 + sovQuery(uint64(m.SqlSelectLimit)) + } + if m.TransactionIsolation != 0 { + n += 1 + sovQuery(uint64(m.TransactionIsolation)) + } + if m.SkipQueryPlanCache { + n += 2 + } + if m.PlannerVersion != 0 { + n += 1 + sovQuery(uint64(m.PlannerVersion)) + } + if m.HasCreatedTempTables { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovQuery(uint64(m.Type)) + } + l = len(m.Table) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.OrgTable) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Database) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.OrgName) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.ColumnLength != 0 { + n += 1 + sovQuery(uint64(m.ColumnLength)) + } + if m.Charset != 0 { + n += 1 + sovQuery(uint64(m.Charset)) + } + if m.Decimals != 0 { + n += 1 + sovQuery(uint64(m.Decimals)) + } + if m.Flags != 0 { + n += 1 + sovQuery(uint64(m.Flags)) + } + l = len(m.ColumnType) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Row) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Lengths) > 0 { + l = 0 + for _, e := range m.Lengths { + l += sozQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } + l = len(m.Values) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *QueryResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.RowsAffected != 0 { + n += 1 + sovQuery(uint64(m.RowsAffected)) + 
} + if m.InsertId != 0 { + n += 1 + sovQuery(uint64(m.InsertId)) + } + if len(m.Rows) > 0 { + for _, e := range m.Rows { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *QueryWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovQuery(uint64(m.Code)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statements) > 0 { + for _, e := range m.Statements { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.EventToken != nil { + l = m.EventToken.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamEvent_Statement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Category != 0 { + n += 1 + sovQuery(uint64(m.Category)) + } + l = len(m.TableName) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.PrimaryKeyFields) > 0 { + for _, e := range m.PrimaryKeyFields { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if len(m.PrimaryKeyValues) > 0 { + for _, e := range m.PrimaryKeyValues { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + 
sovQuery(uint64(l)) + } + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ReservedId != 0 { + n += 1 + sovQuery(uint64(m.ReservedId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResultWithError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteBatchRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.AsTransaction { + n += 2 + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteBatchResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() 
+ n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamExecuteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamExecuteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BeginRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BeginResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *CommitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CommitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReservedId != 0 { + n += 1 + sovQuery(uint64(m.ReservedId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RollbackRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RollbackResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReservedId != 0 { + n += 1 + sovQuery(uint64(m.ReservedId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PrepareRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + 
n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PrepareResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CommitPreparedRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CommitPreparedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RollbackPreparedRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RollbackPreparedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} 
+ +func (m *CreateTransactionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Participants) > 0 { + for _, e := range m.Participants { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateTransactionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartCommitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartCommitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetRollbackRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if 
m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetRollbackResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConcludeTransactionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConcludeTransactionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadTransactionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadTransactionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BeginExecuteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ReservedId != 0 { + n += 1 + sovQuery(uint64(m.ReservedId)) + } + if len(m.PreQueries) > 0 { + for _, s := range m.PreQueries { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BeginExecuteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BeginExecuteBatchRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.AsTransaction { + n += 2 + } + if 
m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BeginExecuteBatchResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MessageStreamRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MessageStreamResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MessageAckRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + 
sovQuery(uint64(l)) + } + if len(m.Ids) > 0 { + for _, e := range m.Ids { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MessageAckResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReserveExecuteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.PreQueries) > 0 { + for _, s := range m.PreQueries { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReserveExecuteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ReservedId != 0 { + n += 1 + sovQuery(uint64(m.ReservedId)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReserveBeginExecuteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = 
m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.PreQueries) > 0 { + for _, s := range m.PreQueries { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReserveBeginExecuteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.ReservedId != 0 { + n += 1 + sovQuery(uint64(m.ReservedId)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReleaseRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovQuery(uint64(m.TransactionId)) + } + if m.ReservedId != 0 { + n += 1 + sovQuery(uint64(m.ReservedId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReleaseResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamHealthRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RealtimeStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HealthError) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.SecondsBehindMaster != 0 { + n += 1 + sovQuery(uint64(m.SecondsBehindMaster)) + } + if m.BinlogPlayersCount != 0 { + n += 1 + sovQuery(uint64(m.BinlogPlayersCount)) + } + if m.SecondsBehindMasterFilteredReplication != 0 { + n += 1 + sovQuery(uint64(m.SecondsBehindMasterFilteredReplication)) + } + if m.CpuUsage != 0 { + n += 9 + } + if m.Qps != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AggregateStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HealthyTabletCount != 0 { + n += 1 + sovQuery(uint64(m.HealthyTabletCount)) + } + if m.UnhealthyTabletCount != 0 { + n += 1 + sovQuery(uint64(m.UnhealthyTabletCount)) + } + if m.SecondsBehindMasterMin != 0 { + n += 1 + sovQuery(uint64(m.SecondsBehindMasterMin)) + } + if m.SecondsBehindMasterMax != 0 { + n += 1 + sovQuery(uint64(m.SecondsBehindMasterMax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamHealthResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Serving { + n += 2 + } + if m.TabletExternallyReparentedTimestamp != 0 { + n += 1 + sovQuery(uint64(m.TabletExternallyReparentedTimestamp)) + } + if m.RealtimeStats != nil { + l = m.RealtimeStats.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + 
return n +} + +func (m *TransactionMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.State != 0 { + n += 1 + sovQuery(uint64(m.State)) + } + if m.TimeCreated != 0 { + n += 1 + sovQuery(uint64(m.TimeCreated)) + } + if len(m.Participants) > 0 { + for _, e := range m.Participants { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Target) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Target: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Target: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VTGateCallerID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VTGateCallerID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VTGateCallerID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BindVariable) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BindVariable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BindVariable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx 
+ byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoundQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoundQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoundQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.BindVariables == nil { + m.BindVariables = make(map[string]*BindVariable) + } + var mapkey string + var mapvalue *BindVariable + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthQuery + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthQuery + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthQuery + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthQuery + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BindVariable{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.BindVariables[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludedFields", wireType) + } + m.IncludedFields = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IncludedFields |= ExecuteOptions_IncludedFields(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientFoundRows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + m.ClientFoundRows = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Workload", wireType) + } + m.Workload = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Workload |= ExecuteOptions_Workload(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SqlSelectLimit", wireType) + } + m.SqlSelectLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SqlSelectLimit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionIsolation", wireType) + } + m.TransactionIsolation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionIsolation |= ExecuteOptions_TransactionIsolation(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipQueryPlanCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipQueryPlanCache = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PlannerVersion", wireType) + } + m.PlannerVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.PlannerVersion |= ExecuteOptions_PlannerVersion(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasCreatedTempTables", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasCreatedTempTables = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrgTable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrgTable = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Database = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrgName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrgName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnLength", wireType) + } + m.ColumnLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ColumnLength |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Charset", wireType) + } + m.Charset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Charset |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Decimals", wireType) + } + m.Decimals = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Decimals |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ColumnType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Row) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Row: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Row: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.Lengths = append(m.Lengths, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Lengths) == 0 { + m.Lengths = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.Lengths = append(m.Lengths, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Lengths", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values[:0], dAtA[iNdEx:postIndex]...) + if m.Values == nil { + m.Values = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffected", wireType) + } + m.RowsAffected = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowsAffected |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsertId", wireType) + } + m.InsertId 
= 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InsertId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rows = append(m.Rows, &Row{}) + if err := m.Rows[len(m.Rows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { 
+ return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statements = append(m.Statements, &StreamEvent_Statement{}) + if err := m.Statements[len(m.Statements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EventToken == nil { + m.EventToken = &EventToken{} + } + if err := m.EventToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamEvent_Statement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Statement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Statement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Category", wireType) + } + m.Category = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Category |= StreamEvent_Statement_Category(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKeyFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrimaryKeyFields = append(m.PrimaryKeyFields, &Field{}) + if err := m.PrimaryKeyFields[len(m.PrimaryKeyFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKeyValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.PrimaryKeyValues = append(m.PrimaryKeyValues, &Row{}) + if err := m.PrimaryKeyValues[len(m.PrimaryKeyValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = append(m.Sql[:0], dAtA[iNdEx:postIndex]...) + if m.Sql == nil { + m.Sql = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &BoundQuery{} + } + if err := m.Query.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResultWithError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResultWithError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResultWithError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &vtrpc.RPCError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteBatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteBatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteBatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = 
&vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { 
+ return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &BoundQuery{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AsTransaction", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AsTransaction = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteBatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteBatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteBatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &QueryResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamExecuteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamExecuteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamExecuteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &BoundQuery{} + } + if err := m.Query.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamExecuteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamExecuteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamExecuteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BeginRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BeginRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BeginRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BeginResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BeginResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BeginResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrepareRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrepareRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrepareRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrepareResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrepareResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrepareResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitPreparedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitPreparedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitPreparedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF 
+ } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitPreparedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitPreparedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitPreparedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackPreparedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackPreparedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackPreparedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackPreparedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackPreparedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackPreparedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateTransactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTransactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTransactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Participants = append(m.Participants, &Target{}) + if err := m.Participants[len(m.Participants)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateTransactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTransactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTransactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartCommitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartCommitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartCommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartCommitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartCommitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartCommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetRollbackRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetRollbackRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetRollbackRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetRollbackResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetRollbackResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetRollbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConcludeTransactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConcludeTransactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConcludeTransactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { 
+ return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConcludeTransactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConcludeTransactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConcludeTransactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadTransactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadTransactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadTransactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF 
+ } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadTransactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadTransactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadTransactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &TransactionMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BeginExecuteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BeginExecuteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BeginExecuteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &BoundQuery{} + } + if err := m.Query.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreQueries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreQueries = append(m.PreQueries, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BeginExecuteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BeginExecuteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BeginExecuteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &vtrpc.RPCError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BeginExecuteBatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BeginExecuteBatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BeginExecuteBatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &BoundQuery{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AsTransaction", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AsTransaction = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BeginExecuteBatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BeginExecuteBatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BeginExecuteBatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &vtrpc.RPCError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &QueryResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MessageStreamRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MessageStreamRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MessageStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MessageStreamResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MessageStreamResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MessageStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MessageAckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MessageAckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MessageAckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ids = append(m.Ids, &Value{}) + if err := m.Ids[len(m.Ids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MessageAckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MessageAckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MessageAckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReserveExecuteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReserveExecuteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReserveExecuteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &BoundQuery{} + } + if err := m.Query.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreQueries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreQueries = append(m.PreQueries, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReserveExecuteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReserveExecuteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReserveExecuteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = 
&vtrpc.RPCError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex 
+ skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReserveBeginExecuteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReserveBeginExecuteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReserveBeginExecuteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &BoundQuery{} + } + 
if err := m.Query.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreQueries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreQueries = append(m.PreQueries, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReserveBeginExecuteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReserveBeginExecuteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReserveBeginExecuteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &vtrpc.RPCError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 
{ + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx 
+ skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReleaseRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReleaseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReleaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} + } + if err := m.EffectiveCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &VTGateCallerID{} + } + if err := m.ImmediateCallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReleaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReleaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReleaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamHealthRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamHealthRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamHealthRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RealtimeStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RealtimeStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RealtimeStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HealthError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HealthError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondsBehindMaster", wireType) + } + m.SecondsBehindMaster = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondsBehindMaster |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogPlayersCount", wireType) + } + 
m.BinlogPlayersCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BinlogPlayersCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondsBehindMasterFilteredReplication", wireType) + } + m.SecondsBehindMasterFilteredReplication = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondsBehindMasterFilteredReplication |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuUsage", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.CpuUsage = float64(math.Float64frombits(v)) + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Qps", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Qps = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AggregateStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregateStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregateStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HealthyTabletCount", wireType) + } + m.HealthyTabletCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HealthyTabletCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyTabletCount", wireType) + } + m.UnhealthyTabletCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnhealthyTabletCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondsBehindMasterMin", wireType) + } + m.SecondsBehindMasterMin = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondsBehindMasterMin |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondsBehindMasterMax", wireType) + } + m.SecondsBehindMasterMax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondsBehindMasterMax |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamHealthResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamHealthResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamHealthResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Serving = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletExternallyReparentedTimestamp", wireType) + } + m.TabletExternallyReparentedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletExternallyReparentedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RealtimeStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RealtimeStats == nil { + m.RealtimeStats = &RealtimeStats{} + } + if err := m.RealtimeStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransactionMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransactionMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransactionMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= TransactionState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreated", wireType) + } + m.TimeCreated = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreated |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Participants = append(m.Participants, &Target{}) + if err := m.Participants[len(m.Participants)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = 
fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index 7553788d1cc..f3ed3b94cd5 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: queryservice.proto package queryservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" query "vitess.io/vitess/go/vt/proto/query" ) @@ -31,45 +30,46 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_4bd2dde8711f22e3) } var fileDescriptor_4bd2dde8711f22e3 = []byte{ - // 598 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0x85, 0x43, 0x1b, 0xb4, 0x09, 0xa1, 0x6c, 0x29, 0x50, 0x27, 0xa4, 0x4d, 0x6e, 0x08, 0x29, - 0x41, 0x80, 0x84, 0x54, 0x89, 0x43, 0x13, 0x51, 0x81, 0x10, 0x5f, 0x2e, 0x54, 0x08, 0x24, 0xa4, - 0x8d, 0x33, 0x0a, 0x56, 0x1d, 0x6f, 0xea, 0x5d, 0xa7, 0xf0, 0xdb, 0xb9, 0x54, 0xb1, 0x3d, 0xe3, - 0xdd, 0x8d, 0x9d, 0x5b, 0xe7, 0xbd, 0x99, 0xd7, 0xc9, 0x8c, 0xe7, 0x2d, 0xe3, 0x57, 0x29, 0x24, - 0xff, 0x14, 0x24, 0xab, 0x30, 0x80, 0xe1, 0x32, 0x91, 0x5a, 0xf2, 0x96, 0x89, 0x79, 0xcd, 0x2c, - 0xca, 0x29, 0x6f, 0x6f, 0x1a, 0xc6, 0x91, 0x9c, 0xcf, 0x84, 0x16, 0x39, 0xf2, 0xe2, 0x7f, 0x9b, - 0xed, 0x7c, 0x5d, 0x67, 0xf0, 0x13, 0xd6, 0x78, 0xfb, 0x17, 0x82, 0x54, 0x03, 0x3f, 0x18, 0xe6, - 0x45, 0x45, 0xec, 0xc3, 0x55, 0x0a, 0x4a, 0x7b, 0x0f, 0x5d, 0x58, 0x2d, 0x65, 0xac, 0x60, 0x70, - 0x8b, 0xbf, 0x67, 0xad, 0x02, 0x1c, 0x0b, 0x1d, 0xfc, 0xe1, 0x9e, 0x9d, 0x99, 0x81, 0xa8, 0xd2, - 0xa9, 0xe4, 0x48, 0xea, 0x13, 0xbb, 0x7b, 0xae, 
0x13, 0x10, 0x0b, 0x6c, 0x06, 0xf3, 0x2d, 0x14, - 0xc5, 0xba, 0xd5, 0x24, 0xaa, 0x3d, 0xbf, 0xcd, 0x5f, 0xb1, 0x9d, 0x31, 0xcc, 0xc3, 0x98, 0xef, - 0x17, 0xa9, 0x59, 0x84, 0xf5, 0x0f, 0x6c, 0x90, 0xba, 0x78, 0xcd, 0x76, 0x27, 0x72, 0xb1, 0x08, - 0x35, 0xc7, 0x8c, 0x3c, 0xc4, 0xba, 0x03, 0x07, 0xa5, 0xc2, 0x37, 0xec, 0x8e, 0x2f, 0xa3, 0x68, - 0x2a, 0x82, 0x4b, 0x8e, 0xf3, 0x42, 0x00, 0x8b, 0x1f, 0x6d, 0xe0, 0x54, 0x7e, 0xc2, 0x1a, 0x5f, - 0x12, 0x58, 0x8a, 0xa4, 0x5c, 0x42, 0x11, 0xbb, 0x4b, 0x20, 0x98, 0x6a, 0x3f, 0xb3, 0x76, 0xde, - 0x4e, 0x41, 0xcd, 0x78, 0xd7, 0xea, 0x12, 0x61, 0x54, 0x7a, 0x52, 0xc3, 0x92, 0xe0, 0x77, 0xb6, - 0x87, 0x2d, 0x92, 0x64, 0xcf, 0xe9, 0xdd, 0x15, 0x3d, 0xaa, 0xe5, 0x49, 0xf6, 0x07, 0xbb, 0x3f, - 0x49, 0x40, 0x68, 0xf8, 0x96, 0x88, 0x58, 0x89, 0x40, 0x87, 0x32, 0xe6, 0x58, 0xb7, 0xc1, 0xa0, - 0xf0, 0x71, 0x7d, 0x02, 0x29, 0x9f, 0xb1, 0xe6, 0xb9, 0x16, 0x89, 0x2e, 0x56, 0x77, 0x48, 0x1f, - 0x07, 0x61, 0xa8, 0xe6, 0x55, 0x51, 0x96, 0x0e, 0x68, 0xda, 0x23, 0xe9, 0x94, 0xd8, 0x86, 0x8e, - 0x49, 0x91, 0xce, 0x6f, 0xb6, 0x3f, 0x91, 0x71, 0x10, 0xa5, 0x33, 0xeb, 0xb7, 0xf6, 0x69, 0xf0, - 0x1b, 0x1c, 0xea, 0x0e, 0xb6, 0xa5, 0x90, 0xbe, 0xcf, 0xee, 0xf9, 0x20, 0x66, 0xa6, 0x36, 0x2e, - 0xd5, 0xc1, 0x51, 0xb7, 0x57, 0x47, 0x9b, 0xa7, 0x9c, 0x1d, 0x03, 0x9e, 0x9f, 0x67, 0x5e, 0x88, - 0x73, 0x7d, 0x9d, 0x4a, 0xce, 0x5c, 0xb4, 0xc9, 0xe4, 0xd6, 0x70, 0x54, 0x51, 0x63, 0xf9, 0xc3, - 0x71, 0x7d, 0x82, 0x69, 0x12, 0x1f, 0x41, 0x29, 0x31, 0x87, 0xfc, 0xf0, 0xc9, 0x24, 0x2c, 0xd4, - 0x35, 0x09, 0x87, 0x34, 0x4c, 0x62, 0xc2, 0x58, 0x41, 0x9e, 0x06, 0x97, 0xfc, 0xb1, 0x9d, 0x7f, - 0x5a, 0xae, 0xfb, 0xb0, 0x82, 0x31, 0xef, 0xcf, 0x87, 0xb5, 0xed, 0x02, 0xce, 0xae, 0x4b, 0xd3, - 0x36, 0x61, 0xf7, 0xfe, 0x5c, 0xd6, 0xfc, 0x7c, 0x0a, 0xce, 0xda, 0x48, 0xdf, 0xae, 0xab, 0x5a, - 0xcc, 0x60, 0x5b, 0x8a, 0x69, 0x36, 0x3e, 0x44, 0x20, 0x54, 0x69, 0x36, 0x45, 0xec, 0x9a, 0x0d, - 0xc1, 0x54, 0xfb, 0x81, 0xb5, 0xf2, 0x39, 0xbe, 0x03, 0x11, 0xe9, 0xd2, 0xf1, 0x4d, 
0xd0, 0xfd, - 0x4c, 0x6c, 0xce, 0x18, 0xff, 0x19, 0x6b, 0x5c, 0x14, 0x8b, 0xf4, 0x86, 0xc6, 0x13, 0x75, 0x61, - 0xef, 0xb1, 0x53, 0xc9, 0x19, 0x3a, 0x3e, 0x6b, 0x22, 0x2c, 0xaf, 0x15, 0xef, 0x55, 0xe5, 0xcb, - 0x6b, 0x55, 0x7a, 0x55, 0x1d, 0x6f, 0x68, 0xfe, 0x62, 0xed, 0xf2, 0x5f, 0xa5, 0x91, 0x56, 0xbc, - 0x5f, 0xdd, 0xc6, 0x9a, 0x2b, 0xe7, 0xbf, 0x25, 0xa5, 0x14, 0x1f, 0x3f, 0xfb, 0xf9, 0x74, 0x15, - 0x6a, 0x50, 0x6a, 0x18, 0xca, 0x51, 0xfe, 0xd7, 0x68, 0x2e, 0x47, 0x2b, 0x3d, 0xca, 0x5e, 0xe7, - 0x91, 0xf9, 0x92, 0x4f, 0x77, 0x33, 0xec, 0xe5, 0x4d, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x30, - 0x29, 0x02, 0xf4, 0x07, 0x00, 0x00, + // 622 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0x6e, 0x0e, 0x6d, 0xd0, 0x26, 0x84, 0xb2, 0xa5, 0x40, 0x9d, 0xe0, 0x36, 0xb9, 0x71, 0x49, + 0x10, 0x20, 0x21, 0x15, 0x71, 0x68, 0x22, 0x2a, 0x10, 0xe2, 0xcf, 0x85, 0x0a, 0x81, 0x84, 0xb4, + 0x71, 0x46, 0xc1, 0xaa, 0xe3, 0x4d, 0xbd, 0xeb, 0x14, 0xde, 0x84, 0x47, 0xe2, 0xc8, 0x23, 0xa0, + 0xf0, 0x18, 0x5c, 0x50, 0x6c, 0xcf, 0x7a, 0x77, 0x63, 0xe7, 0xd6, 0xf9, 0xbe, 0x99, 0xaf, 0x93, + 0x19, 0xcf, 0xb7, 0x84, 0x5e, 0x26, 0x10, 0xff, 0x10, 0x10, 0x2f, 0x02, 0x1f, 0xfa, 0xf3, 0x98, + 0x4b, 0x4e, 0x9b, 0x3a, 0xe6, 0x34, 0xd2, 0x28, 0xa3, 0x9c, 0xdd, 0x71, 0x10, 0x85, 0x7c, 0x3a, + 0x61, 0x92, 0x65, 0xc8, 0xc3, 0x7f, 0x2d, 0xb2, 0xfd, 0x7e, 0x95, 0x41, 0x8f, 0x49, 0xfd, 0xf9, + 0x77, 0xf0, 0x13, 0x09, 0x74, 0xbf, 0x9f, 0x15, 0xe5, 0xb1, 0x07, 0x97, 0x09, 0x08, 0xe9, 0xdc, + 0xb6, 0x61, 0x31, 0xe7, 0x91, 0x80, 0xde, 0x16, 0x7d, 0x49, 0x9a, 0x39, 0x38, 0x64, 0xd2, 0xff, + 0x46, 0x1d, 0x33, 0x33, 0x05, 0x51, 0xa5, 0x5d, 0xca, 0x29, 0xa9, 0x37, 0xe4, 0xfa, 0x99, 0x8c, + 0x81, 0xcd, 0xb0, 0x19, 0xcc, 0x37, 0x50, 0x14, 0xeb, 0x94, 0x93, 0xa8, 0xf6, 0xa0, 0x46, 0x1f, + 0x93, 0xed, 0x21, 0x4c, 0x83, 0x88, 0xee, 0xe5, 0xa9, 0x69, 0x84, 0xf5, 0xb7, 0x4c, 0x50, 0x75, + 0xf1, 0x84, 0xec, 0x8c, 0xf8, 0x6c, 
0x16, 0x48, 0x8a, 0x19, 0x59, 0x88, 0x75, 0xfb, 0x16, 0xaa, + 0x0a, 0x9f, 0x91, 0x6b, 0x1e, 0x0f, 0xc3, 0x31, 0xf3, 0x2f, 0x28, 0xce, 0x0b, 0x01, 0x2c, 0xbe, + 0xb3, 0x86, 0xab, 0xf2, 0x63, 0x52, 0x7f, 0x17, 0xc3, 0x9c, 0xc5, 0xc5, 0x12, 0xf2, 0xd8, 0x5e, + 0x82, 0x82, 0x55, 0xed, 0x5b, 0xd2, 0xca, 0xda, 0xc9, 0xa9, 0x09, 0xed, 0x18, 0x5d, 0x22, 0x8c, + 0x4a, 0xf7, 0x2a, 0x58, 0x25, 0xf8, 0x91, 0xec, 0x62, 0x8b, 0x4a, 0xd2, 0xb5, 0x7a, 0xb7, 0x45, + 0x0f, 0x2b, 0x79, 0x25, 0xfb, 0x89, 0xdc, 0x1c, 0xc5, 0xc0, 0x24, 0x7c, 0x88, 0x59, 0x24, 0x98, + 0x2f, 0x03, 0x1e, 0x51, 0xac, 0x5b, 0x63, 0x50, 0xf8, 0xa8, 0x3a, 0x41, 0x29, 0x9f, 0x92, 0xc6, + 0x99, 0x64, 0xb1, 0xcc, 0x57, 0x77, 0xa0, 0x3e, 0x0e, 0x85, 0xa1, 0x9a, 0x53, 0x46, 0x19, 0x3a, + 0x20, 0xd5, 0x1e, 0x95, 0x4e, 0x81, 0xad, 0xe9, 0xe8, 0x94, 0xd2, 0xf9, 0x4a, 0xf6, 0x46, 0x3c, + 0xf2, 0xc3, 0x64, 0x62, 0xfc, 0xd6, 0xae, 0x1a, 0xfc, 0x1a, 0x87, 0xba, 0xbd, 0x4d, 0x29, 0x4a, + 0xdf, 0x23, 0x37, 0x3c, 0x60, 0x13, 0x5d, 0x1b, 0x97, 0x6a, 0xe1, 0xa8, 0xeb, 0x56, 0xd1, 0xfa, + 0x29, 0xa7, 0xc7, 0x80, 0xe7, 0xe7, 0xe8, 0x17, 0x62, 0x5d, 0x5f, 0xbb, 0x94, 0xd3, 0x17, 0xad, + 0x33, 0x99, 0x35, 0x1c, 0x96, 0xd4, 0x18, 0xfe, 0x70, 0x54, 0x9d, 0xa0, 0x9b, 0xc4, 0x6b, 0x10, + 0x82, 0x4d, 0x21, 0x3b, 0x7c, 0x65, 0x12, 0x06, 0x6a, 0x9b, 0x84, 0x45, 0x6a, 0x26, 0x31, 0x22, + 0x24, 0x27, 0x4f, 0xfc, 0x0b, 0x7a, 0xd7, 0xcc, 0x3f, 0x29, 0xd6, 0x7d, 0x50, 0xc2, 0xe8, 0xf7, + 0xe7, 0xc1, 0xca, 0x76, 0x01, 0x67, 0xd7, 0x51, 0xd3, 0xd6, 0x61, 0xfb, 0xfe, 0x6c, 0x56, 0xff, + 0x7c, 0x72, 0xce, 0xd8, 0x48, 0xd7, 0xac, 0x2b, 0x5b, 0x4c, 0x6f, 0x53, 0x8a, 0x6e, 0x36, 0x1e, + 0x84, 0xc0, 0x44, 0x61, 0x36, 0x79, 0x6c, 0x9b, 0x8d, 0x82, 0x55, 0xed, 0x2b, 0xd2, 0xcc, 0xe6, + 0xf8, 0x02, 0x58, 0x28, 0x0b, 0xc7, 0xd7, 0x41, 0xfb, 0x33, 0x31, 0x39, 0x6d, 0xfc, 0xa7, 0xa4, + 0x7e, 0x9e, 0x2f, 0xd2, 0xe9, 0x6b, 0x4f, 0xd4, 0xb9, 0xb9, 0xc7, 0x76, 0x29, 0xa7, 0xe9, 0x78, + 0xa4, 0x81, 0x30, 0xbf, 0x12, 0xd4, 0x2d, 0xcb, 0xe7, 0x57, 0xa2, 0xf0, 
0xaa, 0x2a, 0x5e, 0xd3, + 0xfc, 0x42, 0x5a, 0xc5, 0xbf, 0x4a, 0x42, 0x29, 0x68, 0xb7, 0xbc, 0x8d, 0x15, 0x57, 0xcc, 0x7f, + 0x43, 0x4a, 0x21, 0x3e, 0x7c, 0xfa, 0x6b, 0xe9, 0xd6, 0x7e, 0x2f, 0xdd, 0xda, 0x9f, 0xa5, 0x5b, + 0xfb, 0xf9, 0xd7, 0xdd, 0xfa, 0x7c, 0x7f, 0x11, 0x48, 0x10, 0xa2, 0x1f, 0xf0, 0x41, 0xf6, 0xd7, + 0x60, 0xca, 0x07, 0x0b, 0x39, 0x48, 0x5f, 0xeb, 0x81, 0xfe, 0xb2, 0x8f, 0x77, 0x52, 0xec, 0xd1, + 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x33, 0x99, 0xf7, 0xaa, 0x04, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index ab6af754b31..71f3d05669b 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -1,11 +1,13 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: replicationdata.proto package replicationdata import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -74,18 +76,26 @@ func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor_ee8ee22b8c4b9d06, []int{0} } - func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Status) XXX_Merge(src proto.Message) { xxx_messageInfo_Status.Merge(m, src) } func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) + return m.Size() } func (m *Status) XXX_DiscardUnknown() { 
xxx_messageInfo_Status.DiscardUnknown(m) @@ -193,18 +203,26 @@ func (*StopReplicationStatus) ProtoMessage() {} func (*StopReplicationStatus) Descriptor() ([]byte, []int) { return fileDescriptor_ee8ee22b8c4b9d06, []int{1} } - func (m *StopReplicationStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReplicationStatus.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StopReplicationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReplicationStatus.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StopReplicationStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StopReplicationStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_StopReplicationStatus.Merge(m, src) } func (m *StopReplicationStatus) XXX_Size() int { - return xxx_messageInfo_StopReplicationStatus.Size(m) + return m.Size() } func (m *StopReplicationStatus) XXX_DiscardUnknown() { xxx_messageInfo_StopReplicationStatus.DiscardUnknown(m) @@ -241,18 +259,26 @@ func (*MasterStatus) ProtoMessage() {} func (*MasterStatus) Descriptor() ([]byte, []int) { return fileDescriptor_ee8ee22b8c4b9d06, []int{2} } - func (m *MasterStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MasterStatus.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MasterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MasterStatus.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MasterStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MasterStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_MasterStatus.Merge(m, src) } func (m *MasterStatus) XXX_Size() int { - return xxx_messageInfo_MasterStatus.Size(m) + return 
m.Size() } func (m *MasterStatus) XXX_DiscardUnknown() { xxx_messageInfo_MasterStatus.DiscardUnknown(m) @@ -284,34 +310,1039 @@ func init() { func init() { proto.RegisterFile("replicationdata.proto", fileDescriptor_ee8ee22b8c4b9d06) } var fileDescriptor_ee8ee22b8c4b9d06 = []byte{ - // 454 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6f, 0xd3, 0x30, - 0x18, 0xc5, 0x49, 0x47, 0x4b, 0xf7, 0xb5, 0xdb, 0x82, 0xb7, 0x6a, 0x11, 0x17, 0xaa, 0x72, 0x89, - 0xa6, 0xd1, 0xa0, 0x21, 0x4e, 0x9c, 0x36, 0x86, 0xb4, 0x4a, 0xdd, 0x3a, 0xdc, 0x71, 0x80, 0x8b, - 0x95, 0xd6, 0x6e, 0x6a, 0x29, 0xe4, 0xcb, 0x6c, 0xa7, 0x68, 0x7f, 0x3b, 0x17, 0x14, 0x3b, 0xeb, - 0x4a, 0x8a, 0x10, 0xb7, 0xf8, 0xbd, 0x9f, 0x1c, 0x7f, 0xcf, 0xcf, 0xd0, 0x53, 0x22, 0x4f, 0xe5, - 0x3c, 0x36, 0x12, 0x33, 0x1e, 0x9b, 0x78, 0x98, 0x2b, 0x34, 0x48, 0x0e, 0x6a, 0xf2, 0xe0, 0xd7, - 0x0e, 0xb4, 0xa6, 0x26, 0x36, 0x85, 0x26, 0xaf, 0xa0, 0x9d, 0xa3, 0x96, 0xa5, 0x15, 0x78, 0x7d, - 0x2f, 0xdc, 0xa5, 0xeb, 0x35, 0x39, 0x81, 0x97, 0x12, 0x99, 0x59, 0x2a, 0x11, 0x73, 0xa6, 0x8a, - 0x2c, 0x93, 0x59, 0x12, 0x34, 0xfa, 0x5e, 0xd8, 0xa6, 0x07, 0x12, 0xef, 0xac, 0x4e, 0x9d, 0x4c, - 0x4e, 0x81, 0xe8, 0xfb, 0xb4, 0x0e, 0xef, 0x58, 0xd8, 0xd7, 0xf7, 0xe9, 0x9f, 0xf4, 0x19, 0xf4, - 0xb4, 0x98, 0x63, 0xc6, 0x35, 0x9b, 0x89, 0xa5, 0xcc, 0x38, 0xfb, 0x11, 0x6b, 0x23, 0x54, 0xf0, - 0xbc, 0xef, 0x85, 0x7b, 0xf4, 0xb0, 0x32, 0x2f, 0xac, 0x77, 0x6d, 0x2d, 0xf2, 0x1a, 0x3a, 0x0e, - 0x62, 0x4b, 0xd4, 0x26, 0x68, 0xda, 0xc3, 0x82, 0x93, 0xae, 0x50, 0x9b, 0x0d, 0x20, 0x47, 0x65, - 0x82, 0x56, 0xdf, 0x0b, 0x9b, 0x8f, 0xc0, 0x2d, 0x2a, 0x43, 0xde, 0xc1, 0x51, 0x05, 0xcc, 0x31, - 0xcb, 0xc4, 0xdc, 0x30, 0x25, 0x8c, 0x7a, 0x08, 0x5e, 0x58, 0x92, 0x38, 0xef, 0x93, 0xb3, 0x68, - 0xe9, 0x94, 0x53, 0x29, 0x91, 0xc6, 0x0f, 0x2c, 0xc5, 0x84, 0xad, 0x73, 0x6a, 0xdb, 0x5f, 0xfb, - 0xd6, 0x19, 0x63, 0x72, 0xfb, 0x98, 0xd7, 0x1b, 0xd8, 0x5b, 0xc8, 0x54, 0x3c, 0x81, 0xbb, 0x16, - 
0xec, 0x96, 0xe2, 0x1a, 0xfa, 0x00, 0xc7, 0x16, 0xfa, 0xcb, 0xbe, 0x60, 0xf1, 0xa3, 0xd2, 0xa6, - 0xf5, 0xbd, 0x43, 0xf0, 0xab, 0xb3, 0x6b, 0xa1, 0x56, 0x42, 0x31, 0xc9, 0x83, 0x8e, 0x0d, 0x6b, - 0xdf, 0xe9, 0x53, 0x2b, 0x8f, 0xf8, 0x46, 0x0c, 0x45, 0x21, 0x79, 0xd0, 0xdd, 0xcc, 0xe9, 0x6b, - 0x21, 0xf9, 0xe0, 0x27, 0xf4, 0xa6, 0x06, 0x73, 0xfa, 0x54, 0x8a, 0xaa, 0x0b, 0x11, 0xb4, 0x66, - 0x62, 0x81, 0x4a, 0xd8, 0x26, 0x74, 0xce, 0x8e, 0x87, 0xf5, 0x3e, 0x39, 0x90, 0x56, 0x18, 0x79, - 0x0b, 0xcd, 0x78, 0x51, 0x5e, 0x5b, 0xe3, 0xdf, 0xbc, 0xa3, 0x06, 0x13, 0xe8, 0xba, 0xbb, 0xfc, - 0x8f, 0xee, 0x6d, 0x65, 0xd9, 0xd8, 0xce, 0xf2, 0xe4, 0x23, 0x1c, 0xd6, 0x26, 0xb9, 0x46, 0x2e, - 0x08, 0x81, 0xfd, 0xd1, 0xe4, 0xfc, 0xe6, 0x72, 0xfa, 0x65, 0x7c, 0x77, 0x45, 0x3f, 0x9f, 0x5f, - 0xfa, 0xcf, 0x88, 0x0f, 0xdd, 0xd1, 0xc4, 0xad, 0x26, 0x37, 0xe3, 0x6f, 0xbe, 0x77, 0x31, 0xfc, - 0x7e, 0xba, 0x92, 0x46, 0x68, 0x3d, 0x94, 0x18, 0xb9, 0xaf, 0x28, 0xc1, 0x68, 0x65, 0x22, 0xfb, - 0x6a, 0xa2, 0xda, 0x2c, 0xb3, 0x96, 0x95, 0xdf, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x14, 0xa4, - 0xdd, 0xf6, 0x65, 0x03, 0x00, 0x00, + // 482 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x41, 0x6f, 0xd3, 0x3e, + 0x18, 0xc6, 0x97, 0xee, 0xdf, 0xfe, 0xbb, 0xb7, 0xdd, 0x16, 0xbc, 0x55, 0x8b, 0x38, 0x94, 0xaa, + 0x5c, 0xa2, 0x69, 0x34, 0x68, 0x88, 0x13, 0x12, 0xd2, 0xc6, 0x90, 0x56, 0xa9, 0x5b, 0x47, 0x3a, + 0x0e, 0x70, 0xb1, 0xd2, 0xda, 0x6d, 0x2d, 0x85, 0xbc, 0x99, 0xed, 0x14, 0xed, 0x9b, 0xf0, 0x91, + 0x38, 0xf2, 0x11, 0x50, 0xf9, 0x18, 0x5c, 0x50, 0xec, 0xac, 0x2b, 0x19, 0x42, 0xdc, 0xe2, 0xe7, + 0xf9, 0xc9, 0xf1, 0xfb, 0xf8, 0x31, 0xb4, 0x24, 0x4f, 0x63, 0x31, 0x89, 0xb4, 0xc0, 0x84, 0x45, + 0x3a, 0xea, 0xa5, 0x12, 0x35, 0x92, 0xdd, 0x92, 0xdc, 0xfd, 0xb9, 0x09, 0xb5, 0x91, 0x8e, 0x74, + 0xa6, 0xc8, 0x63, 0xa8, 0xa7, 0xa8, 0x44, 0x6e, 0x79, 0x4e, 0xc7, 0xf1, 0xb7, 0xc2, 0xd5, 0x9a, + 0x1c, 0xc2, 0x23, 0x81, 0x54, 0xcf, 0x25, 0x8f, 
0x18, 0x95, 0x59, 0x92, 0x88, 0x64, 0xe6, 0x55, + 0x3a, 0x8e, 0x5f, 0x0f, 0x77, 0x05, 0x5e, 0x1b, 0x3d, 0xb4, 0x32, 0x39, 0x02, 0xa2, 0x6e, 0xe2, + 0x32, 0xbc, 0x69, 0x60, 0x57, 0xdd, 0xc4, 0xbf, 0xd3, 0xc7, 0xd0, 0x52, 0x7c, 0x82, 0x09, 0x53, + 0x74, 0xcc, 0xe7, 0x22, 0x61, 0xf4, 0x53, 0xa4, 0x34, 0x97, 0xde, 0x7f, 0x1d, 0xc7, 0xdf, 0x0e, + 0xf7, 0x0a, 0xf3, 0xd4, 0x78, 0x17, 0xc6, 0x22, 0x4f, 0xa0, 0x61, 0x21, 0x3a, 0x47, 0xa5, 0xbd, + 0xaa, 0x39, 0x2c, 0x58, 0xe9, 0x1c, 0x95, 0x5e, 0x03, 0x52, 0x94, 0xda, 0xab, 0x75, 0x1c, 0xbf, + 0x7a, 0x07, 0x5c, 0xa1, 0xd4, 0xe4, 0x39, 0xec, 0x17, 0xc0, 0x04, 0x93, 0x84, 0x4f, 0x34, 0x95, + 0x5c, 0xcb, 0x5b, 0xef, 0x7f, 0x43, 0x12, 0xeb, 0xbd, 0xb1, 0x56, 0x98, 0x3b, 0xf9, 0x54, 0x92, + 0xc7, 0xd1, 0x2d, 0x8d, 0x71, 0x46, 0x57, 0x39, 0xd5, 0xcd, 0xaf, 0x5d, 0xe3, 0x0c, 0x70, 0x76, + 0x75, 0x97, 0xd7, 0x53, 0xd8, 0x9e, 0x8a, 0x98, 0xdf, 0x83, 0x5b, 0x06, 0x6c, 0xe6, 0xe2, 0x0a, + 0x7a, 0x09, 0x07, 0x06, 0xfa, 0xc3, 0xbe, 0x60, 0xf0, 0xfd, 0xdc, 0x0e, 0xcb, 0x7b, 0xfb, 0xe0, + 0x16, 0x67, 0x57, 0x5c, 0x2e, 0xb8, 0xa4, 0x82, 0x79, 0x0d, 0x13, 0xd6, 0x8e, 0xd5, 0x47, 0x46, + 0xee, 0xb3, 0xb5, 0x18, 0xb2, 0x4c, 0x30, 0xaf, 0xb9, 0x9e, 0xd3, 0xfb, 0x4c, 0xb0, 0xee, 0x67, + 0x68, 0x8d, 0x34, 0xa6, 0xe1, 0x7d, 0x29, 0x8a, 0x2e, 0x04, 0x50, 0x1b, 0xf3, 0x29, 0x4a, 0x6e, + 0x9a, 0xd0, 0x38, 0x3e, 0xe8, 0x95, 0xfb, 0x64, 0xc1, 0xb0, 0xc0, 0xc8, 0x33, 0xa8, 0x46, 0xd3, + 0xfc, 0xda, 0x2a, 0x7f, 0xe7, 0x2d, 0xd5, 0x1d, 0x42, 0xd3, 0xde, 0xe5, 0x3f, 0x74, 0xef, 0x41, + 0x96, 0x95, 0x87, 0x59, 0x1e, 0xbe, 0x82, 0xbd, 0xd2, 0x24, 0x17, 0xc8, 0x38, 0x21, 0xb0, 0xd3, + 0x1f, 0x9e, 0x5c, 0x9e, 0x8d, 0xde, 0x0d, 0xae, 0xcf, 0xc3, 0xb7, 0x27, 0x67, 0xee, 0x06, 0x71, + 0xa1, 0xd9, 0x1f, 0xda, 0xd5, 0xf0, 0x72, 0xf0, 0xc1, 0x75, 0x4e, 0x5f, 0x7f, 0x5d, 0xb6, 0x9d, + 0x6f, 0xcb, 0xb6, 0xf3, 0x7d, 0xd9, 0x76, 0xbe, 0xfc, 0x68, 0x6f, 0x7c, 0x3c, 0x5a, 0x08, 0xcd, + 0x95, 0xea, 0x09, 0x0c, 0xec, 0x57, 0x30, 0xc3, 0x60, 0xa1, 0x03, 0xf3, 0x8a, 0x82, 
0xd2, 0x6c, + 0xe3, 0x9a, 0x91, 0x5f, 0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x96, 0x76, 0xbc, 0xf8, 0x75, 0x03, + 0x00, 0x00, +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MasterUuid) > 0 { + i -= len(m.MasterUuid) + copy(dAtA[i:], m.MasterUuid) + i = encodeVarintReplicationdata(dAtA, i, uint64(len(m.MasterUuid))) + i-- + dAtA[i] = 0x62 + } + if m.MasterServerId != 0 { + i = encodeVarintReplicationdata(dAtA, i, uint64(m.MasterServerId)) + i-- + dAtA[i] = 0x58 + } + if len(m.FileRelayLogPosition) > 0 { + i -= len(m.FileRelayLogPosition) + copy(dAtA[i:], m.FileRelayLogPosition) + i = encodeVarintReplicationdata(dAtA, i, uint64(len(m.FileRelayLogPosition))) + i-- + dAtA[i] = 0x52 + } + if len(m.FilePosition) > 0 { + i -= len(m.FilePosition) + copy(dAtA[i:], m.FilePosition) + i = encodeVarintReplicationdata(dAtA, i, uint64(len(m.FilePosition))) + i-- + dAtA[i] = 0x4a + } + if len(m.RelayLogPosition) > 0 { + i -= len(m.RelayLogPosition) + copy(dAtA[i:], m.RelayLogPosition) + i = encodeVarintReplicationdata(dAtA, i, uint64(len(m.RelayLogPosition))) + i-- + dAtA[i] = 0x42 + } + if m.MasterConnectRetry != 0 { + i = encodeVarintReplicationdata(dAtA, i, uint64(m.MasterConnectRetry)) + i-- + dAtA[i] = 0x38 + } + if m.MasterPort != 0 { + i = encodeVarintReplicationdata(dAtA, i, uint64(m.MasterPort)) + i-- + dAtA[i] = 0x30 + } + if len(m.MasterHost) > 0 { + i -= len(m.MasterHost) + copy(dAtA[i:], m.MasterHost) + i = encodeVarintReplicationdata(dAtA, i, 
uint64(len(m.MasterHost))) + i-- + dAtA[i] = 0x2a + } + if m.SecondsBehindMaster != 0 { + i = encodeVarintReplicationdata(dAtA, i, uint64(m.SecondsBehindMaster)) + i-- + dAtA[i] = 0x20 + } + if m.SqlThreadRunning { + i-- + if m.SqlThreadRunning { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IoThreadRunning { + i-- + if m.IoThreadRunning { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintReplicationdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopReplicationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintReplicationdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintReplicationdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MasterStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MasterStatus) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MasterStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FilePosition) > 0 { + i -= len(m.FilePosition) + copy(dAtA[i:], m.FilePosition) + i = encodeVarintReplicationdata(dAtA, i, uint64(len(m.FilePosition))) + i-- + dAtA[i] = 0x12 + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintReplicationdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintReplicationdata(dAtA []byte, offset int, v uint64) int { + offset -= sovReplicationdata(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base } +func (m *Status) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovReplicationdata(uint64(l)) + } + if m.IoThreadRunning { + n += 2 + } + if m.SqlThreadRunning { + n += 2 + } + if m.SecondsBehindMaster != 0 { + n += 1 + sovReplicationdata(uint64(m.SecondsBehindMaster)) + } + l = len(m.MasterHost) + if l > 0 { + n += 1 + l + sovReplicationdata(uint64(l)) + } + if m.MasterPort != 0 { + n += 1 + sovReplicationdata(uint64(m.MasterPort)) + } + if m.MasterConnectRetry != 0 { + n += 1 + sovReplicationdata(uint64(m.MasterConnectRetry)) + } + l = len(m.RelayLogPosition) + if l > 0 { + n += 1 + l + sovReplicationdata(uint64(l)) + } + l = len(m.FilePosition) + if l > 0 { + n += 1 + l + sovReplicationdata(uint64(l)) + } + l = len(m.FileRelayLogPosition) + if l > 0 { + n += 1 + l + sovReplicationdata(uint64(l)) + } + if m.MasterServerId != 0 { + n += 1 + sovReplicationdata(uint64(m.MasterServerId)) + } + l = len(m.MasterUuid) + if l > 0 { + n += 1 + l + 
sovReplicationdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StopReplicationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovReplicationdata(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovReplicationdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MasterStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovReplicationdata(uint64(l)) + } + l = len(m.FilePosition) + if l > 0 { + n += 1 + l + sovReplicationdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovReplicationdata(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozReplicationdata(x uint64) (n int) { + return sovReplicationdata(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IoThreadRunning", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IoThreadRunning = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SqlThreadRunning", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SqlThreadRunning = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondsBehindMaster", wireType) + } + m.SecondsBehindMaster = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondsBehindMaster |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterHost", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MasterHost = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterPort", wireType) + } + m.MasterPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MasterPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterConnectRetry", wireType) + } + m.MasterConnectRetry = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MasterConnectRetry |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RelayLogPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RelayLogPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
FilePosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileRelayLogPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileRelayLogPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterServerId", wireType) + } + m.MasterServerId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MasterServerId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterUuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MasterUuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipReplicationdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthReplicationdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthReplicationdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &Status{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &Status{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipReplicationdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthReplicationdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthReplicationdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MasterStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MasterStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MasterStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthReplicationdata + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthReplicationdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipReplicationdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthReplicationdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthReplicationdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipReplicationdata(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowReplicationdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthReplicationdata + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupReplicationdata + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthReplicationdata + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthReplicationdata = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowReplicationdata = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupReplicationdata = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index 5fbfc778baa..eab0daaa033 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -1,11 +1,13 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: tableacl.proto package tableacl import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -40,18 +42,26 @@ func (*TableGroupSpec) ProtoMessage() {} func (*TableGroupSpec) Descriptor() ([]byte, []int) { return fileDescriptor_7d0bedb248a1632e, []int{0} } - func (m *TableGroupSpec) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TableGroupSpec.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *TableGroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TableGroupSpec.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_TableGroupSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *TableGroupSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_TableGroupSpec.Merge(m, src) } func (m *TableGroupSpec) XXX_Size() int { - return xxx_messageInfo_TableGroupSpec.Size(m) + return m.Size() } func (m *TableGroupSpec) XXX_DiscardUnknown() { xxx_messageInfo_TableGroupSpec.DiscardUnknown(m) @@ -107,18 +117,26 @@ func (*Config) ProtoMessage() {} func (*Config) Descriptor() ([]byte, []int) { return 
fileDescriptor_7d0bedb248a1632e, []int{1} } - func (m *Config) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Config.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Config.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Config) XXX_Merge(src proto.Message) { xxx_messageInfo_Config.Merge(m, src) } func (m *Config) XXX_Size() int { - return xxx_messageInfo_Config.Size(m) + return m.Size() } func (m *Config) XXX_DiscardUnknown() { xxx_messageInfo_Config.DiscardUnknown(m) @@ -141,20 +159,594 @@ func init() { func init() { proto.RegisterFile("tableacl.proto", fileDescriptor_7d0bedb248a1632e) } var fileDescriptor_7d0bedb248a1632e = []byte{ - // 232 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4b, 0xc3, 0x30, - 0x14, 0xc6, 0x89, 0x9d, 0xd5, 0xbd, 0xc9, 0x0e, 0x41, 0x34, 0xc7, 0x32, 0x10, 0x7b, 0x6a, 0x40, - 0xf1, 0xe4, 0x4d, 0x11, 0x6f, 0x2a, 0xd5, 0x93, 0x97, 0x92, 0x6d, 0x6f, 0x25, 0xb0, 0x35, 0xe1, - 0xbd, 0x38, 0xfd, 0x8f, 0xfc, 0x37, 0x25, 0x69, 0x3b, 0xf0, 0xf6, 0xfd, 0xf8, 0x25, 0xe1, 0xfb, - 0x02, 0xf3, 0x60, 0x96, 0x5b, 0x34, 0xab, 0x6d, 0xe5, 0xc9, 0x05, 0x27, 0x4f, 0x47, 0x5e, 0xfc, - 0x0a, 0x98, 0x7f, 0x44, 0x78, 0x26, 0xf7, 0xe5, 0xdf, 0x3d, 0xae, 0xa4, 0x84, 0x49, 0x67, 0x76, - 0xa8, 0x44, 0x21, 0xca, 0x69, 0x9d, 0xb2, 0xbc, 0x83, 0xcb, 0x74, 0xa5, 0x89, 0xc4, 0x8d, 0xa3, - 0xc6, 0x13, 0x6e, 0xec, 0x0f, 0xb2, 0x3a, 0x2a, 0xb2, 0x72, 0x5a, 0x9f, 0x27, 0xfd, 0x12, 0xed, - 0x2b, 0xbd, 0x0d, 0x4e, 0x2a, 0x38, 0x21, 0x34, 0x6b, 0x24, 0x56, 0x59, 0x3a, 0x36, 0x62, 0x34, - 0xdf, 0x64, 0x43, 0x34, 0x93, 0xde, 0x0c, 0x28, 0x2f, 0x20, 0x37, 0xeb, 0x9d, 0xed, 0x58, 0x1d, - 0x27, 0x31, 0xd0, 
0xe2, 0x09, 0xf2, 0x47, 0xd7, 0x6d, 0x6c, 0x2b, 0xef, 0xe1, 0xac, 0x2f, 0xd3, - 0xc6, 0xce, 0xac, 0x44, 0x91, 0x95, 0xb3, 0x1b, 0x55, 0x1d, 0x46, 0xfe, 0x1f, 0x54, 0xcf, 0xc2, - 0x81, 0xf9, 0xe1, 0xfa, 0xf3, 0x6a, 0x6f, 0x03, 0x32, 0x57, 0xd6, 0xe9, 0x3e, 0xe9, 0xd6, 0xe9, - 0x7d, 0xd0, 0xe9, 0x6b, 0xf4, 0xf8, 0xc8, 0x32, 0x4f, 0x7c, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, - 0x09, 0x82, 0xf5, 0x82, 0x3c, 0x01, 0x00, 0x00, + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x49, 0x4c, 0xca, + 0x49, 0x4d, 0x4c, 0xce, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf1, 0x95, 0x96, + 0x33, 0x72, 0xf1, 0x85, 0x80, 0x38, 0xee, 0x45, 0xf9, 0xa5, 0x05, 0xc1, 0x05, 0xa9, 0xc9, 0x42, + 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, + 0x90, 0x29, 0x97, 0x38, 0x58, 0x4b, 0x3c, 0x88, 0x57, 0x1c, 0x9f, 0x5f, 0x14, 0x5f, 0x50, 0x94, + 0x9a, 0x96, 0x59, 0x91, 0x5a, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, 0x19, 0x24, 0x02, 0x96, 0xf6, + 0x03, 0xc9, 0xfa, 0x17, 0x05, 0x40, 0xe5, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x52, 0x13, 0x53, 0x52, + 0x8b, 0x8a, 0x25, 0x98, 0xc1, 0xca, 0x60, 0x5c, 0x90, 0x4c, 0x79, 0x51, 0x66, 0x09, 0x48, 0x86, + 0x05, 0x22, 0x03, 0xe5, 0x0a, 0x89, 0x71, 0xb1, 0x25, 0xa6, 0xe4, 0x66, 0xe6, 0x15, 0x4b, 0xb0, + 0x82, 0x25, 0xa0, 0x3c, 0x25, 0x57, 0x2e, 0x36, 0xe7, 0xfc, 0xbc, 0xb4, 0xcc, 0x74, 0x21, 0x6b, + 0x2e, 0x1e, 0x88, 0x63, 0xd2, 0x41, 0x6e, 0x2e, 0x96, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x92, + 0xd0, 0x83, 0x7b, 0x12, 0xd5, 0x43, 0x41, 0xdc, 0x25, 0x70, 0x7e, 0xb1, 0x93, 0xf9, 0x89, 0x47, + 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x94, + 0x6a, 0x59, 0x66, 0x49, 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, 0x84, 0xa5, 0x9f, 0x9e, 0xaf, + 0x5f, 0x56, 0xa2, 0x0f, 0x0e, 0x2a, 0x7d, 0x98, 0xa1, 0x49, 0x6c, 0x60, 0xbe, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0xe5, 0x2a, 0xb5, 0x83, 0x4c, 0x01, 
0x00, 0x00, +} + +func (m *TableGroupSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableGroupSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableGroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Admins) > 0 { + for iNdEx := len(m.Admins) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Admins[iNdEx]) + copy(dAtA[i:], m.Admins[iNdEx]) + i = encodeVarintTableacl(dAtA, i, uint64(len(m.Admins[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Writers) > 0 { + for iNdEx := len(m.Writers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Writers[iNdEx]) + copy(dAtA[i:], m.Writers[iNdEx]) + i = encodeVarintTableacl(dAtA, i, uint64(len(m.Writers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Readers) > 0 { + for iNdEx := len(m.Readers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Readers[iNdEx]) + copy(dAtA[i:], m.Readers[iNdEx]) + i = encodeVarintTableacl(dAtA, i, uint64(len(m.Readers[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.TableNamesOrPrefixes) > 0 { + for iNdEx := len(m.TableNamesOrPrefixes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TableNamesOrPrefixes[iNdEx]) + copy(dAtA[i:], m.TableNamesOrPrefixes[iNdEx]) + i = encodeVarintTableacl(dAtA, i, uint64(len(m.TableNamesOrPrefixes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTableacl(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } + +func (m *Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.TableGroups) > 0 { + for iNdEx := len(m.TableGroups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TableGroups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTableacl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintTableacl(dAtA []byte, offset int, v uint64) int { + offset -= sovTableacl(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TableGroupSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTableacl(uint64(l)) + } + if len(m.TableNamesOrPrefixes) > 0 { + for _, s := range m.TableNamesOrPrefixes { + l = len(s) + n += 1 + l + sovTableacl(uint64(l)) + } + } + if len(m.Readers) > 0 { + for _, s := range m.Readers { + l = len(s) + n += 1 + l + sovTableacl(uint64(l)) + } + } + if len(m.Writers) > 0 { + for _, s := range m.Writers { + l = len(s) + n += 1 + l + sovTableacl(uint64(l)) + } + } + if len(m.Admins) > 0 { + for _, s := range m.Admins { + l = len(s) + n += 1 + l + sovTableacl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TableGroups) > 0 { + for _, e := range m.TableGroups { + l = e.Size() + n += 1 + l + sovTableacl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func 
sovTableacl(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTableacl(x uint64) (n int) { + return sovTableacl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TableGroupSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableGroupSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableGroupSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTableacl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTableacl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOrPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 
{ + return ErrInvalidLengthTableacl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTableacl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableNamesOrPrefixes = append(m.TableNamesOrPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Readers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTableacl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTableacl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Readers = append(m.Readers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Writers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTableacl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTableacl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Writers = append(m.Writers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Admins", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTableacl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTableacl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Admins = append(m.Admins, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTableacl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTableacl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTableacl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTableacl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTableacl + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthTableacl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableGroups = append(m.TableGroups, &TableGroupSpec{}) + if err := m.TableGroups[len(m.TableGroups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTableacl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTableacl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTableacl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTableacl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTableacl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTableacl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTableacl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTableacl + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTableacl + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", 
wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTableacl + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTableacl = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTableacl = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTableacl = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index d6dfc5f5160..b2da2486d5b 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -1,14 +1,15 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: tabletmanagerdata.proto package tabletmanagerdata import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - logutil "vitess.io/vitess/go/vt/proto/logutil" query "vitess.io/vitess/go/vt/proto/query" replicationdata "vitess.io/vitess/go/vt/proto/replicationdata" @@ -55,18 +56,26 @@ func (*TableDefinition) ProtoMessage() {} func (*TableDefinition) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{0} } - func (m *TableDefinition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TableDefinition.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *TableDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TableDefinition.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_TableDefinition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *TableDefinition) XXX_Merge(src proto.Message) { xxx_messageInfo_TableDefinition.Merge(m, src) } func (m *TableDefinition) XXX_Size() int { - return 
xxx_messageInfo_TableDefinition.Size(m) + return m.Size() } func (m *TableDefinition) XXX_DiscardUnknown() { xxx_messageInfo_TableDefinition.DiscardUnknown(m) @@ -145,18 +154,26 @@ func (*SchemaDefinition) ProtoMessage() {} func (*SchemaDefinition) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{1} } - func (m *SchemaDefinition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SchemaDefinition.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SchemaDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SchemaDefinition.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SchemaDefinition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SchemaDefinition) XXX_Merge(src proto.Message) { xxx_messageInfo_SchemaDefinition.Merge(m, src) } func (m *SchemaDefinition) XXX_Size() int { - return xxx_messageInfo_SchemaDefinition.Size(m) + return m.Size() } func (m *SchemaDefinition) XXX_DiscardUnknown() { xxx_messageInfo_SchemaDefinition.DiscardUnknown(m) @@ -201,18 +218,26 @@ func (*SchemaChangeResult) ProtoMessage() {} func (*SchemaChangeResult) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{2} } - func (m *SchemaChangeResult) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SchemaChangeResult.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SchemaChangeResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SchemaChangeResult.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SchemaChangeResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SchemaChangeResult) XXX_Merge(src proto.Message) { xxx_messageInfo_SchemaChangeResult.Merge(m, 
src) } func (m *SchemaChangeResult) XXX_Size() int { - return xxx_messageInfo_SchemaChangeResult.Size(m) + return m.Size() } func (m *SchemaChangeResult) XXX_DiscardUnknown() { xxx_messageInfo_SchemaChangeResult.DiscardUnknown(m) @@ -253,18 +278,26 @@ func (*UserPermission) ProtoMessage() {} func (*UserPermission) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{3} } - func (m *UserPermission) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserPermission.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *UserPermission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserPermission.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_UserPermission.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *UserPermission) XXX_Merge(src proto.Message) { xxx_messageInfo_UserPermission.Merge(m, src) } func (m *UserPermission) XXX_Size() int { - return xxx_messageInfo_UserPermission.Size(m) + return m.Size() } func (m *UserPermission) XXX_DiscardUnknown() { xxx_messageInfo_UserPermission.DiscardUnknown(m) @@ -318,18 +351,26 @@ func (*DbPermission) ProtoMessage() {} func (*DbPermission) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{4} } - func (m *DbPermission) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DbPermission.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *DbPermission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DbPermission.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_DbPermission.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *DbPermission) XXX_Merge(src proto.Message) { xxx_messageInfo_DbPermission.Merge(m, src) } func 
(m *DbPermission) XXX_Size() int { - return xxx_messageInfo_DbPermission.Size(m) + return m.Size() } func (m *DbPermission) XXX_DiscardUnknown() { xxx_messageInfo_DbPermission.DiscardUnknown(m) @@ -381,18 +422,26 @@ func (*Permissions) ProtoMessage() {} func (*Permissions) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{5} } - func (m *Permissions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Permissions.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Permissions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Permissions.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Permissions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Permissions) XXX_Merge(src proto.Message) { xxx_messageInfo_Permissions.Merge(m, src) } func (m *Permissions) XXX_Size() int { - return xxx_messageInfo_Permissions.Size(m) + return m.Size() } func (m *Permissions) XXX_DiscardUnknown() { xxx_messageInfo_Permissions.DiscardUnknown(m) @@ -427,18 +476,26 @@ func (*PingRequest) ProtoMessage() {} func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{6} } - func (m *PingRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PingRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PingRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PingRequest.Merge(m, src) } func (m *PingRequest) XXX_Size() int { - return xxx_messageInfo_PingRequest.Size(m) + 
return m.Size() } func (m *PingRequest) XXX_DiscardUnknown() { xxx_messageInfo_PingRequest.DiscardUnknown(m) @@ -466,18 +523,26 @@ func (*PingResponse) ProtoMessage() {} func (*PingResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{7} } - func (m *PingResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PingResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PingResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PingResponse.Merge(m, src) } func (m *PingResponse) XXX_Size() int { - return xxx_messageInfo_PingResponse.Size(m) + return m.Size() } func (m *PingResponse) XXX_DiscardUnknown() { xxx_messageInfo_PingResponse.DiscardUnknown(m) @@ -506,18 +571,26 @@ func (*SleepRequest) ProtoMessage() {} func (*SleepRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{8} } - func (m *SleepRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SleepRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SleepRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SleepRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SleepRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SleepRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SleepRequest.Merge(m, src) } func (m *SleepRequest) XXX_Size() int { - return xxx_messageInfo_SleepRequest.Size(m) + return m.Size() } func (m *SleepRequest) XXX_DiscardUnknown() { 
xxx_messageInfo_SleepRequest.DiscardUnknown(m) @@ -544,18 +617,26 @@ func (*SleepResponse) ProtoMessage() {} func (*SleepResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{9} } - func (m *SleepResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SleepResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SleepResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SleepResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SleepResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SleepResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SleepResponse.Merge(m, src) } func (m *SleepResponse) XXX_Size() int { - return xxx_messageInfo_SleepResponse.Size(m) + return m.Size() } func (m *SleepResponse) XXX_DiscardUnknown() { xxx_messageInfo_SleepResponse.DiscardUnknown(m) @@ -578,18 +659,26 @@ func (*ExecuteHookRequest) ProtoMessage() {} func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{10} } - func (m *ExecuteHookRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteHookRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteHookRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteHookRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteHookRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteHookRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteHookRequest.Merge(m, src) } func (m *ExecuteHookRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteHookRequest.Size(m) + return m.Size() } func (m 
*ExecuteHookRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteHookRequest.DiscardUnknown(m) @@ -633,18 +722,26 @@ func (*ExecuteHookResponse) ProtoMessage() {} func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{11} } - func (m *ExecuteHookResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteHookResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteHookResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteHookResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteHookResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteHookResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteHookResponse.Merge(m, src) } func (m *ExecuteHookResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteHookResponse.Size(m) + return m.Size() } func (m *ExecuteHookResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteHookResponse.DiscardUnknown(m) @@ -688,18 +785,26 @@ func (*GetSchemaRequest) ProtoMessage() {} func (*GetSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{12} } - func (m *GetSchemaRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSchemaRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSchemaRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetSchemaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSchemaRequest.Merge(m, src) } func (m *GetSchemaRequest) 
XXX_Size() int { - return xxx_messageInfo_GetSchemaRequest.Size(m) + return m.Size() } func (m *GetSchemaRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetSchemaRequest.DiscardUnknown(m) @@ -741,18 +846,26 @@ func (*GetSchemaResponse) ProtoMessage() {} func (*GetSchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{13} } - func (m *GetSchemaResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSchemaResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSchemaResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetSchemaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetSchemaResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSchemaResponse.Merge(m, src) } func (m *GetSchemaResponse) XXX_Size() int { - return xxx_messageInfo_GetSchemaResponse.Size(m) + return m.Size() } func (m *GetSchemaResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetSchemaResponse.DiscardUnknown(m) @@ -779,18 +892,26 @@ func (*GetPermissionsRequest) ProtoMessage() {} func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{14} } - func (m *GetPermissionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPermissionsRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPermissionsRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetPermissionsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetPermissionsRequest) 
XXX_Merge(src proto.Message) { xxx_messageInfo_GetPermissionsRequest.Merge(m, src) } func (m *GetPermissionsRequest) XXX_Size() int { - return xxx_messageInfo_GetPermissionsRequest.Size(m) + return m.Size() } func (m *GetPermissionsRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetPermissionsRequest.DiscardUnknown(m) @@ -811,18 +932,26 @@ func (*GetPermissionsResponse) ProtoMessage() {} func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{15} } - func (m *GetPermissionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPermissionsResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPermissionsResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetPermissionsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetPermissionsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetPermissionsResponse.Merge(m, src) } func (m *GetPermissionsResponse) XXX_Size() int { - return xxx_messageInfo_GetPermissionsResponse.Size(m) + return m.Size() } func (m *GetPermissionsResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetPermissionsResponse.DiscardUnknown(m) @@ -849,18 +978,26 @@ func (*SetReadOnlyRequest) ProtoMessage() {} func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{16} } - func (m *SetReadOnlyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetReadOnlyRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetReadOnlyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetReadOnlyRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetReadOnlyRequest.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetReadOnlyRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SetReadOnlyRequest.Merge(m, src) } func (m *SetReadOnlyRequest) XXX_Size() int { - return xxx_messageInfo_SetReadOnlyRequest.Size(m) + return m.Size() } func (m *SetReadOnlyRequest) XXX_DiscardUnknown() { xxx_messageInfo_SetReadOnlyRequest.DiscardUnknown(m) @@ -880,18 +1017,26 @@ func (*SetReadOnlyResponse) ProtoMessage() {} func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{17} } - func (m *SetReadOnlyResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetReadOnlyResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetReadOnlyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetReadOnlyResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetReadOnlyResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetReadOnlyResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SetReadOnlyResponse.Merge(m, src) } func (m *SetReadOnlyResponse) XXX_Size() int { - return xxx_messageInfo_SetReadOnlyResponse.Size(m) + return m.Size() } func (m *SetReadOnlyResponse) XXX_DiscardUnknown() { xxx_messageInfo_SetReadOnlyResponse.DiscardUnknown(m) @@ -911,18 +1056,26 @@ func (*SetReadWriteRequest) ProtoMessage() {} func (*SetReadWriteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{18} } - func (m *SetReadWriteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetReadWriteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetReadWriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_SetReadWriteRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetReadWriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetReadWriteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SetReadWriteRequest.Merge(m, src) } func (m *SetReadWriteRequest) XXX_Size() int { - return xxx_messageInfo_SetReadWriteRequest.Size(m) + return m.Size() } func (m *SetReadWriteRequest) XXX_DiscardUnknown() { xxx_messageInfo_SetReadWriteRequest.DiscardUnknown(m) @@ -942,18 +1095,26 @@ func (*SetReadWriteResponse) ProtoMessage() {} func (*SetReadWriteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{19} } - func (m *SetReadWriteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetReadWriteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetReadWriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetReadWriteResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetReadWriteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetReadWriteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SetReadWriteResponse.Merge(m, src) } func (m *SetReadWriteResponse) XXX_Size() int { - return xxx_messageInfo_SetReadWriteResponse.Size(m) + return m.Size() } func (m *SetReadWriteResponse) XXX_DiscardUnknown() { xxx_messageInfo_SetReadWriteResponse.DiscardUnknown(m) @@ -974,18 +1135,26 @@ func (*ChangeTypeRequest) ProtoMessage() {} func (*ChangeTypeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{20} } - func (m *ChangeTypeRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_ChangeTypeRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ChangeTypeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChangeTypeRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ChangeTypeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ChangeTypeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ChangeTypeRequest.Merge(m, src) } func (m *ChangeTypeRequest) XXX_Size() int { - return xxx_messageInfo_ChangeTypeRequest.Size(m) + return m.Size() } func (m *ChangeTypeRequest) XXX_DiscardUnknown() { xxx_messageInfo_ChangeTypeRequest.DiscardUnknown(m) @@ -1012,18 +1181,26 @@ func (*ChangeTypeResponse) ProtoMessage() {} func (*ChangeTypeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{21} } - func (m *ChangeTypeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChangeTypeResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ChangeTypeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChangeTypeResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ChangeTypeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ChangeTypeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ChangeTypeResponse.Merge(m, src) } func (m *ChangeTypeResponse) XXX_Size() int { - return xxx_messageInfo_ChangeTypeResponse.Size(m) + return m.Size() } func (m *ChangeTypeResponse) XXX_DiscardUnknown() { xxx_messageInfo_ChangeTypeResponse.DiscardUnknown(m) @@ -1043,18 +1220,26 @@ func (*RefreshStateRequest) ProtoMessage() {} func (*RefreshStateRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor_ff9ac4f89e61ffa4, []int{22} } - func (m *RefreshStateRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RefreshStateRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RefreshStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RefreshStateRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RefreshStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RefreshStateRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefreshStateRequest.Merge(m, src) } func (m *RefreshStateRequest) XXX_Size() int { - return xxx_messageInfo_RefreshStateRequest.Size(m) + return m.Size() } func (m *RefreshStateRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefreshStateRequest.DiscardUnknown(m) @@ -1074,18 +1259,26 @@ func (*RefreshStateResponse) ProtoMessage() {} func (*RefreshStateResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{23} } - func (m *RefreshStateResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RefreshStateResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RefreshStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RefreshStateResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RefreshStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RefreshStateResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefreshStateResponse.Merge(m, src) } func (m *RefreshStateResponse) XXX_Size() int { - return xxx_messageInfo_RefreshStateResponse.Size(m) + return m.Size() } func (m *RefreshStateResponse) XXX_DiscardUnknown() { 
xxx_messageInfo_RefreshStateResponse.DiscardUnknown(m) @@ -1105,18 +1298,26 @@ func (*RunHealthCheckRequest) ProtoMessage() {} func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{24} } - func (m *RunHealthCheckRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RunHealthCheckRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RunHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RunHealthCheckRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RunHealthCheckRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RunHealthCheckRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RunHealthCheckRequest.Merge(m, src) } func (m *RunHealthCheckRequest) XXX_Size() int { - return xxx_messageInfo_RunHealthCheckRequest.Size(m) + return m.Size() } func (m *RunHealthCheckRequest) XXX_DiscardUnknown() { xxx_messageInfo_RunHealthCheckRequest.DiscardUnknown(m) @@ -1136,18 +1337,26 @@ func (*RunHealthCheckResponse) ProtoMessage() {} func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{25} } - func (m *RunHealthCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RunHealthCheckResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RunHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RunHealthCheckResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RunHealthCheckResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RunHealthCheckResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RunHealthCheckResponse.Merge(m, 
src) } func (m *RunHealthCheckResponse) XXX_Size() int { - return xxx_messageInfo_RunHealthCheckResponse.Size(m) + return m.Size() } func (m *RunHealthCheckResponse) XXX_DiscardUnknown() { xxx_messageInfo_RunHealthCheckResponse.DiscardUnknown(m) @@ -1168,18 +1377,26 @@ func (*IgnoreHealthErrorRequest) ProtoMessage() {} func (*IgnoreHealthErrorRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{26} } - func (m *IgnoreHealthErrorRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IgnoreHealthErrorRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *IgnoreHealthErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IgnoreHealthErrorRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_IgnoreHealthErrorRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *IgnoreHealthErrorRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_IgnoreHealthErrorRequest.Merge(m, src) } func (m *IgnoreHealthErrorRequest) XXX_Size() int { - return xxx_messageInfo_IgnoreHealthErrorRequest.Size(m) + return m.Size() } func (m *IgnoreHealthErrorRequest) XXX_DiscardUnknown() { xxx_messageInfo_IgnoreHealthErrorRequest.DiscardUnknown(m) @@ -1206,18 +1423,26 @@ func (*IgnoreHealthErrorResponse) ProtoMessage() {} func (*IgnoreHealthErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{27} } - func (m *IgnoreHealthErrorResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IgnoreHealthErrorResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *IgnoreHealthErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IgnoreHealthErrorResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_IgnoreHealthErrorResponse.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *IgnoreHealthErrorResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_IgnoreHealthErrorResponse.Merge(m, src) } func (m *IgnoreHealthErrorResponse) XXX_Size() int { - return xxx_messageInfo_IgnoreHealthErrorResponse.Size(m) + return m.Size() } func (m *IgnoreHealthErrorResponse) XXX_DiscardUnknown() { xxx_messageInfo_IgnoreHealthErrorResponse.DiscardUnknown(m) @@ -1241,18 +1466,26 @@ func (*ReloadSchemaRequest) ProtoMessage() {} func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{28} } - func (m *ReloadSchemaRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReloadSchemaRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReloadSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReloadSchemaRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReloadSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReloadSchemaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReloadSchemaRequest.Merge(m, src) } func (m *ReloadSchemaRequest) XXX_Size() int { - return xxx_messageInfo_ReloadSchemaRequest.Size(m) + return m.Size() } func (m *ReloadSchemaRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReloadSchemaRequest.DiscardUnknown(m) @@ -1279,18 +1512,26 @@ func (*ReloadSchemaResponse) ProtoMessage() {} func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{29} } - func (m *ReloadSchemaResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReloadSchemaResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReloadSchemaResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { - return xxx_messageInfo_ReloadSchemaResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReloadSchemaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReloadSchemaResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReloadSchemaResponse.Merge(m, src) } func (m *ReloadSchemaResponse) XXX_Size() int { - return xxx_messageInfo_ReloadSchemaResponse.Size(m) + return m.Size() } func (m *ReloadSchemaResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReloadSchemaResponse.DiscardUnknown(m) @@ -1311,18 +1552,26 @@ func (*PreflightSchemaRequest) ProtoMessage() {} func (*PreflightSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{30} } - func (m *PreflightSchemaRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PreflightSchemaRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PreflightSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PreflightSchemaRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PreflightSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PreflightSchemaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PreflightSchemaRequest.Merge(m, src) } func (m *PreflightSchemaRequest) XXX_Size() int { - return xxx_messageInfo_PreflightSchemaRequest.Size(m) + return m.Size() } func (m *PreflightSchemaRequest) XXX_DiscardUnknown() { xxx_messageInfo_PreflightSchemaRequest.DiscardUnknown(m) @@ -1352,18 +1601,26 @@ func (*PreflightSchemaResponse) ProtoMessage() {} func (*PreflightSchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{31} } - func (m *PreflightSchemaResponse) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PreflightSchemaResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PreflightSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PreflightSchemaResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PreflightSchemaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PreflightSchemaResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PreflightSchemaResponse.Merge(m, src) } func (m *PreflightSchemaResponse) XXX_Size() int { - return xxx_messageInfo_PreflightSchemaResponse.Size(m) + return m.Size() } func (m *PreflightSchemaResponse) XXX_DiscardUnknown() { xxx_messageInfo_PreflightSchemaResponse.DiscardUnknown(m) @@ -1395,18 +1652,26 @@ func (*ApplySchemaRequest) ProtoMessage() {} func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{32} } - func (m *ApplySchemaRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplySchemaRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ApplySchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplySchemaRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ApplySchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ApplySchemaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ApplySchemaRequest.Merge(m, src) } func (m *ApplySchemaRequest) XXX_Size() int { - return xxx_messageInfo_ApplySchemaRequest.Size(m) + return m.Size() } func (m *ApplySchemaRequest) XXX_DiscardUnknown() { xxx_messageInfo_ApplySchemaRequest.DiscardUnknown(m) @@ -1463,18 +1728,26 @@ func (*ApplySchemaResponse) 
ProtoMessage() {} func (*ApplySchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{33} } - func (m *ApplySchemaResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplySchemaResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ApplySchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplySchemaResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ApplySchemaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ApplySchemaResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ApplySchemaResponse.Merge(m, src) } func (m *ApplySchemaResponse) XXX_Size() int { - return xxx_messageInfo_ApplySchemaResponse.Size(m) + return m.Size() } func (m *ApplySchemaResponse) XXX_DiscardUnknown() { xxx_messageInfo_ApplySchemaResponse.DiscardUnknown(m) @@ -1508,18 +1781,26 @@ func (*LockTablesRequest) ProtoMessage() {} func (*LockTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{34} } - func (m *LockTablesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LockTablesRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *LockTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LockTablesRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_LockTablesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *LockTablesRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LockTablesRequest.Merge(m, src) } func (m *LockTablesRequest) XXX_Size() int { - return xxx_messageInfo_LockTablesRequest.Size(m) + return m.Size() } func (m *LockTablesRequest) XXX_DiscardUnknown() { 
xxx_messageInfo_LockTablesRequest.DiscardUnknown(m) @@ -1539,18 +1820,26 @@ func (*LockTablesResponse) ProtoMessage() {} func (*LockTablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{35} } - func (m *LockTablesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LockTablesResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *LockTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LockTablesResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_LockTablesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *LockTablesResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LockTablesResponse.Merge(m, src) } func (m *LockTablesResponse) XXX_Size() int { - return xxx_messageInfo_LockTablesResponse.Size(m) + return m.Size() } func (m *LockTablesResponse) XXX_DiscardUnknown() { xxx_messageInfo_LockTablesResponse.DiscardUnknown(m) @@ -1570,18 +1859,26 @@ func (*UnlockTablesRequest) ProtoMessage() {} func (*UnlockTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{36} } - func (m *UnlockTablesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UnlockTablesRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *UnlockTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UnlockTablesRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_UnlockTablesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *UnlockTablesRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UnlockTablesRequest.Merge(m, src) } func (m *UnlockTablesRequest) XXX_Size() int { - return 
xxx_messageInfo_UnlockTablesRequest.Size(m) + return m.Size() } func (m *UnlockTablesRequest) XXX_DiscardUnknown() { xxx_messageInfo_UnlockTablesRequest.DiscardUnknown(m) @@ -1601,18 +1898,26 @@ func (*UnlockTablesResponse) ProtoMessage() {} func (*UnlockTablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{37} } - func (m *UnlockTablesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UnlockTablesResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *UnlockTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UnlockTablesResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_UnlockTablesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *UnlockTablesResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UnlockTablesResponse.Merge(m, src) } func (m *UnlockTablesResponse) XXX_Size() int { - return xxx_messageInfo_UnlockTablesResponse.Size(m) + return m.Size() } func (m *UnlockTablesResponse) XXX_DiscardUnknown() { xxx_messageInfo_UnlockTablesResponse.DiscardUnknown(m) @@ -1637,18 +1942,26 @@ func (*ExecuteFetchAsDbaRequest) ProtoMessage() {} func (*ExecuteFetchAsDbaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{38} } - func (m *ExecuteFetchAsDbaRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteFetchAsDbaRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteFetchAsDbaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteFetchAsDbaRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteFetchAsDbaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil 
+ } } func (m *ExecuteFetchAsDbaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteFetchAsDbaRequest.Merge(m, src) } func (m *ExecuteFetchAsDbaRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteFetchAsDbaRequest.Size(m) + return m.Size() } func (m *ExecuteFetchAsDbaRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteFetchAsDbaRequest.DiscardUnknown(m) @@ -1704,18 +2017,26 @@ func (*ExecuteFetchAsDbaResponse) ProtoMessage() {} func (*ExecuteFetchAsDbaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{39} } - func (m *ExecuteFetchAsDbaResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteFetchAsDbaResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteFetchAsDbaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteFetchAsDbaResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteFetchAsDbaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteFetchAsDbaResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteFetchAsDbaResponse.Merge(m, src) } func (m *ExecuteFetchAsDbaResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteFetchAsDbaResponse.Size(m) + return m.Size() } func (m *ExecuteFetchAsDbaResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteFetchAsDbaResponse.DiscardUnknown(m) @@ -1746,18 +2067,26 @@ func (*ExecuteFetchAsAllPrivsRequest) ProtoMessage() {} func (*ExecuteFetchAsAllPrivsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{40} } - func (m *ExecuteFetchAsAllPrivsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteFetchAsAllPrivsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteFetchAsAllPrivsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Merge(m, src) } func (m *ExecuteFetchAsAllPrivsRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Size(m) + return m.Size() } func (m *ExecuteFetchAsAllPrivsRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.DiscardUnknown(m) @@ -1806,18 +2135,26 @@ func (*ExecuteFetchAsAllPrivsResponse) ProtoMessage() {} func (*ExecuteFetchAsAllPrivsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{41} } - func (m *ExecuteFetchAsAllPrivsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteFetchAsAllPrivsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteFetchAsAllPrivsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Merge(m, src) } func (m *ExecuteFetchAsAllPrivsResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Size(m) + return m.Size() } func (m *ExecuteFetchAsAllPrivsResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.DiscardUnknown(m) @@ -1846,18 +2183,26 @@ func (*ExecuteFetchAsAppRequest) ProtoMessage() {} func 
(*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{42} } - func (m *ExecuteFetchAsAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteFetchAsAppRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteFetchAsAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteFetchAsAppRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteFetchAsAppRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteFetchAsAppRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteFetchAsAppRequest.Merge(m, src) } func (m *ExecuteFetchAsAppRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteFetchAsAppRequest.Size(m) + return m.Size() } func (m *ExecuteFetchAsAppRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteFetchAsAppRequest.DiscardUnknown(m) @@ -1892,18 +2237,26 @@ func (*ExecuteFetchAsAppResponse) ProtoMessage() {} func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{43} } - func (m *ExecuteFetchAsAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteFetchAsAppResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteFetchAsAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteFetchAsAppResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteFetchAsAppResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteFetchAsAppResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteFetchAsAppResponse.Merge(m, src) } func (m *ExecuteFetchAsAppResponse) XXX_Size() int { - return 
xxx_messageInfo_ExecuteFetchAsAppResponse.Size(m) + return m.Size() } func (m *ExecuteFetchAsAppResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteFetchAsAppResponse.DiscardUnknown(m) @@ -1930,18 +2283,26 @@ func (*ReplicationStatusRequest) ProtoMessage() {} func (*ReplicationStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{44} } - func (m *ReplicationStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReplicationStatusRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReplicationStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReplicationStatusRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReplicationStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReplicationStatusRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReplicationStatusRequest.Merge(m, src) } func (m *ReplicationStatusRequest) XXX_Size() int { - return xxx_messageInfo_ReplicationStatusRequest.Size(m) + return m.Size() } func (m *ReplicationStatusRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReplicationStatusRequest.DiscardUnknown(m) @@ -1962,18 +2323,26 @@ func (*ReplicationStatusResponse) ProtoMessage() {} func (*ReplicationStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{45} } - func (m *ReplicationStatusResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReplicationStatusResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReplicationStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReplicationStatusResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReplicationStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReplicationStatusResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReplicationStatusResponse.Merge(m, src) } func (m *ReplicationStatusResponse) XXX_Size() int { - return xxx_messageInfo_ReplicationStatusResponse.Size(m) + return m.Size() } func (m *ReplicationStatusResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReplicationStatusResponse.DiscardUnknown(m) @@ -2000,18 +2369,26 @@ func (*MasterStatusRequest) ProtoMessage() {} func (*MasterStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{46} } - func (m *MasterStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MasterStatusRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MasterStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MasterStatusRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MasterStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MasterStatusRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_MasterStatusRequest.Merge(m, src) } func (m *MasterStatusRequest) XXX_Size() int { - return xxx_messageInfo_MasterStatusRequest.Size(m) + return m.Size() } func (m *MasterStatusRequest) XXX_DiscardUnknown() { xxx_messageInfo_MasterStatusRequest.DiscardUnknown(m) @@ -2032,18 +2409,26 @@ func (*MasterStatusResponse) ProtoMessage() {} func (*MasterStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{47} } - func (m *MasterStatusResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MasterStatusResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MasterStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_MasterStatusResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MasterStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MasterStatusResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MasterStatusResponse.Merge(m, src) } func (m *MasterStatusResponse) XXX_Size() int { - return xxx_messageInfo_MasterStatusResponse.Size(m) + return m.Size() } func (m *MasterStatusResponse) XXX_DiscardUnknown() { xxx_messageInfo_MasterStatusResponse.DiscardUnknown(m) @@ -2070,18 +2455,26 @@ func (*MasterPositionRequest) ProtoMessage() {} func (*MasterPositionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{48} } - func (m *MasterPositionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MasterPositionRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MasterPositionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MasterPositionRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MasterPositionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MasterPositionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_MasterPositionRequest.Merge(m, src) } func (m *MasterPositionRequest) XXX_Size() int { - return xxx_messageInfo_MasterPositionRequest.Size(m) + return m.Size() } func (m *MasterPositionRequest) XXX_DiscardUnknown() { xxx_messageInfo_MasterPositionRequest.DiscardUnknown(m) @@ -2102,18 +2495,26 @@ func (*MasterPositionResponse) ProtoMessage() {} func (*MasterPositionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{49} } - func (m *MasterPositionResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_MasterPositionResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MasterPositionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MasterPositionResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MasterPositionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MasterPositionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MasterPositionResponse.Merge(m, src) } func (m *MasterPositionResponse) XXX_Size() int { - return xxx_messageInfo_MasterPositionResponse.Size(m) + return m.Size() } func (m *MasterPositionResponse) XXX_DiscardUnknown() { xxx_messageInfo_MasterPositionResponse.DiscardUnknown(m) @@ -2141,18 +2542,26 @@ func (*WaitForPositionRequest) ProtoMessage() {} func (*WaitForPositionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{50} } - func (m *WaitForPositionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WaitForPositionRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *WaitForPositionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WaitForPositionRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_WaitForPositionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *WaitForPositionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_WaitForPositionRequest.Merge(m, src) } func (m *WaitForPositionRequest) XXX_Size() int { - return xxx_messageInfo_WaitForPositionRequest.Size(m) + return m.Size() } func (m *WaitForPositionRequest) XXX_DiscardUnknown() { xxx_messageInfo_WaitForPositionRequest.DiscardUnknown(m) @@ -2179,18 +2588,26 @@ func (*WaitForPositionResponse) 
ProtoMessage() {} func (*WaitForPositionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{51} } - func (m *WaitForPositionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WaitForPositionResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *WaitForPositionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WaitForPositionResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_WaitForPositionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *WaitForPositionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_WaitForPositionResponse.Merge(m, src) } func (m *WaitForPositionResponse) XXX_Size() int { - return xxx_messageInfo_WaitForPositionResponse.Size(m) + return m.Size() } func (m *WaitForPositionResponse) XXX_DiscardUnknown() { xxx_messageInfo_WaitForPositionResponse.DiscardUnknown(m) @@ -2210,18 +2627,26 @@ func (*StopReplicationRequest) ProtoMessage() {} func (*StopReplicationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{52} } - func (m *StopReplicationRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReplicationRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StopReplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReplicationRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StopReplicationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StopReplicationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StopReplicationRequest.Merge(m, src) } func (m *StopReplicationRequest) XXX_Size() int { - return 
xxx_messageInfo_StopReplicationRequest.Size(m) + return m.Size() } func (m *StopReplicationRequest) XXX_DiscardUnknown() { xxx_messageInfo_StopReplicationRequest.DiscardUnknown(m) @@ -2241,18 +2666,26 @@ func (*StopReplicationResponse) ProtoMessage() {} func (*StopReplicationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{53} } - func (m *StopReplicationResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReplicationResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StopReplicationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReplicationResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StopReplicationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StopReplicationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StopReplicationResponse.Merge(m, src) } func (m *StopReplicationResponse) XXX_Size() int { - return xxx_messageInfo_StopReplicationResponse.Size(m) + return m.Size() } func (m *StopReplicationResponse) XXX_DiscardUnknown() { xxx_messageInfo_StopReplicationResponse.DiscardUnknown(m) @@ -2274,18 +2707,26 @@ func (*StopReplicationMinimumRequest) ProtoMessage() {} func (*StopReplicationMinimumRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{54} } - func (m *StopReplicationMinimumRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReplicationMinimumRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StopReplicationMinimumRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReplicationMinimumRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StopReplicationMinimumRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StopReplicationMinimumRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StopReplicationMinimumRequest.Merge(m, src) } func (m *StopReplicationMinimumRequest) XXX_Size() int { - return xxx_messageInfo_StopReplicationMinimumRequest.Size(m) + return m.Size() } func (m *StopReplicationMinimumRequest) XXX_DiscardUnknown() { xxx_messageInfo_StopReplicationMinimumRequest.DiscardUnknown(m) @@ -2320,18 +2761,26 @@ func (*StopReplicationMinimumResponse) ProtoMessage() {} func (*StopReplicationMinimumResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{55} } - func (m *StopReplicationMinimumResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReplicationMinimumResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StopReplicationMinimumResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReplicationMinimumResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StopReplicationMinimumResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StopReplicationMinimumResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StopReplicationMinimumResponse.Merge(m, src) } func (m *StopReplicationMinimumResponse) XXX_Size() int { - return xxx_messageInfo_StopReplicationMinimumResponse.Size(m) + return m.Size() } func (m *StopReplicationMinimumResponse) XXX_DiscardUnknown() { xxx_messageInfo_StopReplicationMinimumResponse.DiscardUnknown(m) @@ -2358,18 +2807,26 @@ func (*StartReplicationRequest) ProtoMessage() {} func (*StartReplicationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{56} } - func (m *StartReplicationRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_StartReplicationRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartReplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartReplicationRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartReplicationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StartReplicationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StartReplicationRequest.Merge(m, src) } func (m *StartReplicationRequest) XXX_Size() int { - return xxx_messageInfo_StartReplicationRequest.Size(m) + return m.Size() } func (m *StartReplicationRequest) XXX_DiscardUnknown() { xxx_messageInfo_StartReplicationRequest.DiscardUnknown(m) @@ -2389,18 +2846,26 @@ func (*StartReplicationResponse) ProtoMessage() {} func (*StartReplicationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{57} } - func (m *StartReplicationResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartReplicationResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartReplicationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartReplicationResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartReplicationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StartReplicationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StartReplicationResponse.Merge(m, src) } func (m *StartReplicationResponse) XXX_Size() int { - return xxx_messageInfo_StartReplicationResponse.Size(m) + return m.Size() } func (m *StartReplicationResponse) XXX_DiscardUnknown() { xxx_messageInfo_StartReplicationResponse.DiscardUnknown(m) @@ -2422,18 +2887,26 
@@ func (*StartReplicationUntilAfterRequest) ProtoMessage() {} func (*StartReplicationUntilAfterRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{58} } - func (m *StartReplicationUntilAfterRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartReplicationUntilAfterRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartReplicationUntilAfterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartReplicationUntilAfterRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartReplicationUntilAfterRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StartReplicationUntilAfterRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StartReplicationUntilAfterRequest.Merge(m, src) } func (m *StartReplicationUntilAfterRequest) XXX_Size() int { - return xxx_messageInfo_StartReplicationUntilAfterRequest.Size(m) + return m.Size() } func (m *StartReplicationUntilAfterRequest) XXX_DiscardUnknown() { xxx_messageInfo_StartReplicationUntilAfterRequest.DiscardUnknown(m) @@ -2467,18 +2940,26 @@ func (*StartReplicationUntilAfterResponse) ProtoMessage() {} func (*StartReplicationUntilAfterResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{59} } - func (m *StartReplicationUntilAfterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartReplicationUntilAfterResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StartReplicationUntilAfterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartReplicationUntilAfterResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StartReplicationUntilAfterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != 
nil { + return nil, err + } + return b[:n], nil + } } func (m *StartReplicationUntilAfterResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StartReplicationUntilAfterResponse.Merge(m, src) } func (m *StartReplicationUntilAfterResponse) XXX_Size() int { - return xxx_messageInfo_StartReplicationUntilAfterResponse.Size(m) + return m.Size() } func (m *StartReplicationUntilAfterResponse) XXX_DiscardUnknown() { xxx_messageInfo_StartReplicationUntilAfterResponse.DiscardUnknown(m) @@ -2498,18 +2979,26 @@ func (*GetReplicasRequest) ProtoMessage() {} func (*GetReplicasRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{60} } - func (m *GetReplicasRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetReplicasRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetReplicasRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetReplicasRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetReplicasRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetReplicasRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetReplicasRequest.Merge(m, src) } func (m *GetReplicasRequest) XXX_Size() int { - return xxx_messageInfo_GetReplicasRequest.Size(m) + return m.Size() } func (m *GetReplicasRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetReplicasRequest.DiscardUnknown(m) @@ -2530,18 +3019,26 @@ func (*GetReplicasResponse) ProtoMessage() {} func (*GetReplicasResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{61} } - func (m *GetReplicasResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetReplicasResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetReplicasResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_GetReplicasResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetReplicasResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetReplicasResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetReplicasResponse.Merge(m, src) } func (m *GetReplicasResponse) XXX_Size() int { - return xxx_messageInfo_GetReplicasResponse.Size(m) + return m.Size() } func (m *GetReplicasResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetReplicasResponse.DiscardUnknown(m) @@ -2568,18 +3065,26 @@ func (*ResetReplicationRequest) ProtoMessage() {} func (*ResetReplicationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{62} } - func (m *ResetReplicationRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResetReplicationRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ResetReplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetReplicationRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ResetReplicationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ResetReplicationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ResetReplicationRequest.Merge(m, src) } func (m *ResetReplicationRequest) XXX_Size() int { - return xxx_messageInfo_ResetReplicationRequest.Size(m) + return m.Size() } func (m *ResetReplicationRequest) XXX_DiscardUnknown() { xxx_messageInfo_ResetReplicationRequest.DiscardUnknown(m) @@ -2599,18 +3104,26 @@ func (*ResetReplicationResponse) ProtoMessage() {} func (*ResetReplicationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{63} } - func (m *ResetReplicationResponse) XXX_Unmarshal(b []byte) 
error { - return xxx_messageInfo_ResetReplicationResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ResetReplicationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetReplicationResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ResetReplicationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ResetReplicationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ResetReplicationResponse.Merge(m, src) } func (m *ResetReplicationResponse) XXX_Size() int { - return xxx_messageInfo_ResetReplicationResponse.Size(m) + return m.Size() } func (m *ResetReplicationResponse) XXX_DiscardUnknown() { xxx_messageInfo_ResetReplicationResponse.DiscardUnknown(m) @@ -2631,18 +3144,26 @@ func (*VReplicationExecRequest) ProtoMessage() {} func (*VReplicationExecRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{64} } - func (m *VReplicationExecRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VReplicationExecRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VReplicationExecRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VReplicationExecRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VReplicationExecRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VReplicationExecRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_VReplicationExecRequest.Merge(m, src) } func (m *VReplicationExecRequest) XXX_Size() int { - return xxx_messageInfo_VReplicationExecRequest.Size(m) + return m.Size() } func (m *VReplicationExecRequest) XXX_DiscardUnknown() { xxx_messageInfo_VReplicationExecRequest.DiscardUnknown(m) @@ 
-2670,18 +3191,26 @@ func (*VReplicationExecResponse) ProtoMessage() {} func (*VReplicationExecResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{65} } - func (m *VReplicationExecResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VReplicationExecResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VReplicationExecResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VReplicationExecResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VReplicationExecResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VReplicationExecResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_VReplicationExecResponse.Merge(m, src) } func (m *VReplicationExecResponse) XXX_Size() int { - return xxx_messageInfo_VReplicationExecResponse.Size(m) + return m.Size() } func (m *VReplicationExecResponse) XXX_DiscardUnknown() { xxx_messageInfo_VReplicationExecResponse.DiscardUnknown(m) @@ -2710,18 +3239,26 @@ func (*VReplicationWaitForPosRequest) ProtoMessage() {} func (*VReplicationWaitForPosRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{66} } - func (m *VReplicationWaitForPosRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VReplicationWaitForPosRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VReplicationWaitForPosRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VReplicationWaitForPosRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VReplicationWaitForPosRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VReplicationWaitForPosRequest) XXX_Merge(src proto.Message) { 
xxx_messageInfo_VReplicationWaitForPosRequest.Merge(m, src) } func (m *VReplicationWaitForPosRequest) XXX_Size() int { - return xxx_messageInfo_VReplicationWaitForPosRequest.Size(m) + return m.Size() } func (m *VReplicationWaitForPosRequest) XXX_DiscardUnknown() { xxx_messageInfo_VReplicationWaitForPosRequest.DiscardUnknown(m) @@ -2755,18 +3292,26 @@ func (*VReplicationWaitForPosResponse) ProtoMessage() {} func (*VReplicationWaitForPosResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{67} } - func (m *VReplicationWaitForPosResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VReplicationWaitForPosResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VReplicationWaitForPosResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VReplicationWaitForPosResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VReplicationWaitForPosResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VReplicationWaitForPosResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_VReplicationWaitForPosResponse.Merge(m, src) } func (m *VReplicationWaitForPosResponse) XXX_Size() int { - return xxx_messageInfo_VReplicationWaitForPosResponse.Size(m) + return m.Size() } func (m *VReplicationWaitForPosResponse) XXX_DiscardUnknown() { xxx_messageInfo_VReplicationWaitForPosResponse.DiscardUnknown(m) @@ -2786,18 +3331,26 @@ func (*InitMasterRequest) ProtoMessage() {} func (*InitMasterRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{68} } - func (m *InitMasterRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitMasterRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *InitMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_InitMasterRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_InitMasterRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *InitMasterRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_InitMasterRequest.Merge(m, src) } func (m *InitMasterRequest) XXX_Size() int { - return xxx_messageInfo_InitMasterRequest.Size(m) + return m.Size() } func (m *InitMasterRequest) XXX_DiscardUnknown() { xxx_messageInfo_InitMasterRequest.DiscardUnknown(m) @@ -2818,18 +3371,26 @@ func (*InitMasterResponse) ProtoMessage() {} func (*InitMasterResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{69} } - func (m *InitMasterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitMasterResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *InitMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InitMasterResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_InitMasterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *InitMasterResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_InitMasterResponse.Merge(m, src) } func (m *InitMasterResponse) XXX_Size() int { - return xxx_messageInfo_InitMasterResponse.Size(m) + return m.Size() } func (m *InitMasterResponse) XXX_DiscardUnknown() { xxx_messageInfo_InitMasterResponse.DiscardUnknown(m) @@ -2860,18 +3421,26 @@ func (*PopulateReparentJournalRequest) ProtoMessage() {} func (*PopulateReparentJournalRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{70} } - func (m *PopulateReparentJournalRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_PopulateReparentJournalRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PopulateReparentJournalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PopulateReparentJournalRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PopulateReparentJournalRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PopulateReparentJournalRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PopulateReparentJournalRequest.Merge(m, src) } func (m *PopulateReparentJournalRequest) XXX_Size() int { - return xxx_messageInfo_PopulateReparentJournalRequest.Size(m) + return m.Size() } func (m *PopulateReparentJournalRequest) XXX_DiscardUnknown() { xxx_messageInfo_PopulateReparentJournalRequest.DiscardUnknown(m) @@ -2919,18 +3488,26 @@ func (*PopulateReparentJournalResponse) ProtoMessage() {} func (*PopulateReparentJournalResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{71} } - func (m *PopulateReparentJournalResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PopulateReparentJournalResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PopulateReparentJournalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PopulateReparentJournalResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PopulateReparentJournalResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PopulateReparentJournalResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PopulateReparentJournalResponse.Merge(m, src) } func (m *PopulateReparentJournalResponse) XXX_Size() int { - return xxx_messageInfo_PopulateReparentJournalResponse.Size(m) + return 
m.Size() } func (m *PopulateReparentJournalResponse) XXX_DiscardUnknown() { xxx_messageInfo_PopulateReparentJournalResponse.DiscardUnknown(m) @@ -2953,18 +3530,26 @@ func (*InitReplicaRequest) ProtoMessage() {} func (*InitReplicaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{72} } - func (m *InitReplicaRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitReplicaRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *InitReplicaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InitReplicaRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_InitReplicaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *InitReplicaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_InitReplicaRequest.Merge(m, src) } func (m *InitReplicaRequest) XXX_Size() int { - return xxx_messageInfo_InitReplicaRequest.Size(m) + return m.Size() } func (m *InitReplicaRequest) XXX_DiscardUnknown() { xxx_messageInfo_InitReplicaRequest.DiscardUnknown(m) @@ -3005,18 +3590,26 @@ func (*InitReplicaResponse) ProtoMessage() {} func (*InitReplicaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{73} } - func (m *InitReplicaResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitReplicaResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *InitReplicaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InitReplicaResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_InitReplicaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *InitReplicaResponse) XXX_Merge(src proto.Message) { 
xxx_messageInfo_InitReplicaResponse.Merge(m, src) } func (m *InitReplicaResponse) XXX_Size() int { - return xxx_messageInfo_InitReplicaResponse.Size(m) + return m.Size() } func (m *InitReplicaResponse) XXX_DiscardUnknown() { xxx_messageInfo_InitReplicaResponse.DiscardUnknown(m) @@ -3036,18 +3629,26 @@ func (*DemoteMasterRequest) ProtoMessage() {} func (*DemoteMasterRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{74} } - func (m *DemoteMasterRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DemoteMasterRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *DemoteMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DemoteMasterRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_DemoteMasterRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *DemoteMasterRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_DemoteMasterRequest.Merge(m, src) } func (m *DemoteMasterRequest) XXX_Size() int { - return xxx_messageInfo_DemoteMasterRequest.Size(m) + return m.Size() } func (m *DemoteMasterRequest) XXX_DiscardUnknown() { xxx_messageInfo_DemoteMasterRequest.DiscardUnknown(m) @@ -3071,18 +3672,26 @@ func (*DemoteMasterResponse) ProtoMessage() {} func (*DemoteMasterResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{75} } - func (m *DemoteMasterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DemoteMasterResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *DemoteMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DemoteMasterResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_DemoteMasterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *DemoteMasterResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_DemoteMasterResponse.Merge(m, src) } func (m *DemoteMasterResponse) XXX_Size() int { - return xxx_messageInfo_DemoteMasterResponse.Size(m) + return m.Size() } func (m *DemoteMasterResponse) XXX_DiscardUnknown() { xxx_messageInfo_DemoteMasterResponse.DiscardUnknown(m) @@ -3117,18 +3726,26 @@ func (*UndoDemoteMasterRequest) ProtoMessage() {} func (*UndoDemoteMasterRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{76} } - func (m *UndoDemoteMasterRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UndoDemoteMasterRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *UndoDemoteMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UndoDemoteMasterRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_UndoDemoteMasterRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *UndoDemoteMasterRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UndoDemoteMasterRequest.Merge(m, src) } func (m *UndoDemoteMasterRequest) XXX_Size() int { - return xxx_messageInfo_UndoDemoteMasterRequest.Size(m) + return m.Size() } func (m *UndoDemoteMasterRequest) XXX_DiscardUnknown() { xxx_messageInfo_UndoDemoteMasterRequest.DiscardUnknown(m) @@ -3148,18 +3765,26 @@ func (*UndoDemoteMasterResponse) ProtoMessage() {} func (*UndoDemoteMasterResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{77} } - func (m *UndoDemoteMasterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UndoDemoteMasterResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *UndoDemoteMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 
- return xxx_messageInfo_UndoDemoteMasterResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_UndoDemoteMasterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *UndoDemoteMasterResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UndoDemoteMasterResponse.Merge(m, src) } func (m *UndoDemoteMasterResponse) XXX_Size() int { - return xxx_messageInfo_UndoDemoteMasterResponse.Size(m) + return m.Size() } func (m *UndoDemoteMasterResponse) XXX_DiscardUnknown() { xxx_messageInfo_UndoDemoteMasterResponse.DiscardUnknown(m) @@ -3179,18 +3804,26 @@ func (*ReplicaWasPromotedRequest) ProtoMessage() {} func (*ReplicaWasPromotedRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{78} } - func (m *ReplicaWasPromotedRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReplicaWasPromotedRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReplicaWasPromotedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReplicaWasPromotedRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReplicaWasPromotedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReplicaWasPromotedRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReplicaWasPromotedRequest.Merge(m, src) } func (m *ReplicaWasPromotedRequest) XXX_Size() int { - return xxx_messageInfo_ReplicaWasPromotedRequest.Size(m) + return m.Size() } func (m *ReplicaWasPromotedRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReplicaWasPromotedRequest.DiscardUnknown(m) @@ -3210,18 +3843,26 @@ func (*ReplicaWasPromotedResponse) ProtoMessage() {} func (*ReplicaWasPromotedResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor_ff9ac4f89e61ffa4, []int{79} } - func (m *ReplicaWasPromotedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReplicaWasPromotedResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReplicaWasPromotedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReplicaWasPromotedResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReplicaWasPromotedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReplicaWasPromotedResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReplicaWasPromotedResponse.Merge(m, src) } func (m *ReplicaWasPromotedResponse) XXX_Size() int { - return xxx_messageInfo_ReplicaWasPromotedResponse.Size(m) + return m.Size() } func (m *ReplicaWasPromotedResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReplicaWasPromotedResponse.DiscardUnknown(m) @@ -3245,18 +3886,26 @@ func (*SetMasterRequest) ProtoMessage() {} func (*SetMasterRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{80} } - func (m *SetMasterRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetMasterRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetMasterRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetMasterRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetMasterRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SetMasterRequest.Merge(m, src) } func (m *SetMasterRequest) XXX_Size() int { - return xxx_messageInfo_SetMasterRequest.Size(m) + return m.Size() } func (m *SetMasterRequest) XXX_DiscardUnknown() { 
xxx_messageInfo_SetMasterRequest.DiscardUnknown(m) @@ -3304,18 +3953,26 @@ func (*SetMasterResponse) ProtoMessage() {} func (*SetMasterResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{81} } - func (m *SetMasterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetMasterResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetMasterResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetMasterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetMasterResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SetMasterResponse.Merge(m, src) } func (m *SetMasterResponse) XXX_Size() int { - return xxx_messageInfo_SetMasterResponse.Size(m) + return m.Size() } func (m *SetMasterResponse) XXX_DiscardUnknown() { xxx_messageInfo_SetMasterResponse.DiscardUnknown(m) @@ -3337,18 +3994,26 @@ func (*ReplicaWasRestartedRequest) ProtoMessage() {} func (*ReplicaWasRestartedRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{82} } - func (m *ReplicaWasRestartedRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReplicaWasRestartedRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReplicaWasRestartedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReplicaWasRestartedRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReplicaWasRestartedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReplicaWasRestartedRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReplicaWasRestartedRequest.Merge(m, src) } func (m 
*ReplicaWasRestartedRequest) XXX_Size() int { - return xxx_messageInfo_ReplicaWasRestartedRequest.Size(m) + return m.Size() } func (m *ReplicaWasRestartedRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReplicaWasRestartedRequest.DiscardUnknown(m) @@ -3375,18 +4040,26 @@ func (*ReplicaWasRestartedResponse) ProtoMessage() {} func (*ReplicaWasRestartedResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{83} } - func (m *ReplicaWasRestartedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReplicaWasRestartedResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReplicaWasRestartedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReplicaWasRestartedResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ReplicaWasRestartedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReplicaWasRestartedResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReplicaWasRestartedResponse.Merge(m, src) } func (m *ReplicaWasRestartedResponse) XXX_Size() int { - return xxx_messageInfo_ReplicaWasRestartedResponse.Size(m) + return m.Size() } func (m *ReplicaWasRestartedResponse) XXX_DiscardUnknown() { xxx_messageInfo_ReplicaWasRestartedResponse.DiscardUnknown(m) @@ -3407,18 +4080,26 @@ func (*StopReplicationAndGetStatusRequest) ProtoMessage() {} func (*StopReplicationAndGetStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{84} } - func (m *StopReplicationAndGetStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReplicationAndGetStatusRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StopReplicationAndGetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReplicationAndGetStatusRequest.Marshal(b, m, deterministic) 
+ if deterministic { + return xxx_messageInfo_StopReplicationAndGetStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StopReplicationAndGetStatusRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StopReplicationAndGetStatusRequest.Merge(m, src) } func (m *StopReplicationAndGetStatusRequest) XXX_Size() int { - return xxx_messageInfo_StopReplicationAndGetStatusRequest.Size(m) + return m.Size() } func (m *StopReplicationAndGetStatusRequest) XXX_DiscardUnknown() { xxx_messageInfo_StopReplicationAndGetStatusRequest.DiscardUnknown(m) @@ -3451,18 +4132,26 @@ func (*StopReplicationAndGetStatusResponse) ProtoMessage() {} func (*StopReplicationAndGetStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{85} } - func (m *StopReplicationAndGetStatusResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReplicationAndGetStatusResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StopReplicationAndGetStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReplicationAndGetStatusResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StopReplicationAndGetStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StopReplicationAndGetStatusResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StopReplicationAndGetStatusResponse.Merge(m, src) } func (m *StopReplicationAndGetStatusResponse) XXX_Size() int { - return xxx_messageInfo_StopReplicationAndGetStatusResponse.Size(m) + return m.Size() } func (m *StopReplicationAndGetStatusResponse) XXX_DiscardUnknown() { xxx_messageInfo_StopReplicationAndGetStatusResponse.DiscardUnknown(m) @@ -3497,18 +4186,26 @@ func (*PromoteReplicaRequest) 
ProtoMessage() {} func (*PromoteReplicaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{86} } - func (m *PromoteReplicaRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PromoteReplicaRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PromoteReplicaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PromoteReplicaRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PromoteReplicaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PromoteReplicaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PromoteReplicaRequest.Merge(m, src) } func (m *PromoteReplicaRequest) XXX_Size() int { - return xxx_messageInfo_PromoteReplicaRequest.Size(m) + return m.Size() } func (m *PromoteReplicaRequest) XXX_DiscardUnknown() { xxx_messageInfo_PromoteReplicaRequest.DiscardUnknown(m) @@ -3529,18 +4226,26 @@ func (*PromoteReplicaResponse) ProtoMessage() {} func (*PromoteReplicaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{87} } - func (m *PromoteReplicaResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PromoteReplicaResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *PromoteReplicaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PromoteReplicaResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_PromoteReplicaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *PromoteReplicaResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PromoteReplicaResponse.Merge(m, src) } func (m *PromoteReplicaResponse) XXX_Size() int { - return 
xxx_messageInfo_PromoteReplicaResponse.Size(m) + return m.Size() } func (m *PromoteReplicaResponse) XXX_DiscardUnknown() { xxx_messageInfo_PromoteReplicaResponse.DiscardUnknown(m) @@ -3569,18 +4274,26 @@ func (*BackupRequest) ProtoMessage() {} func (*BackupRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{88} } - func (m *BackupRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BackupRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BackupRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BackupRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BackupRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BackupRequest.Merge(m, src) } func (m *BackupRequest) XXX_Size() int { - return xxx_messageInfo_BackupRequest.Size(m) + return m.Size() } func (m *BackupRequest) XXX_DiscardUnknown() { xxx_messageInfo_BackupRequest.DiscardUnknown(m) @@ -3615,18 +4328,26 @@ func (*BackupResponse) ProtoMessage() {} func (*BackupResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{89} } - func (m *BackupResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BackupResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *BackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BackupResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_BackupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *BackupResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_BackupResponse.Merge(m, src) } func (m *BackupResponse) XXX_Size() int { - 
return xxx_messageInfo_BackupResponse.Size(m) + return m.Size() } func (m *BackupResponse) XXX_DiscardUnknown() { xxx_messageInfo_BackupResponse.DiscardUnknown(m) @@ -3653,18 +4374,26 @@ func (*RestoreFromBackupRequest) ProtoMessage() {} func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{90} } - func (m *RestoreFromBackupRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RestoreFromBackupRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RestoreFromBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RestoreFromBackupRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RestoreFromBackupRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RestoreFromBackupRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RestoreFromBackupRequest.Merge(m, src) } func (m *RestoreFromBackupRequest) XXX_Size() int { - return xxx_messageInfo_RestoreFromBackupRequest.Size(m) + return m.Size() } func (m *RestoreFromBackupRequest) XXX_DiscardUnknown() { xxx_messageInfo_RestoreFromBackupRequest.DiscardUnknown(m) @@ -3685,18 +4414,26 @@ func (*RestoreFromBackupResponse) ProtoMessage() {} func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{91} } - func (m *RestoreFromBackupResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RestoreFromBackupResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RestoreFromBackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RestoreFromBackupResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RestoreFromBackupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err 
!= nil { + return nil, err + } + return b[:n], nil + } } func (m *RestoreFromBackupResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RestoreFromBackupResponse.Merge(m, src) } func (m *RestoreFromBackupResponse) XXX_Size() int { - return xxx_messageInfo_RestoreFromBackupResponse.Size(m) + return m.Size() } func (m *RestoreFromBackupResponse) XXX_DiscardUnknown() { xxx_messageInfo_RestoreFromBackupResponse.DiscardUnknown(m) @@ -3726,18 +4463,26 @@ func (*VExecRequest) ProtoMessage() {} func (*VExecRequest) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{92} } - func (m *VExecRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VExecRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VExecRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VExecRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VExecRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VExecRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_VExecRequest.Merge(m, src) } func (m *VExecRequest) XXX_Size() int { - return xxx_messageInfo_VExecRequest.Size(m) + return m.Size() } func (m *VExecRequest) XXX_DiscardUnknown() { xxx_messageInfo_VExecRequest.DiscardUnknown(m) @@ -3779,18 +4524,26 @@ func (*VExecResponse) ProtoMessage() {} func (*VExecResponse) Descriptor() ([]byte, []int) { return fileDescriptor_ff9ac4f89e61ffa4, []int{93} } - func (m *VExecResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VExecResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VExecResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VExecResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VExecResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VExecResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_VExecResponse.Merge(m, src) } func (m *VExecResponse) XXX_Size() int { - return xxx_messageInfo_VExecResponse.Size(m) + return m.Size() } func (m *VExecResponse) XXX_DiscardUnknown() { xxx_messageInfo_VExecResponse.DiscardUnknown(m) @@ -3908,142 +4661,13745 @@ func init() { func init() { proto.RegisterFile("tabletmanagerdata.proto", fileDescriptor_ff9ac4f89e61ffa4) } var fileDescriptor_ff9ac4f89e61ffa4 = []byte{ - // 2181 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xdb, 0x72, 0xdb, 0xc6, - 0x75, 0x40, 0x4a, 0x32, 0x75, 0x78, 0x11, 0x05, 0x52, 0x22, 0x44, 0xd7, 0xb2, 0x0c, 0x3b, 0x89, - 0x27, 0x99, 0x52, 0x89, 0x9c, 0x64, 0x32, 0x49, 0xdb, 0xa9, 0x6c, 0x4b, 0x76, 0x62, 0x39, 0x56, - 0x20, 0x5f, 0x3a, 0x99, 0x4e, 0x31, 0x20, 0xb0, 0x22, 0x31, 0x02, 0xb1, 0xf0, 0xee, 0x82, 0x14, - 0x5f, 0xfa, 0x09, 0xed, 0x1f, 0xf4, 0xa5, 0x33, 0xed, 0x7b, 0x3f, 0xa2, 0x9f, 0x90, 0x7e, 0x4a, - 0x1f, 0xfa, 0xd0, 0xce, 0x5e, 0x40, 0x02, 0x04, 0x64, 0xcb, 0x1a, 0x77, 0x26, 0x2f, 0x1a, 0x9c, - 0xfb, 0x65, 0xcf, 0x9e, 0x73, 0x96, 0x82, 0x0e, 0x73, 0xfa, 0x01, 0x62, 0x23, 0x27, 0x74, 0x06, - 0x88, 0x78, 0x0e, 0x73, 0x7a, 0x11, 0xc1, 0x0c, 0xeb, 0xeb, 0x39, 0x42, 0xb7, 0xfa, 0x3a, 0x46, - 0x64, 0x2a, 0xe9, 0xdd, 0x06, 0xc3, 0x11, 0x9e, 0xf3, 0x77, 0x37, 0x08, 0x8a, 0x02, 0xdf, 0x75, - 0x98, 0x8f, 0xc3, 0x14, 0xba, 0x1e, 0xe0, 0x41, 0xcc, 0xfc, 0x40, 0x82, 0xe6, 0x7f, 0x35, 0x58, - 0x7b, 0xce, 0x15, 0x3f, 0x44, 0xa7, 0x7e, 0xe8, 0x73, 0x66, 0x5d, 0x87, 0xa5, 0xd0, 0x19, 0x21, - 0x43, 0xdb, 0xd1, 0xee, 0xae, 0x5a, 0xe2, 0x5b, 0xdf, 0x84, 0x15, 0xea, 0x0e, 0xd1, 0xc8, 0x31, - 0x4a, 0x02, 0xab, 0x20, 0xdd, 0x80, 0x6b, 0x2e, 0x0e, 0xe2, 0x51, 0x48, 0x8d, 0xf2, 0x4e, 0xf9, - 0xee, 0xaa, 0x95, 0x80, 0x7a, 0x0f, 0x5a, 0x11, 0xf1, 0x47, 0x0e, 0x99, 0xda, 0x67, 0x68, 0x6a, 
- 0x27, 0x5c, 0x4b, 0x82, 0x6b, 0x5d, 0x91, 0x9e, 0xa0, 0xe9, 0x03, 0xc5, 0xaf, 0xc3, 0x12, 0x9b, - 0x46, 0xc8, 0x58, 0x96, 0x56, 0xf9, 0xb7, 0x7e, 0x13, 0xaa, 0xdc, 0x75, 0x3b, 0x40, 0xe1, 0x80, - 0x0d, 0x8d, 0x95, 0x1d, 0xed, 0xee, 0x92, 0x05, 0x1c, 0x75, 0x24, 0x30, 0xfa, 0x75, 0x58, 0x25, - 0x78, 0x62, 0xbb, 0x38, 0x0e, 0x99, 0x71, 0x4d, 0x90, 0x2b, 0x04, 0x4f, 0x1e, 0x70, 0x58, 0xbf, - 0x03, 0x2b, 0xa7, 0x3e, 0x0a, 0x3c, 0x6a, 0x54, 0x76, 0xca, 0x77, 0xab, 0x7b, 0xb5, 0x9e, 0xcc, - 0xd7, 0x21, 0x47, 0x5a, 0x8a, 0x66, 0xfe, 0x4d, 0x83, 0xe6, 0x89, 0x08, 0x26, 0x95, 0x82, 0x8f, - 0x60, 0x8d, 0x5b, 0xe9, 0x3b, 0x14, 0xd9, 0x2a, 0x6e, 0x99, 0x8d, 0x46, 0x82, 0x96, 0x22, 0xfa, - 0x33, 0x90, 0xe7, 0x62, 0x7b, 0x33, 0x61, 0x6a, 0x94, 0x84, 0x39, 0xb3, 0x97, 0x3f, 0xca, 0x85, - 0x54, 0x5b, 0x4d, 0x96, 0x45, 0x50, 0x9e, 0xd0, 0x31, 0x22, 0xd4, 0xc7, 0xa1, 0x51, 0x16, 0x16, - 0x13, 0x90, 0x3b, 0xaa, 0x4b, 0xab, 0x0f, 0x86, 0x4e, 0x38, 0x40, 0x16, 0xa2, 0x71, 0xc0, 0xf4, - 0xc7, 0x50, 0xef, 0xa3, 0x53, 0x4c, 0x32, 0x8e, 0x56, 0xf7, 0x6e, 0x17, 0x58, 0x5f, 0x0c, 0xd3, - 0xaa, 0x49, 0x49, 0x15, 0xcb, 0x21, 0xd4, 0x9c, 0x53, 0x86, 0x88, 0x9d, 0x3a, 0xe9, 0x4b, 0x2a, - 0xaa, 0x0a, 0x41, 0x89, 0x36, 0xff, 0xad, 0x41, 0xe3, 0x05, 0x45, 0xe4, 0x18, 0x91, 0x91, 0x4f, - 0xa9, 0x2a, 0xa9, 0x21, 0xa6, 0x2c, 0x29, 0x29, 0xfe, 0xcd, 0x71, 0x31, 0x45, 0x44, 0x15, 0x94, - 0xf8, 0xd6, 0x3f, 0x81, 0xf5, 0xc8, 0xa1, 0x74, 0x82, 0x89, 0x67, 0xbb, 0x43, 0xe4, 0x9e, 0xd1, - 0x78, 0x24, 0xf2, 0xb0, 0x64, 0x35, 0x13, 0xc2, 0x03, 0x85, 0xd7, 0x7f, 0x00, 0x88, 0x88, 0x3f, - 0xf6, 0x03, 0x34, 0x40, 0xb2, 0xb0, 0xaa, 0x7b, 0x9f, 0x15, 0x78, 0x9b, 0xf5, 0xa5, 0x77, 0x3c, - 0x93, 0x39, 0x08, 0x19, 0x99, 0x5a, 0x29, 0x25, 0xdd, 0x5f, 0xc3, 0xda, 0x02, 0x59, 0x6f, 0x42, - 0xf9, 0x0c, 0x4d, 0x95, 0xe7, 0xfc, 0x53, 0x6f, 0xc3, 0xf2, 0xd8, 0x09, 0x62, 0xa4, 0x3c, 0x97, - 0xc0, 0xd7, 0xa5, 0xaf, 0x34, 0xf3, 0x27, 0x0d, 0x6a, 0x0f, 0xfb, 0x6f, 0x89, 0xbb, 0x01, 0x25, - 0xaf, 0xaf, 0x64, 0x4b, 0x5e, 0x7f, 
0x96, 0x87, 0x72, 0x2a, 0x0f, 0xcf, 0x0a, 0x42, 0xdb, 0x2d, - 0x08, 0x2d, 0x6d, 0xec, 0xff, 0x19, 0xd8, 0x5f, 0x35, 0xa8, 0xce, 0x2d, 0x51, 0xfd, 0x08, 0x9a, - 0xdc, 0x4f, 0x3b, 0x9a, 0xe3, 0x0c, 0x4d, 0x78, 0x79, 0xeb, 0xad, 0x07, 0x60, 0xad, 0xc5, 0x19, - 0x98, 0xea, 0x87, 0xd0, 0xf0, 0xfa, 0x19, 0x5d, 0xf2, 0x06, 0xdd, 0x7c, 0x4b, 0xc4, 0x56, 0xdd, - 0x4b, 0x41, 0xd4, 0xfc, 0x08, 0xaa, 0xc7, 0x7e, 0x38, 0xb0, 0xd0, 0xeb, 0x18, 0x51, 0xc6, 0xaf, - 0x52, 0xe4, 0x4c, 0x03, 0xec, 0x78, 0x2a, 0xc8, 0x04, 0x34, 0xef, 0x42, 0x4d, 0x32, 0xd2, 0x08, - 0x87, 0x14, 0xbd, 0x81, 0xf3, 0x63, 0xa8, 0x9d, 0x04, 0x08, 0x45, 0x89, 0xce, 0x2e, 0x54, 0xbc, - 0x98, 0x88, 0xa6, 0x2a, 0x58, 0xcb, 0xd6, 0x0c, 0x36, 0xd7, 0xa0, 0xae, 0x78, 0xa5, 0x5a, 0xf3, - 0x5f, 0x1a, 0xe8, 0x07, 0xe7, 0xc8, 0x8d, 0x19, 0x7a, 0x8c, 0xf1, 0x59, 0xa2, 0xa3, 0xa8, 0xbf, - 0x6e, 0x03, 0x44, 0x0e, 0x71, 0x46, 0x88, 0x21, 0x22, 0xc3, 0x5f, 0xb5, 0x52, 0x18, 0xfd, 0x18, - 0x56, 0xd1, 0x39, 0x23, 0x8e, 0x8d, 0xc2, 0xb1, 0xe8, 0xb4, 0xd5, 0xbd, 0x7b, 0x05, 0xd9, 0xc9, - 0x5b, 0xeb, 0x1d, 0x70, 0xb1, 0x83, 0x70, 0x2c, 0x6b, 0xa2, 0x82, 0x14, 0xd8, 0xfd, 0x06, 0xea, - 0x19, 0xd2, 0x3b, 0xd5, 0xc3, 0x29, 0xb4, 0x32, 0xa6, 0x54, 0x1e, 0x6f, 0x42, 0x15, 0x9d, 0xfb, - 0xcc, 0xa6, 0xcc, 0x61, 0x31, 0x55, 0x09, 0x02, 0x8e, 0x3a, 0x11, 0x18, 0x31, 0x46, 0x98, 0x87, - 0x63, 0x36, 0x1b, 0x23, 0x02, 0x52, 0x78, 0x44, 0x92, 0x5b, 0xa0, 0x20, 0x73, 0x0c, 0xcd, 0x47, - 0x88, 0xc9, 0xbe, 0x92, 0xa4, 0x6f, 0x13, 0x56, 0x44, 0xe0, 0xb2, 0xe2, 0x56, 0x2d, 0x05, 0xe9, - 0xb7, 0xa1, 0xee, 0x87, 0x6e, 0x10, 0x7b, 0xc8, 0x1e, 0xfb, 0x68, 0x42, 0x85, 0x89, 0x8a, 0x55, - 0x53, 0xc8, 0x97, 0x1c, 0xa7, 0x7f, 0x00, 0x0d, 0x74, 0x2e, 0x99, 0x94, 0x12, 0x39, 0xb6, 0xea, - 0x0a, 0x2b, 0x1a, 0x34, 0x35, 0x11, 0xac, 0xa7, 0xec, 0xaa, 0xe8, 0x8e, 0x61, 0x5d, 0x76, 0xc6, - 0x54, 0xb3, 0x7f, 0x97, 0x6e, 0xdb, 0xa4, 0x0b, 0x18, 0xb3, 0x03, 0x1b, 0x8f, 0x10, 0x4b, 0x95, - 0xb0, 0x8a, 0xd1, 0xfc, 0x11, 0x36, 0x17, 0x09, 0xca, 0x89, 0xdf, 0x42, 
0x35, 0x7b, 0xe9, 0xb8, - 0xf9, 0xed, 0x02, 0xf3, 0x69, 0xe1, 0xb4, 0x88, 0xd9, 0x06, 0xfd, 0x04, 0x31, 0x0b, 0x39, 0xde, - 0xb3, 0x30, 0x98, 0x26, 0x16, 0x37, 0xa0, 0x95, 0xc1, 0xaa, 0x12, 0x9e, 0xa3, 0x5f, 0x11, 0x9f, - 0xa1, 0x84, 0x7b, 0x13, 0xda, 0x59, 0xb4, 0x62, 0xff, 0x0e, 0xd6, 0xe5, 0x70, 0x7a, 0x3e, 0x8d, - 0x12, 0x66, 0xfd, 0x0b, 0xa8, 0x4a, 0xf7, 0x6c, 0x31, 0xe0, 0xb9, 0xcb, 0x8d, 0xbd, 0x76, 0x6f, - 0xb6, 0xaf, 0x88, 0x9c, 0x33, 0x21, 0x01, 0x6c, 0xf6, 0xcd, 0xfd, 0x4c, 0xeb, 0x9a, 0x3b, 0x64, - 0xa1, 0x53, 0x82, 0xe8, 0x90, 0x97, 0x54, 0xda, 0xa1, 0x2c, 0x5a, 0xb1, 0x77, 0x60, 0xc3, 0x8a, - 0xc3, 0xc7, 0xc8, 0x09, 0xd8, 0x50, 0x0c, 0x8e, 0x44, 0xc0, 0x80, 0xcd, 0x45, 0x82, 0x12, 0xf9, - 0x1c, 0x8c, 0x6f, 0x07, 0x21, 0x26, 0x48, 0x12, 0x0f, 0x08, 0xc1, 0x24, 0xd3, 0x52, 0x18, 0x43, - 0x24, 0x9c, 0x37, 0x0a, 0x01, 0x9a, 0xd7, 0x61, 0xab, 0x40, 0x4a, 0xa9, 0xfc, 0x9a, 0x3b, 0xcd, - 0xfb, 0x49, 0xb6, 0x92, 0x6f, 0x43, 0x7d, 0xe2, 0xf8, 0xcc, 0x8e, 0x30, 0x9d, 0x17, 0xd3, 0xaa, - 0x55, 0xe3, 0xc8, 0x63, 0x85, 0x93, 0x91, 0xa5, 0x65, 0x95, 0xce, 0x3d, 0xd8, 0x3c, 0x26, 0xe8, - 0x34, 0xf0, 0x07, 0xc3, 0x85, 0x0b, 0xc2, 0x77, 0x32, 0x91, 0xb8, 0xe4, 0x86, 0x24, 0xa0, 0x39, - 0x80, 0x4e, 0x4e, 0x46, 0xd5, 0xd5, 0x11, 0x34, 0x24, 0x97, 0x4d, 0xc4, 0x5e, 0x91, 0xf4, 0xf3, - 0x0f, 0x2e, 0xac, 0xec, 0xf4, 0x16, 0x62, 0xd5, 0xdd, 0x14, 0x44, 0xcd, 0xff, 0x68, 0xa0, 0xef, - 0x47, 0x51, 0x30, 0xcd, 0x7a, 0xd6, 0x84, 0x32, 0x7d, 0x1d, 0x24, 0x2d, 0x86, 0xbe, 0x0e, 0x78, - 0x8b, 0x39, 0xc5, 0xc4, 0x45, 0xea, 0xb2, 0x4a, 0x80, 0xaf, 0x01, 0x4e, 0x10, 0xe0, 0x89, 0x9d, - 0xda, 0x61, 0x45, 0x67, 0xa8, 0x58, 0x4d, 0x41, 0xb0, 0xe6, 0xf8, 0xfc, 0x02, 0xb4, 0xf4, 0xbe, - 0x16, 0xa0, 0xe5, 0x2b, 0x2e, 0x40, 0x7f, 0xd7, 0xa0, 0x95, 0x89, 0x5e, 0xe5, 0xf8, 0xe7, 0xb7, - 0xaa, 0xb5, 0x60, 0xfd, 0x08, 0xbb, 0x67, 0xb2, 0xeb, 0x25, 0x57, 0xa3, 0x0d, 0x7a, 0x1a, 0x39, - 0xbf, 0x78, 0x2f, 0xc2, 0x20, 0xc7, 0xbc, 0x09, 0xed, 0x2c, 0x5a, 0xb1, 0xff, 0x43, 0x03, 0x43, - 0x8d, 0x88, 
0x43, 0xc4, 0xdc, 0xe1, 0x3e, 0x7d, 0xd8, 0x9f, 0xd5, 0x41, 0x1b, 0x96, 0xc5, 0x2a, - 0x2e, 0x12, 0x50, 0xb3, 0x24, 0xa0, 0x77, 0xe0, 0x9a, 0xd7, 0xb7, 0xc5, 0x68, 0x54, 0xd3, 0xc1, - 0xeb, 0x7f, 0xcf, 0x87, 0xe3, 0x16, 0x54, 0x46, 0xce, 0xb9, 0x4d, 0xf0, 0x84, 0xaa, 0x65, 0xf0, - 0xda, 0xc8, 0x39, 0xb7, 0xf0, 0x84, 0x8a, 0x45, 0xdd, 0xa7, 0x62, 0x03, 0xef, 0xfb, 0x61, 0x80, - 0x07, 0x54, 0x1c, 0x7f, 0xc5, 0x6a, 0x28, 0xf4, 0x7d, 0x89, 0xe5, 0x77, 0x8d, 0x88, 0x6b, 0x94, - 0x3e, 0xdc, 0x8a, 0x55, 0x23, 0xa9, 0xbb, 0x65, 0x3e, 0x82, 0xad, 0x02, 0x9f, 0xd5, 0xe9, 0x7d, - 0x0c, 0x2b, 0xf2, 0x6a, 0xa8, 0x63, 0xd3, 0xd5, 0x73, 0xe2, 0x07, 0xfe, 0x57, 0x5d, 0x03, 0xc5, - 0x61, 0xfe, 0x49, 0x83, 0x1b, 0x59, 0x4d, 0xfb, 0x41, 0xc0, 0x17, 0x30, 0xfa, 0xfe, 0x53, 0x90, - 0x8b, 0x6c, 0xa9, 0x20, 0xb2, 0x23, 0xd8, 0xbe, 0xc8, 0x9f, 0x2b, 0x84, 0xf7, 0x64, 0xf1, 0x6c, - 0xf7, 0xa3, 0xe8, 0xcd, 0x81, 0xa5, 0xfd, 0x2f, 0x65, 0xfc, 0xcf, 0x27, 0x5d, 0x28, 0xbb, 0x82, - 0x57, 0x5d, 0x30, 0x52, 0x7d, 0x41, 0x6e, 0x1c, 0x49, 0x99, 0x1e, 0xc1, 0x56, 0x01, 0x4d, 0x19, - 0xd9, 0xe5, 0xdb, 0xc7, 0x6c, 0x63, 0xa9, 0xee, 0x75, 0x7a, 0x8b, 0x6f, 0x67, 0x25, 0xa0, 0xd8, - 0xf8, 0x5d, 0x78, 0xea, 0x50, 0x7e, 0x8d, 0x32, 0x46, 0x9e, 0x42, 0x3b, 0x8b, 0x56, 0xfa, 0xbf, - 0x58, 0xd0, 0x7f, 0x23, 0xa7, 0x3f, 0x23, 0x96, 0x58, 0xe9, 0xc0, 0x86, 0xc4, 0x27, 0xb3, 0x20, - 0xb1, 0xf3, 0x39, 0x6c, 0x2e, 0x12, 0x94, 0xa5, 0x2e, 0x54, 0x16, 0x86, 0xc9, 0x0c, 0xe6, 0x52, - 0xaf, 0x1c, 0x9f, 0x1d, 0xe2, 0x45, 0x7d, 0x6f, 0x94, 0xda, 0x82, 0x4e, 0x4e, 0x4a, 0x5d, 0x71, - 0x03, 0x36, 0x4f, 0x18, 0x8e, 0x52, 0x79, 0x4d, 0x1c, 0xdc, 0x82, 0x4e, 0x8e, 0xa2, 0x84, 0xfe, - 0x00, 0x37, 0x16, 0x48, 0x4f, 0xfd, 0xd0, 0x1f, 0xc5, 0xa3, 0x4b, 0x38, 0xa3, 0xdf, 0x02, 0x31, - 0x1b, 0x6d, 0xe6, 0x8f, 0x50, 0xb2, 0x44, 0x96, 0xad, 0x2a, 0xc7, 0x3d, 0x97, 0x28, 0xf3, 0x57, - 0xb0, 0x7d, 0x91, 0xfe, 0x4b, 0xe4, 0x48, 0x38, 0xee, 0x10, 0x56, 0x10, 0x53, 0x17, 0x8c, 0x3c, - 0x49, 0x05, 0xd5, 0x87, 0x5b, 0x8b, 0xb4, 0x17, 
0x21, 0xf3, 0x83, 0x7d, 0xde, 0x6a, 0xdf, 0x53, - 0x60, 0x77, 0xc0, 0x7c, 0x93, 0x0d, 0xe5, 0x49, 0x1b, 0xf4, 0x47, 0x28, 0xe1, 0x99, 0x15, 0xe6, - 0x27, 0xd0, 0xca, 0x60, 0x55, 0x26, 0xda, 0xb0, 0xec, 0x78, 0x1e, 0x49, 0xd6, 0x04, 0x09, 0xf0, - 0x1c, 0x58, 0x88, 0xa2, 0x0b, 0x72, 0x90, 0x27, 0x29, 0xcb, 0xbb, 0xd0, 0x79, 0x99, 0xc2, 0xf3, - 0x2b, 0x5d, 0xd8, 0x12, 0x56, 0x55, 0x4b, 0x30, 0x0f, 0xc1, 0xc8, 0x0b, 0x5c, 0xa9, 0x19, 0xdd, - 0x48, 0xeb, 0x99, 0x57, 0x6b, 0x62, 0xbe, 0x01, 0x25, 0xdf, 0x53, 0x8f, 0x91, 0x92, 0xef, 0x65, - 0x0e, 0xa2, 0xb4, 0x50, 0x00, 0x3b, 0xb0, 0x7d, 0x91, 0x32, 0x15, 0x67, 0x0b, 0xd6, 0xbf, 0x0d, - 0x7d, 0x26, 0x2f, 0x60, 0x92, 0x98, 0x4f, 0x41, 0x4f, 0x23, 0x2f, 0x51, 0x69, 0x3f, 0x69, 0xb0, - 0x7d, 0x8c, 0xa3, 0x38, 0x10, 0xdb, 0x6a, 0xe4, 0x10, 0x14, 0xb2, 0xef, 0x70, 0x4c, 0x42, 0x27, - 0x48, 0xfc, 0xfe, 0x10, 0xd6, 0x78, 0x3d, 0xd8, 0x2e, 0x41, 0x0e, 0x43, 0x9e, 0x1d, 0x26, 0x2f, - 0xaa, 0x3a, 0x47, 0x3f, 0x90, 0xd8, 0xef, 0x29, 0x7f, 0x75, 0x39, 0x2e, 0x57, 0x9a, 0x1e, 0x1c, - 0x20, 0x51, 0x62, 0x78, 0x7c, 0x05, 0xb5, 0x91, 0xf0, 0xcc, 0x76, 0x02, 0xdf, 0x91, 0x03, 0xa4, - 0xba, 0xb7, 0xb1, 0xb8, 0x81, 0xef, 0x73, 0xa2, 0x55, 0x95, 0xac, 0x02, 0xd0, 0x3f, 0x83, 0x76, - 0xaa, 0x55, 0xcd, 0x17, 0xd5, 0x25, 0x61, 0xa3, 0x95, 0xa2, 0xcd, 0xf6, 0xd5, 0x5b, 0x70, 0xf3, - 0xc2, 0xb8, 0x54, 0x0a, 0xff, 0xa2, 0xc9, 0x74, 0xa9, 0x44, 0x27, 0xf1, 0xfe, 0x12, 0x56, 0x24, - 0xbf, 0x3a, 0xf4, 0x0b, 0x1c, 0x54, 0x4c, 0x17, 0xfa, 0x56, 0xba, 0xd0, 0xb7, 0xa2, 0x8c, 0x96, - 0x0b, 0x32, 0xca, 0xfb, 0x7b, 0xc6, 0xbf, 0xf9, 0x0a, 0xf4, 0x10, 0x8d, 0x30, 0x43, 0xd9, 0xc3, - 0xff, 0xb3, 0x06, 0xed, 0x2c, 0x5e, 0x9d, 0xff, 0x3d, 0x68, 0x79, 0x28, 0x22, 0xc8, 0x15, 0xc6, - 0xb2, 0xa5, 0x70, 0xbf, 0x64, 0x68, 0x96, 0x3e, 0x27, 0xcf, 0x7c, 0xbc, 0x0f, 0x75, 0x75, 0x58, - 0x6a, 0x66, 0x94, 0x2e, 0x33, 0x33, 0xd4, 0x01, 0x4b, 0x88, 0x5f, 0xe1, 0x17, 0xa1, 0x87, 0x8b, - 0x9c, 0xed, 0x82, 0x91, 0x27, 0xa9, 0xf8, 0xae, 0xcf, 0x86, 0xe4, 0x2b, 0x87, 0x1e, 
0x13, 0xcc, - 0x59, 0xbc, 0x44, 0xf0, 0x17, 0xd0, 0x2d, 0x22, 0x2a, 0xd1, 0x7f, 0x6a, 0xd0, 0x3c, 0x41, 0xd9, - 0x5b, 0xf1, 0xae, 0x07, 0x5a, 0x70, 0x3a, 0xa5, 0xa2, 0x7a, 0xff, 0x12, 0x3a, 0xe2, 0x99, 0xc0, - 0x13, 0x44, 0x58, 0xc1, 0x1b, 0x61, 0x43, 0x90, 0x17, 0xbb, 0x65, 0xfe, 0xb9, 0xb5, 0x54, 0xf0, - 0xdc, 0x6a, 0xc1, 0x7a, 0x2a, 0x0e, 0x15, 0xdd, 0x93, 0x74, 0xec, 0x16, 0x12, 0x76, 0x67, 0x99, - 0x79, 0xc7, 0x30, 0xcd, 0x1b, 0x70, 0xbd, 0x50, 0x99, 0xb2, 0xf5, 0x47, 0xde, 0xe7, 0x33, 0x03, - 0x6c, 0x3f, 0xf4, 0x1e, 0x21, 0x96, 0x59, 0x35, 0xf4, 0xdf, 0xc1, 0x06, 0x65, 0x38, 0x4a, 0x07, - 0x6f, 0x8f, 0xb0, 0x97, 0xbc, 0xae, 0xef, 0x14, 0x6c, 0x30, 0xd9, 0xa1, 0x88, 0x3d, 0x64, 0xb5, - 0x68, 0x1e, 0xc9, 0x1f, 0x2f, 0xb7, 0xdf, 0xe8, 0xc0, 0xec, 0x87, 0x88, 0xfa, 0x70, 0xda, 0x27, - 0xbe, 0x67, 0x5f, 0x6a, 0x77, 0x12, 0xf5, 0x5e, 0x93, 0x12, 0xea, 0xc7, 0xa0, 0xdf, 0xcc, 0xd6, - 0x22, 0x59, 0xe2, 0x1f, 0xbe, 0xcd, 0xe9, 0xfc, 0x7e, 0xa4, 0xea, 0x30, 0xdb, 0x48, 0xf8, 0xa6, - 0xb3, 0x48, 0xb8, 0x44, 0x47, 0x3e, 0x81, 0xfa, 0x7d, 0xc7, 0x3d, 0x8b, 0x67, 0x9b, 0xec, 0x0e, - 0x54, 0x5d, 0x1c, 0xba, 0x31, 0x21, 0x28, 0x74, 0xa7, 0xaa, 0xf7, 0xa6, 0x51, 0x9c, 0x43, 0x3c, - 0x47, 0x65, 0xb9, 0xa8, 0x37, 0x6c, 0x1a, 0x65, 0x7e, 0x09, 0x8d, 0x44, 0xa9, 0x72, 0xe1, 0x0e, - 0x2c, 0xa3, 0xf1, 0xbc, 0x58, 0x1a, 0xbd, 0xe4, 0x1f, 0x32, 0x07, 0x1c, 0x6b, 0x49, 0xa2, 0x9a, - 0xb4, 0x0c, 0x13, 0x74, 0x48, 0xf0, 0x28, 0xe3, 0x97, 0xb9, 0xcf, 0xaf, 0x69, 0x8e, 0xf6, 0x4e, - 0xea, 0x7f, 0x0f, 0xb5, 0x97, 0x6f, 0x9d, 0xd0, 0x3c, 0x5b, 0x13, 0x4c, 0xce, 0x4e, 0x03, 0x3c, - 0x49, 0x06, 0x65, 0x02, 0x73, 0xda, 0x19, 0x9a, 0xd2, 0xc8, 0x71, 0x91, 0xfa, 0xcd, 0x6e, 0x06, - 0x9b, 0xdf, 0x40, 0xfd, 0xe5, 0x55, 0xc7, 0xf9, 0xfd, 0x4f, 0x7f, 0xec, 0x8d, 0x7d, 0x86, 0x28, - 0xed, 0xf9, 0x78, 0x57, 0x7e, 0xed, 0x0e, 0xf0, 0xee, 0x98, 0xed, 0x8a, 0xff, 0x58, 0xed, 0xe6, - 0x9e, 0xb8, 0xfd, 0x15, 0x41, 0xb8, 0xf7, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x46, 0xf8, - 0x40, 0x3b, 0x1b, 0x00, 
0x00, + // 2206 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x39, 0x4b, 0x73, 0xdb, 0xd6, + 0xd5, 0x1f, 0xa8, 0x87, 0xa5, 0xc3, 0x87, 0x28, 0x90, 0x12, 0x21, 0xfa, 0x33, 0x2d, 0xc3, 0x4e, + 0xe2, 0x49, 0xa6, 0x54, 0x23, 0x27, 0x99, 0x4c, 0xd2, 0x76, 0x22, 0xdb, 0x92, 0x9d, 0x58, 0x8e, + 0x15, 0xc8, 0x8f, 0x4e, 0xa6, 0x53, 0x0c, 0x08, 0x5c, 0x91, 0x18, 0x81, 0xb8, 0xf0, 0xbd, 0x17, + 0xa4, 0xb8, 0xe9, 0x4f, 0x68, 0xb7, 0x5d, 0x75, 0xd3, 0x99, 0x76, 0xdf, 0x1f, 0xd1, 0xe9, 0xb2, + 0xab, 0x74, 0xdb, 0x71, 0x7f, 0x44, 0x17, 0x5d, 0xb4, 0x73, 0x1f, 0x20, 0x01, 0x02, 0xb2, 0x65, + 0x8d, 0x3b, 0xd3, 0x8d, 0x06, 0xe7, 0xfd, 0xb8, 0xe7, 0x9e, 0x73, 0x2e, 0x05, 0x2d, 0xe6, 0xf4, + 0x02, 0xc4, 0x86, 0x4e, 0xe8, 0xf4, 0x11, 0xf1, 0x1c, 0xe6, 0x74, 0x23, 0x82, 0x19, 0xd6, 0xd7, + 0x73, 0x84, 0x76, 0xf9, 0x65, 0x8c, 0xc8, 0x44, 0xd2, 0xdb, 0x35, 0x86, 0x23, 0x3c, 0xe3, 0x6f, + 0x6f, 0x10, 0x14, 0x05, 0xbe, 0xeb, 0x30, 0x1f, 0x87, 0x29, 0x74, 0x35, 0xc0, 0xfd, 0x98, 0xf9, + 0x81, 0x04, 0xcd, 0x7f, 0x6b, 0xb0, 0xf6, 0x94, 0x2b, 0xbe, 0x8f, 0x4e, 0xfc, 0xd0, 0xe7, 0xcc, + 0xba, 0x0e, 0x8b, 0xa1, 0x33, 0x44, 0x86, 0xb6, 0xad, 0xdd, 0x5e, 0xb5, 0xc4, 0xb7, 0xbe, 0x09, + 0xcb, 0xd4, 0x1d, 0xa0, 0xa1, 0x63, 0x94, 0x04, 0x56, 0x41, 0xba, 0x01, 0x57, 0x5c, 0x1c, 0xc4, + 0xc3, 0x90, 0x1a, 0x0b, 0xdb, 0x0b, 0xb7, 0x57, 0xad, 0x04, 0xd4, 0xbb, 0xd0, 0x88, 0x88, 0x3f, + 0x74, 0xc8, 0xc4, 0x3e, 0x45, 0x13, 0x3b, 0xe1, 0x5a, 0x14, 0x5c, 0xeb, 0x8a, 0xf4, 0x08, 0x4d, + 0xee, 0x29, 0x7e, 0x1d, 0x16, 0xd9, 0x24, 0x42, 0xc6, 0x92, 0xb4, 0xca, 0xbf, 0xf5, 0xeb, 0x50, + 0xe6, 0xae, 0xdb, 0x01, 0x0a, 0xfb, 0x6c, 0x60, 0x2c, 0x6f, 0x6b, 0xb7, 0x17, 0x2d, 0xe0, 0xa8, + 0x43, 0x81, 0xd1, 0xaf, 0xc2, 0x2a, 0xc1, 0x63, 0xdb, 0xc5, 0x71, 0xc8, 0x8c, 0x2b, 0x82, 0xbc, + 0x42, 0xf0, 0xf8, 0x1e, 0x87, 0xf5, 0x5b, 0xb0, 0x7c, 0xe2, 0xa3, 0xc0, 0xa3, 0xc6, 0xca, 0xf6, + 0xc2, 0xed, 0xf2, 0x6e, 0xa5, 0x2b, 0xf3, 0x75, 0xc0, 0x91, 0x96, 0xa2, 0x99, 
0x7f, 0xd0, 0xa0, + 0x7e, 0x2c, 0x82, 0x49, 0xa5, 0xe0, 0x03, 0x58, 0xe3, 0x56, 0x7a, 0x0e, 0x45, 0xb6, 0x8a, 0x5b, + 0x66, 0xa3, 0x96, 0xa0, 0xa5, 0x88, 0xfe, 0x04, 0xe4, 0xb9, 0xd8, 0xde, 0x54, 0x98, 0x1a, 0x25, + 0x61, 0xce, 0xec, 0xe6, 0x8f, 0x72, 0x2e, 0xd5, 0x56, 0x9d, 0x65, 0x11, 0x94, 0x27, 0x74, 0x84, + 0x08, 0xf5, 0x71, 0x68, 0x2c, 0x08, 0x8b, 0x09, 0xc8, 0x1d, 0xd5, 0xa5, 0xd5, 0x7b, 0x03, 0x27, + 0xec, 0x23, 0x0b, 0xd1, 0x38, 0x60, 0xfa, 0x43, 0xa8, 0xf6, 0xd0, 0x09, 0x26, 0x19, 0x47, 0xcb, + 0xbb, 0x37, 0x0b, 0xac, 0xcf, 0x87, 0x69, 0x55, 0xa4, 0xa4, 0x8a, 0xe5, 0x00, 0x2a, 0xce, 0x09, + 0x43, 0xc4, 0x4e, 0x9d, 0xf4, 0x05, 0x15, 0x95, 0x85, 0xa0, 0x44, 0x9b, 0xff, 0xd4, 0xa0, 0xf6, + 0x8c, 0x22, 0x72, 0x84, 0xc8, 0xd0, 0xa7, 0x54, 0x95, 0xd4, 0x00, 0x53, 0x96, 0x94, 0x14, 0xff, + 0xe6, 0xb8, 0x98, 0x22, 0xa2, 0x0a, 0x4a, 0x7c, 0xeb, 0x1f, 0xc1, 0x7a, 0xe4, 0x50, 0x3a, 0xc6, + 0xc4, 0xb3, 0xdd, 0x01, 0x72, 0x4f, 0x69, 0x3c, 0x14, 0x79, 0x58, 0xb4, 0xea, 0x09, 0xe1, 0x9e, + 0xc2, 0xeb, 0xdf, 0x01, 0x44, 0xc4, 0x1f, 0xf9, 0x01, 0xea, 0x23, 0x59, 0x58, 0xe5, 0xdd, 0x8f, + 0x0b, 0xbc, 0xcd, 0xfa, 0xd2, 0x3d, 0x9a, 0xca, 0xec, 0x87, 0x8c, 0x4c, 0xac, 0x94, 0x92, 0xf6, + 0x4f, 0x61, 0x6d, 0x8e, 0xac, 0xd7, 0x61, 0xe1, 0x14, 0x4d, 0x94, 0xe7, 0xfc, 0x53, 0x6f, 0xc2, + 0xd2, 0xc8, 0x09, 0x62, 0xa4, 0x3c, 0x97, 0xc0, 0x17, 0xa5, 0xcf, 0x35, 0xf3, 0x07, 0x0d, 0x2a, + 0xf7, 0x7b, 0x6f, 0x88, 0xbb, 0x06, 0x25, 0xaf, 0xa7, 0x64, 0x4b, 0x5e, 0x6f, 0x9a, 0x87, 0x85, + 0x54, 0x1e, 0x9e, 0x14, 0x84, 0xb6, 0x53, 0x10, 0x5a, 0xda, 0xd8, 0x7f, 0x33, 0xb0, 0xdf, 0x6b, + 0x50, 0x9e, 0x59, 0xa2, 0xfa, 0x21, 0xd4, 0xb9, 0x9f, 0x76, 0x34, 0xc3, 0x19, 0x9a, 0xf0, 0xf2, + 0xc6, 0x1b, 0x0f, 0xc0, 0x5a, 0x8b, 0x33, 0x30, 0xd5, 0x0f, 0xa0, 0xe6, 0xf5, 0x32, 0xba, 0xe4, + 0x0d, 0xba, 0xfe, 0x86, 0x88, 0xad, 0xaa, 0x97, 0x82, 0xa8, 0xf9, 0x01, 0x94, 0x8f, 0xfc, 0xb0, + 0x6f, 0xa1, 0x97, 0x31, 0xa2, 0x8c, 0x5f, 0xa5, 0xc8, 0x99, 0x04, 0xd8, 0xf1, 0x54, 0x90, 0x09, + 0x68, 0xde, 0x86, 
0x8a, 0x64, 0xa4, 0x11, 0x0e, 0x29, 0x7a, 0x0d, 0xe7, 0x87, 0x50, 0x39, 0x0e, + 0x10, 0x8a, 0x12, 0x9d, 0x6d, 0x58, 0xf1, 0x62, 0x22, 0x9a, 0xaa, 0x60, 0x5d, 0xb0, 0xa6, 0xb0, + 0xb9, 0x06, 0x55, 0xc5, 0x2b, 0xd5, 0x9a, 0x7f, 0xd3, 0x40, 0xdf, 0x3f, 0x43, 0x6e, 0xcc, 0xd0, + 0x43, 0x8c, 0x4f, 0x13, 0x1d, 0x45, 0xfd, 0xb5, 0x03, 0x10, 0x39, 0xc4, 0x19, 0x22, 0x86, 0x88, + 0x0c, 0x7f, 0xd5, 0x4a, 0x61, 0xf4, 0x23, 0x58, 0x45, 0x67, 0x8c, 0x38, 0x36, 0x0a, 0x47, 0xa2, + 0xd3, 0x96, 0x77, 0xef, 0x14, 0x64, 0x27, 0x6f, 0xad, 0xbb, 0xcf, 0xc5, 0xf6, 0xc3, 0x91, 0xac, + 0x89, 0x15, 0xa4, 0xc0, 0xf6, 0x97, 0x50, 0xcd, 0x90, 0xde, 0xaa, 0x1e, 0x4e, 0xa0, 0x91, 0x31, + 0xa5, 0xf2, 0x78, 0x1d, 0xca, 0xe8, 0xcc, 0x67, 0x36, 0x65, 0x0e, 0x8b, 0xa9, 0x4a, 0x10, 0x70, + 0xd4, 0xb1, 0xc0, 0x88, 0x31, 0xc2, 0x3c, 0x1c, 0xb3, 0xe9, 0x18, 0x11, 0x90, 0xc2, 0x23, 0x92, + 0xdc, 0x02, 0x05, 0x99, 0x23, 0xa8, 0x3f, 0x40, 0x4c, 0xf6, 0x95, 0x24, 0x7d, 0x9b, 0xb0, 0x2c, + 0x02, 0x97, 0x15, 0xb7, 0x6a, 0x29, 0x48, 0xbf, 0x09, 0x55, 0x3f, 0x74, 0x83, 0xd8, 0x43, 0xf6, + 0xc8, 0x47, 0x63, 0x2a, 0x4c, 0xac, 0x58, 0x15, 0x85, 0x7c, 0xce, 0x71, 0xfa, 0x7b, 0x50, 0x43, + 0x67, 0x92, 0x49, 0x29, 0x91, 0x63, 0xab, 0xaa, 0xb0, 0xa2, 0x41, 0x53, 0x13, 0xc1, 0x7a, 0xca, + 0xae, 0x8a, 0xee, 0x08, 0xd6, 0x65, 0x67, 0x4c, 0x35, 0xfb, 0xb7, 0xe9, 0xb6, 0x75, 0x3a, 0x87, + 0x31, 0x5b, 0xb0, 0xf1, 0x00, 0xb1, 0x54, 0x09, 0xab, 0x18, 0xcd, 0xef, 0x61, 0x73, 0x9e, 0xa0, + 0x9c, 0xf8, 0x0a, 0xca, 0xd9, 0x4b, 0xc7, 0xcd, 0x77, 0x0a, 0xcc, 0xa7, 0x85, 0xd3, 0x22, 0x66, + 0x13, 0xf4, 0x63, 0xc4, 0x2c, 0xe4, 0x78, 0x4f, 0xc2, 0x60, 0x92, 0x58, 0xdc, 0x80, 0x46, 0x06, + 0xab, 0x4a, 0x78, 0x86, 0x7e, 0x41, 0x7c, 0x86, 0x12, 0xee, 0x4d, 0x68, 0x66, 0xd1, 0x8a, 0xfd, + 0x1b, 0x58, 0x97, 0xc3, 0xe9, 0xe9, 0x24, 0x4a, 0x98, 0xf5, 0x4f, 0xa1, 0x2c, 0xdd, 0xb3, 0xc5, + 0x80, 0xe7, 0x2e, 0xd7, 0x76, 0x9b, 0xdd, 0xe9, 0xbe, 0x22, 0x72, 0xce, 0x84, 0x04, 0xb0, 0xe9, + 0x37, 0xf7, 0x33, 0xad, 0x6b, 0xe6, 0x90, 0x85, 0x4e, 
0x08, 0xa2, 0x03, 0x5e, 0x52, 0x69, 0x87, + 0xb2, 0x68, 0xc5, 0xde, 0x82, 0x0d, 0x2b, 0x0e, 0x1f, 0x22, 0x27, 0x60, 0x03, 0x31, 0x38, 0x12, + 0x01, 0x03, 0x36, 0xe7, 0x09, 0x4a, 0xe4, 0x13, 0x30, 0xbe, 0xee, 0x87, 0x98, 0x20, 0x49, 0xdc, + 0x27, 0x04, 0x93, 0x4c, 0x4b, 0x61, 0x0c, 0x91, 0x70, 0xd6, 0x28, 0x04, 0x68, 0x5e, 0x85, 0xad, + 0x02, 0x29, 0xa5, 0xf2, 0x0b, 0xee, 0x34, 0xef, 0x27, 0xd9, 0x4a, 0xbe, 0x09, 0xd5, 0xb1, 0xe3, + 0x33, 0x3b, 0xc2, 0x74, 0x56, 0x4c, 0xab, 0x56, 0x85, 0x23, 0x8f, 0x14, 0x4e, 0x46, 0x96, 0x96, + 0x55, 0x3a, 0x77, 0x61, 0xf3, 0x88, 0xa0, 0x93, 0xc0, 0xef, 0x0f, 0xe6, 0x2e, 0x08, 0xdf, 0xc9, + 0x44, 0xe2, 0x92, 0x1b, 0x92, 0x80, 0x66, 0x1f, 0x5a, 0x39, 0x19, 0x55, 0x57, 0x87, 0x50, 0x93, + 0x5c, 0x36, 0x11, 0x7b, 0x45, 0xd2, 0xcf, 0xdf, 0x3b, 0xb7, 0xb2, 0xd3, 0x5b, 0x88, 0x55, 0x75, + 0x53, 0x10, 0x35, 0xff, 0xa5, 0x81, 0xbe, 0x17, 0x45, 0xc1, 0x24, 0xeb, 0x59, 0x1d, 0x16, 0xe8, + 0xcb, 0x20, 0x69, 0x31, 0xf4, 0x65, 0xc0, 0x5b, 0xcc, 0x09, 0x26, 0x2e, 0x52, 0x97, 0x55, 0x02, + 0x7c, 0x0d, 0x70, 0x82, 0x00, 0x8f, 0xed, 0xd4, 0x0e, 0x2b, 0x3a, 0xc3, 0x8a, 0x55, 0x17, 0x04, + 0x6b, 0x86, 0xcf, 0x2f, 0x40, 0x8b, 0xef, 0x6a, 0x01, 0x5a, 0xba, 0xe4, 0x02, 0xf4, 0x47, 0x0d, + 0x1a, 0x99, 0xe8, 0x55, 0x8e, 0xff, 0xf7, 0x56, 0xb5, 0x06, 0xac, 0x1f, 0x62, 0xf7, 0x54, 0x76, + 0xbd, 0xe4, 0x6a, 0x34, 0x41, 0x4f, 0x23, 0x67, 0x17, 0xef, 0x59, 0x18, 0xe4, 0x98, 0x37, 0xa1, + 0x99, 0x45, 0x2b, 0xf6, 0x3f, 0x69, 0x60, 0xa8, 0x11, 0x71, 0x80, 0x98, 0x3b, 0xd8, 0xa3, 0xf7, + 0x7b, 0xd3, 0x3a, 0x68, 0xc2, 0x92, 0x58, 0xc5, 0x45, 0x02, 0x2a, 0x96, 0x04, 0xf4, 0x16, 0x5c, + 0xf1, 0x7a, 0xb6, 0x18, 0x8d, 0x6a, 0x3a, 0x78, 0xbd, 0x6f, 0xf9, 0x70, 0xdc, 0x82, 0x95, 0xa1, + 0x73, 0x66, 0x13, 0x3c, 0xa6, 0x6a, 0x19, 0xbc, 0x32, 0x74, 0xce, 0x2c, 0x3c, 0xa6, 0x62, 0x51, + 0xf7, 0xa9, 0xd8, 0xc0, 0x7b, 0x7e, 0x18, 0xe0, 0x3e, 0x15, 0xc7, 0xbf, 0x62, 0xd5, 0x14, 0xfa, + 0xae, 0xc4, 0xf2, 0xbb, 0x46, 0xc4, 0x35, 0x4a, 0x1f, 0xee, 0x8a, 0x55, 0x21, 0xa9, 0xbb, 
0x65, + 0x3e, 0x80, 0xad, 0x02, 0x9f, 0xd5, 0xe9, 0x7d, 0x08, 0xcb, 0xf2, 0x6a, 0xa8, 0x63, 0xd3, 0xd5, + 0x73, 0xe2, 0x3b, 0xfe, 0x57, 0x5d, 0x03, 0xc5, 0x61, 0xfe, 0x5a, 0x83, 0x6b, 0x59, 0x4d, 0x7b, + 0x41, 0xc0, 0x17, 0x30, 0xfa, 0xee, 0x53, 0x90, 0x8b, 0x6c, 0xb1, 0x20, 0xb2, 0x43, 0xe8, 0x9c, + 0xe7, 0xcf, 0x25, 0xc2, 0x7b, 0x34, 0x7f, 0xb6, 0x7b, 0x51, 0xf4, 0xfa, 0xc0, 0xd2, 0xfe, 0x97, + 0x32, 0xfe, 0xe7, 0x93, 0x2e, 0x94, 0x5d, 0xc2, 0xab, 0x36, 0x18, 0xa9, 0xbe, 0x20, 0x37, 0x8e, + 0xa4, 0x4c, 0x0f, 0x61, 0xab, 0x80, 0xa6, 0x8c, 0xec, 0xf0, 0xed, 0x63, 0xba, 0xb1, 0x94, 0x77, + 0x5b, 0xdd, 0xf9, 0xb7, 0xb3, 0x12, 0x50, 0x6c, 0xfc, 0x2e, 0x3c, 0x76, 0x28, 0xbf, 0x46, 0x19, + 0x23, 0x8f, 0xa1, 0x99, 0x45, 0x2b, 0xfd, 0x9f, 0xce, 0xe9, 0xbf, 0x96, 0xd3, 0x9f, 0x11, 0x4b, + 0xac, 0xb4, 0x60, 0x43, 0xe2, 0x93, 0x59, 0x90, 0xd8, 0xf9, 0x04, 0x36, 0xe7, 0x09, 0xca, 0x52, + 0x1b, 0x56, 0xe6, 0x86, 0xc9, 0x14, 0xe6, 0x52, 0x2f, 0x1c, 0x9f, 0x1d, 0xe0, 0x79, 0x7d, 0xaf, + 0x95, 0xda, 0x82, 0x56, 0x4e, 0x4a, 0x5d, 0x71, 0x03, 0x36, 0x8f, 0x19, 0x8e, 0x52, 0x79, 0x4d, + 0x1c, 0xdc, 0x82, 0x56, 0x8e, 0xa2, 0x84, 0x7e, 0x09, 0xd7, 0xe6, 0x48, 0x8f, 0xfd, 0xd0, 0x1f, + 0xc6, 0xc3, 0x0b, 0x38, 0xa3, 0xdf, 0x00, 0x31, 0x1b, 0x6d, 0xe6, 0x0f, 0x51, 0xb2, 0x44, 0x2e, + 0x58, 0x65, 0x8e, 0x7b, 0x2a, 0x51, 0xe6, 0x4f, 0xa0, 0x73, 0x9e, 0xfe, 0x0b, 0xe4, 0x48, 0x38, + 0xee, 0x10, 0x56, 0x10, 0x53, 0x1b, 0x8c, 0x3c, 0x49, 0x05, 0xd5, 0x83, 0x1b, 0xf3, 0xb4, 0x67, + 0x21, 0xf3, 0x83, 0x3d, 0xde, 0x6a, 0xdf, 0x51, 0x60, 0xb7, 0xc0, 0x7c, 0x9d, 0x0d, 0xe5, 0x49, + 0x13, 0xf4, 0x07, 0x28, 0xe1, 0x99, 0x16, 0xe6, 0x47, 0xd0, 0xc8, 0x60, 0x55, 0x26, 0x9a, 0xb0, + 0xe4, 0x78, 0x1e, 0x49, 0xd6, 0x04, 0x09, 0xf0, 0x1c, 0x58, 0x88, 0xa2, 0x73, 0x72, 0x90, 0x27, + 0x29, 0xcb, 0x3b, 0xd0, 0x7a, 0x9e, 0xc2, 0xf3, 0x2b, 0x5d, 0xd8, 0x12, 0x56, 0x55, 0x4b, 0x30, + 0x0f, 0xc0, 0xc8, 0x0b, 0x5c, 0xaa, 0x19, 0x5d, 0x4b, 0xeb, 0x99, 0x55, 0x6b, 0x62, 0xbe, 0x06, + 0x25, 0xdf, 0x53, 0x8f, 0x91, 
0x92, 0xef, 0x65, 0x0e, 0xa2, 0x34, 0x57, 0x00, 0xdb, 0xd0, 0x39, + 0x4f, 0x99, 0x8a, 0xb3, 0x01, 0xeb, 0x5f, 0x87, 0x3e, 0x93, 0x17, 0x30, 0x49, 0xcc, 0x8f, 0x41, + 0x4f, 0x23, 0x2f, 0x50, 0x69, 0x3f, 0x68, 0xd0, 0x39, 0xc2, 0x51, 0x1c, 0x88, 0x6d, 0x35, 0x72, + 0x08, 0x0a, 0xd9, 0x37, 0x38, 0x26, 0xa1, 0x13, 0x24, 0x7e, 0xbf, 0x0f, 0x6b, 0xbc, 0x1e, 0x6c, + 0x97, 0x20, 0x87, 0x21, 0xcf, 0x0e, 0x93, 0x17, 0x55, 0x95, 0xa3, 0xef, 0x49, 0xec, 0xb7, 0x94, + 0xbf, 0xba, 0x1c, 0x97, 0x2b, 0x4d, 0x0f, 0x0e, 0x90, 0x28, 0x31, 0x3c, 0x3e, 0x87, 0xca, 0x50, + 0x78, 0x66, 0x3b, 0x81, 0xef, 0xc8, 0x01, 0x52, 0xde, 0xdd, 0x98, 0xdf, 0xc0, 0xf7, 0x38, 0xd1, + 0x2a, 0x4b, 0x56, 0x01, 0xe8, 0x1f, 0x43, 0x33, 0xd5, 0xaa, 0x66, 0x8b, 0xea, 0xa2, 0xb0, 0xd1, + 0x48, 0xd1, 0xa6, 0xfb, 0xea, 0x0d, 0xb8, 0x7e, 0x6e, 0x5c, 0x2a, 0x85, 0xbf, 0xd3, 0x64, 0xba, + 0x54, 0xa2, 0x93, 0x78, 0x7f, 0x04, 0xcb, 0x92, 0x5f, 0x1d, 0xfa, 0x39, 0x0e, 0x2a, 0xa6, 0x73, + 0x7d, 0x2b, 0x9d, 0xeb, 0x5b, 0x51, 0x46, 0x17, 0x0a, 0x32, 0xca, 0xfb, 0x7b, 0xc6, 0xbf, 0xd9, + 0x0a, 0x74, 0x1f, 0x0d, 0x31, 0x43, 0xd9, 0xc3, 0xff, 0x8d, 0x06, 0xcd, 0x2c, 0x5e, 0x9d, 0xff, + 0x1d, 0x68, 0x78, 0x28, 0x22, 0xc8, 0x15, 0xc6, 0xb2, 0xa5, 0x70, 0xb7, 0x64, 0x68, 0x96, 0x3e, + 0x23, 0x4f, 0x7d, 0xbc, 0x0b, 0x55, 0x75, 0x58, 0x6a, 0x66, 0x94, 0x2e, 0x32, 0x33, 0xd4, 0x01, + 0x4b, 0x88, 0x5f, 0xe1, 0x67, 0xa1, 0x87, 0x8b, 0x9c, 0x6d, 0x83, 0x91, 0x27, 0xa9, 0xf8, 0xae, + 0x4e, 0x87, 0xe4, 0x0b, 0x87, 0x1e, 0x11, 0xcc, 0x59, 0xbc, 0x44, 0xf0, 0xff, 0xa1, 0x5d, 0x44, + 0x54, 0xa2, 0x7f, 0xd6, 0xa0, 0x7e, 0x8c, 0xb2, 0xb7, 0xe2, 0x6d, 0x0f, 0xb4, 0xe0, 0x74, 0x4a, + 0x45, 0xf5, 0xfe, 0x19, 0xb4, 0xc4, 0x33, 0x81, 0x27, 0x88, 0xb0, 0x82, 0x37, 0xc2, 0x86, 0x20, + 0xcf, 0x77, 0xcb, 0xfc, 0x73, 0x6b, 0xb1, 0xe0, 0xb9, 0xd5, 0x80, 0xf5, 0x54, 0x1c, 0x2a, 0xba, + 0x47, 0xe9, 0xd8, 0x2d, 0x24, 0xec, 0x4e, 0x33, 0xf3, 0x96, 0x61, 0x9a, 0xd7, 0xe0, 0x6a, 0xa1, + 0x32, 0x65, 0xeb, 0x57, 0xbc, 0xcf, 0x67, 0x06, 0xd8, 0x5e, 0xe8, 
0x3d, 0x40, 0x2c, 0xb3, 0x6a, + 0xe8, 0x3f, 0x87, 0x0d, 0xca, 0x70, 0x94, 0x0e, 0xde, 0x1e, 0x62, 0x2f, 0x79, 0x5d, 0xdf, 0x2a, + 0xd8, 0x60, 0xb2, 0x43, 0x11, 0x7b, 0xc8, 0x6a, 0xd0, 0x3c, 0x92, 0x3f, 0x5e, 0x6e, 0xbe, 0xd6, + 0x81, 0xe9, 0x0f, 0x11, 0xd5, 0xc1, 0xa4, 0x47, 0x7c, 0xcf, 0xbe, 0xd0, 0xee, 0x24, 0xea, 0xbd, + 0x22, 0x25, 0xd4, 0x8f, 0x41, 0x3f, 0x9b, 0xae, 0x45, 0xb2, 0xc4, 0xdf, 0x7f, 0x93, 0xd3, 0xf9, + 0xfd, 0x48, 0xd5, 0x61, 0xb6, 0x91, 0xf0, 0x4d, 0x67, 0x9e, 0x70, 0x81, 0x8e, 0x7c, 0x0c, 0xd5, + 0xbb, 0x8e, 0x7b, 0x1a, 0x4f, 0x37, 0xd9, 0x6d, 0x28, 0xbb, 0x38, 0x74, 0x63, 0x42, 0x50, 0xe8, + 0x4e, 0x54, 0xef, 0x4d, 0xa3, 0x38, 0x87, 0x78, 0x8e, 0xca, 0x72, 0x51, 0x6f, 0xd8, 0x34, 0xca, + 0xfc, 0x0c, 0x6a, 0x89, 0x52, 0xe5, 0xc2, 0x2d, 0x58, 0x42, 0xa3, 0x59, 0xb1, 0xd4, 0xba, 0xc9, + 0x3f, 0x64, 0xf6, 0x39, 0xd6, 0x92, 0x44, 0x35, 0x69, 0x19, 0x26, 0xe8, 0x80, 0xe0, 0x61, 0xc6, + 0x2f, 0x73, 0x8f, 0x5f, 0xd3, 0x1c, 0xed, 0xad, 0xd4, 0xff, 0x02, 0x2a, 0xcf, 0xdf, 0x38, 0xa1, + 0x79, 0xb6, 0xc6, 0x98, 0x9c, 0x9e, 0x04, 0x78, 0x9c, 0x0c, 0xca, 0x04, 0xe6, 0xb4, 0x53, 0x34, + 0xa1, 0x91, 0xe3, 0x22, 0xf5, 0x9b, 0xdd, 0x14, 0x36, 0xbf, 0x84, 0xea, 0xf3, 0xcb, 0x8e, 0xf3, + 0xbb, 0x5f, 0xfd, 0xe5, 0x55, 0x47, 0xfb, 0xeb, 0xab, 0x8e, 0xf6, 0xf7, 0x57, 0x1d, 0xed, 0xb7, + 0xff, 0xe8, 0xfc, 0xdf, 0xf7, 0xdd, 0x91, 0xcf, 0x10, 0xa5, 0x5d, 0x1f, 0xef, 0xc8, 0xaf, 0x9d, + 0x3e, 0xde, 0x19, 0xb1, 0x1d, 0xf1, 0x1f, 0xac, 0x9d, 0xdc, 0x93, 0xb7, 0xb7, 0x2c, 0x08, 0x77, + 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x11, 0xbb, 0x4b, 0xec, 0x4b, 0x1b, 0x00, 0x00, +} + +func (m *TableDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } + +func (m *TableDefinition) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.RowCount != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.RowCount)) + i-- + dAtA[i] = 0x38 + } + if m.DataLength != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.DataLength)) + i-- + dAtA[i] = 0x30 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + } + if len(m.PrimaryKeyColumns) > 0 { + for iNdEx := len(m.PrimaryKeyColumns) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PrimaryKeyColumns[iNdEx]) + copy(dAtA[i:], m.PrimaryKeyColumns[iNdEx]) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.PrimaryKeyColumns[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Columns) > 0 { + for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Columns[iNdEx]) + copy(dAtA[i:], m.Columns[iNdEx]) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Columns[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Schema) > 0 { + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchemaDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *SchemaDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SchemaDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + } + if len(m.TableDefinitions) > 0 { + for iNdEx := len(m.TableDefinitions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TableDefinitions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.DatabaseSchema) > 0 { + i -= len(m.DatabaseSchema) + copy(dAtA[i:], m.DatabaseSchema) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.DatabaseSchema))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchemaChangeResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchemaChangeResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SchemaChangeResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AfterSchema != nil { + { + size, err := m.AfterSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BeforeSchema != nil { + { + size, err := 
m.BeforeSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UserPermission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserPermission) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserPermission) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Privileges) > 0 { + for k := range m.Privileges { + v := m.Privileges[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.PasswordChecksum != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.PasswordChecksum)) + i-- + dAtA[i] = 0x18 + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DbPermission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DbPermission) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DbPermission) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Privileges) > 0 { + for k := range m.Privileges { + v := m.Privileges[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + } + if len(m.Db) > 0 { + i -= len(m.Db) + copy(dAtA[i:], m.Db) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Db))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Permissions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permissions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Permissions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DbPermissions) > 0 { + for iNdEx := len(m.DbPermissions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DbPermissions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } 
+ i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.UserPermissions) > 0 { + for iNdEx := len(m.UserPermissions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.UserPermissions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PingResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *SleepRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SleepRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Duration != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SleepResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SleepResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ExecuteHookRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteHookRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteHookRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ExtraEnv) > 0 { + for k := 
range m.ExtraEnv { + v := m.ExtraEnv[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parameters[iNdEx]) + copy(dAtA[i:], m.Parameters[iNdEx]) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Parameters[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteHookResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteHookResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteHookResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Stderr) > 0 { + i -= len(m.Stderr) + copy(dAtA[i:], m.Stderr) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Stderr))) + i-- + dAtA[i] = 0x1a + } + if len(m.Stdout) > 0 { + i -= len(m.Stdout) + copy(dAtA[i:], m.Stdout) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Stdout))) + i-- + dAtA[i] = 0x12 + } + if m.ExitStatus != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.ExitStatus)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetSchemaRequest) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSchemaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SchemaDefinition != nil { + { + size, err := m.SchemaDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetPermissionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetPermissionsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetPermissionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *GetPermissionsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetPermissionsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetPermissionsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Permissions != nil { + { + size, err := m.Permissions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetReadOnlyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReadOnlyRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetReadOnlyRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *SetReadOnlyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReadOnlyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetReadOnlyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *SetReadWriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReadWriteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetReadWriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *SetReadWriteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReadWriteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetReadWriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ChangeTypeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangeTypeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChangeTypeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletType != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ChangeTypeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangeTypeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChangeTypeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RefreshStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RefreshStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RefreshStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l 
+ if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RefreshStateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RefreshStateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RefreshStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RunHealthCheckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunHealthCheckRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunHealthCheckRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RunHealthCheckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunHealthCheckResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunHealthCheckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *IgnoreHealthErrorRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IgnoreHealthErrorRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IgnoreHealthErrorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Pattern) > 0 { + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IgnoreHealthErrorResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IgnoreHealthErrorResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IgnoreHealthErrorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReloadSchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReloadSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReloadSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i 
+ var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReloadSchemaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReloadSchemaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReloadSchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *PreflightSchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreflightSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreflightSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Changes) > 0 { + for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Changes[iNdEx]) + copy(dAtA[i:], m.Changes[iNdEx]) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Changes[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PreflightSchemaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreflightSchemaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreflightSchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ChangeResults) > 0 { + for iNdEx := len(m.ChangeResults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ChangeResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ApplySchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplySchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplySchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AfterSchema != nil { + { + size, err := m.AfterSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.BeforeSchema != nil { + { + size, err := m.BeforeSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AllowReplication { + i-- + if m.AllowReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + 
i-- + dAtA[i] = 0x18 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApplySchemaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplySchemaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplySchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AfterSchema != nil { + { + size, err := m.AfterSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BeforeSchema != nil { + { + size, err := m.BeforeSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LockTablesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockTablesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LockTablesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *LockTablesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockTablesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LockTablesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *UnlockTablesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockTablesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UnlockTablesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *UnlockTablesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockTablesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UnlockTablesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ExecuteFetchAsDbaRequest) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteFetchAsDbaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteFetchAsDbaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ReloadSchema { + i-- + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.DisableBinlogs { + i-- + if m.DisableBinlogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteFetchAsDbaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteFetchAsDbaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteFetchAsDbaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteFetchAsAllPrivsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteFetchAsAllPrivsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ReloadSchema { + i-- + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteFetchAsAllPrivsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteFetchAsAllPrivsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteFetchAsAllPrivsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteFetchAsAppRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteFetchAsAppRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteFetchAsAppRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MaxRows != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x10 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteFetchAsAppResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteFetchAsAppResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteFetchAsAppResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReplicationStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReplicationStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MasterStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MasterStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MasterStatusRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *MasterStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MasterStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MasterStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MasterPositionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MasterPositionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MasterPositionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *MasterPositionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MasterPositionResponse) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MasterPositionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WaitForPositionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitForPositionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WaitForPositionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WaitForPositionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitForPositionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WaitForPositionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, 
nil +} + +func (m *StopReplicationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopReplicationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopReplicationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationMinimumRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationMinimumRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopReplicationMinimumRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WaitTimeout != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, 
uint64(m.WaitTimeout)) + i-- + dAtA[i] = 0x10 + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationMinimumResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationMinimumResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopReplicationMinimumResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartReplicationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartReplicationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartReplicationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StartReplicationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*StartReplicationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartReplicationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StartReplicationUntilAfterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartReplicationUntilAfterRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartReplicationUntilAfterRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WaitTimeout != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.WaitTimeout)) + i-- + dAtA[i] = 0x10 + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartReplicationUntilAfterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartReplicationUntilAfterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartReplicationUntilAfterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *GetReplicasRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetReplicasRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetReplicasRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *GetReplicasResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetReplicasResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetReplicasResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Addrs) > 0 { + for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addrs[iNdEx]) + copy(dAtA[i:], m.Addrs[iNdEx]) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Addrs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResetReplicationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetReplicationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetReplicationRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ResetReplicationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetReplicationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetReplicationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *VReplicationExecRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VReplicationExecRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VReplicationExecRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VReplicationExecResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VReplicationExecResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VReplicationExecResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VReplicationWaitForPosRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VReplicationWaitForPosRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VReplicationWaitForPosRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VReplicationWaitForPosResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VReplicationWaitForPosResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VReplicationWaitForPosResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + 
if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InitMasterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitMasterRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InitMasterRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InitMasterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitMasterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InitMasterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PopulateReparentJournalRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PopulateReparentJournalRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PopulateReparentJournalRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ReplicationPosition) > 0 { + i -= len(m.ReplicationPosition) + copy(dAtA[i:], m.ReplicationPosition) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.ReplicationPosition))) + i-- + dAtA[i] = 0x22 + } + if m.MasterAlias != nil { + { + size, err := m.MasterAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ActionName) > 0 { + i -= len(m.ActionName) + copy(dAtA[i:], m.ActionName) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.ActionName))) + i-- + dAtA[i] = 0x12 + } + if m.TimeCreatedNs != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PopulateReparentJournalResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PopulateReparentJournalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PopulateReparentJournalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InitReplicaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitReplicaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InitReplicaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TimeCreatedNs != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x18 + } + if len(m.ReplicationPosition) > 0 { + i -= len(m.ReplicationPosition) + copy(dAtA[i:], m.ReplicationPosition) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.ReplicationPosition))) + i-- + dAtA[i] = 0x12 + } + if m.Parent != nil { + { + size, err := m.Parent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InitReplicaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitReplicaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InitReplicaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *DemoteMasterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DemoteMasterRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DemoteMasterRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *DemoteMasterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DemoteMasterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DemoteMasterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MasterStatus != nil { + { + size, err := m.MasterStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DeprecatedPosition) > 0 { + i -= len(m.DeprecatedPosition) + copy(dAtA[i:], m.DeprecatedPosition) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.DeprecatedPosition))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UndoDemoteMasterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UndoDemoteMasterRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UndoDemoteMasterRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *UndoDemoteMasterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UndoDemoteMasterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UndoDemoteMasterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReplicaWasPromotedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaWasPromotedRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaWasPromotedRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReplicaWasPromotedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaWasPromotedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaWasPromotedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *SetMasterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *SetMasterRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetMasterRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0x22 + } + if m.ForceStartReplication { + i-- + if m.ForceStartReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.TimeCreatedNs != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x10 + } + if m.Parent != nil { + { + size, err := m.Parent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetMasterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetMasterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetMasterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReplicaWasRestartedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ReplicaWasRestartedRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaWasRestartedRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Parent != nil { + { + size, err := m.Parent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReplicaWasRestartedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaWasRestartedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaWasRestartedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationAndGetStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationAndGetStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopReplicationAndGetStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.StopReplicationMode != 0 { + i = 
encodeVarintTabletmanagerdata(dAtA, i, uint64(m.StopReplicationMode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationAndGetStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationAndGetStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.HybridStatus != nil { + { + size, err := m.HybridStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PromoteReplicaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PromoteReplicaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PromoteReplicaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *PromoteReplicaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PromoteReplicaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PromoteReplicaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BackupRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AllowMaster { + i-- + if m.AllowMaster { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Concurrency != 0 { + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BackupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupResponse) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Event != nil { + { + size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RestoreFromBackupRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestoreFromBackupRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RestoreFromBackupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RestoreFromBackupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestoreFromBackupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RestoreFromBackupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Event != nil { + { + size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VExecRequest) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VExecRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VExecRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x1a + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VExecResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VExecResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VExecResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTabletmanagerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTabletmanagerdata(dAtA []byte, offset int, v uint64) int { + offset -= 
sovTabletmanagerdata(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TableDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.Schema) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if len(m.Columns) > 0 { + for _, s := range m.Columns { + l = len(s) + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if len(m.PrimaryKeyColumns) > 0 { + for _, s := range m.PrimaryKeyColumns { + l = len(s) + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.DataLength != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.DataLength)) + } + if m.RowCount != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.RowCount)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SchemaDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DatabaseSchema) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if len(m.TableDefinitions) > 0 { + for _, e := range m.TableDefinitions { + l = e.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SchemaChangeResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeSchema != nil { + l = m.BeforeSchema.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { 
+ n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UserPermission) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.PasswordChecksum != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.PasswordChecksum)) + } + if len(m.Privileges) > 0 { + for k, v := range m.Privileges { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTabletmanagerdata(uint64(len(k))) + 1 + len(v) + sovTabletmanagerdata(uint64(len(v))) + n += mapEntrySize + 1 + sovTabletmanagerdata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DbPermission) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.Db) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if len(m.Privileges) > 0 { + for k, v := range m.Privileges { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTabletmanagerdata(uint64(len(k))) + 1 + len(v) + sovTabletmanagerdata(uint64(len(v))) + n += mapEntrySize + 1 + sovTabletmanagerdata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Permissions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UserPermissions) > 0 { + for _, e := range m.UserPermissions { + l = e.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if len(m.DbPermissions) > 0 { + for _, e := range m.DbPermissions { + l = e.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SleepRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Duration != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.Duration)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SleepResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteHookRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if len(m.Parameters) > 0 { + for _, s := range m.Parameters { + l = len(s) + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if len(m.ExtraEnv) > 0 { + for k, v := range m.ExtraEnv { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTabletmanagerdata(uint64(len(k))) + 1 + len(v) + sovTabletmanagerdata(uint64(len(v))) + n += mapEntrySize + 1 + sovTabletmanagerdata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteHookResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExitStatus != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.ExitStatus)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*GetSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSchemaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SchemaDefinition != nil { + l = m.SchemaDefinition.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetPermissionsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetPermissionsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Permissions != nil { + l = m.Permissions.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetReadOnlyRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetReadOnlyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetReadWriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetReadWriteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*ChangeTypeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletType != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.TabletType)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChangeTypeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RefreshStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RefreshStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RunHealthCheckRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RunHealthCheckResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *IgnoreHealthErrorRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pattern) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *IgnoreHealthErrorResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReloadSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReloadSchemaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PreflightSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Changes) > 0 { + for _, s := range m.Changes { + l = len(s) + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PreflightSchemaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChangeResults) > 0 { + for _, e := range m.ChangeResults { + l = e.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ApplySchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sql) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.Force { + n += 2 + } + if m.AllowReplication { + n += 2 + } + if m.BeforeSchema != nil { + l = m.BeforeSchema.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ApplySchemaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeSchema != nil { + l = m.BeforeSchema.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LockTablesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LockTablesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *UnlockTablesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UnlockTablesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteFetchAsDbaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.MaxRows)) + } + if m.DisableBinlogs { + n += 2 + } + if m.ReloadSchema { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteFetchAsDbaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteFetchAsAllPrivsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.MaxRows)) + } + if m.ReloadSchema { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteFetchAsAllPrivsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteFetchAsAppRequest) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.MaxRows)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteFetchAsAppResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicationStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicationStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MasterStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MasterStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MasterPositionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MasterPositionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*WaitForPositionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WaitForPositionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StopReplicationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StopReplicationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StopReplicationMinimumRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.WaitTimeout != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.WaitTimeout)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StopReplicationMinimumResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartReplicationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartReplicationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartReplicationUntilAfterRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 
+ l + sovTabletmanagerdata(uint64(l)) + } + if m.WaitTimeout != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.WaitTimeout)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartReplicationUntilAfterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetReplicasRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetReplicasResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addrs) > 0 { + for _, s := range m.Addrs { + l = len(s) + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResetReplicationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResetReplicationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VReplicationExecRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VReplicationExecResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VReplicationWaitForPosRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.Id)) + 
} + l = len(m.Position) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VReplicationWaitForPosResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InitMasterRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InitMasterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PopulateReparentJournalRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeCreatedNs != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.TimeCreatedNs)) + } + l = len(m.ActionName) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.MasterAlias != nil { + l = m.MasterAlias.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.ReplicationPosition) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PopulateReparentJournalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InitReplicaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.ReplicationPosition) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.TimeCreatedNs != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.TimeCreatedNs)) + } + if m.XXX_unrecognized != nil { + 
n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InitReplicaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DemoteMasterRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DemoteMasterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DeprecatedPosition) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.MasterStatus != nil { + l = m.MasterStatus.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UndoDemoteMasterRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UndoDemoteMasterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicaWasPromotedRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicaWasPromotedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetMasterRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.TimeCreatedNs != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.TimeCreatedNs)) + } + if m.ForceStartReplication { + n += 2 + } + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != 
nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetMasterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicaWasRestartedRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReplicaWasRestartedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StopReplicationAndGetStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StopReplicationMode != 0 { + n += 1 + sovTabletmanagerdata(uint64(m.StopReplicationMode)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StopReplicationAndGetStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HybridStatus != nil { + l = m.HybridStatus.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PromoteReplicaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PromoteReplicaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BackupRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Concurrency != 0 { + n += 
1 + sovTabletmanagerdata(uint64(m.Concurrency)) + } + if m.AllowMaster { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BackupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RestoreFromBackupRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RestoreFromBackupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VExecRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VExecResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovTabletmanagerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTabletmanagerdata(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTabletmanagerdata(x uint64) (n int) { + return sovTabletmanagerdata(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TableDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Columns = append(m.Columns, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKeyColumns", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrimaryKeyColumns = append(m.PrimaryKeyColumns, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataLength", wireType) + } + m.DataLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataLength |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) + } + m.RowCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatabaseSchema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatabaseSchema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableDefinitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableDefinitions = append(m.TableDefinitions, &TableDefinition{}) + if err := m.TableDefinitions[len(m.TableDefinitions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaChangeResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaChangeResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaChangeResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserPermission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserPermission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserPermission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { 
+ return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordChecksum", wireType) + } + m.PasswordChecksum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PasswordChecksum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Privileges == nil { + m.Privileges = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := 
skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Privileges[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DbPermission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DbPermission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DbPermission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Db", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Db = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postStringIndexmapvalue > l 
{ + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Privileges[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permissions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permissions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permissions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserPermissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserPermissions = append(m.UserPermissions, &UserPermission{}) + if err := m.UserPermissions[len(m.UserPermissions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbPermissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbPermissions = append(m.DbPermissions, &DbPermission{}) + if err := m.DbPermissions[len(m.DbPermissions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SleepRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SleepResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteHookRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraEnv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtraEnv == nil { + m.ExtraEnv = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExtraEnv[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteHookResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeViews = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ExcludeTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SchemaDefinition == nil { + m.SchemaDefinition = &SchemaDefinition{} + } + if err := m.SchemaDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPermissionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPermissionsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Permissions == nil { + m.Permissions = &Permissions{} + } + if err := m.Permissions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadOnlyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadOnlyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadOnlyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadOnlyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadOnlyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadOnlyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadWriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadWriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadWriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadWriteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadWriteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadWriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTypeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTypeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTypeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTypeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunHealthCheckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunHealthCheckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IgnoreHealthErrorRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IgnoreHealthErrorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IgnoreHealthErrorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IgnoreHealthErrorResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IgnoreHealthErrorResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IgnoreHealthErrorResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReloadSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WaitPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReloadSchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreflightSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreflightSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreflightSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreflightSchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreflightSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreflightSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangeResults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChangeResults = append(m.ChangeResults, &SchemaChangeResult{}) + if err := m.ChangeResults[len(m.ChangeResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplySchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field AllowReplication", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowReplication = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplySchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LockTablesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockTablesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LockTablesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockTablesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockTablesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockTablesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockTablesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockTablesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsDbaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableBinlogs = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) + default: + iNdEx = 
preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsDbaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsAllPrivsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsAllPrivsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsAppRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsAppResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.Status{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MasterStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MasterStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MasterStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MasterStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MasterStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MasterStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.MasterStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MasterPositionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MasterPositionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MasterPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MasterPositionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MasterPositionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MasterPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitForPositionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitForPositionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitForPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitForPositionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitForPositionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitForPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationMinimumRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationMinimumRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationMinimumRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) + } + m.WaitTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WaitTimeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationMinimumResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationMinimumResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationMinimumResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationUntilAfterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationUntilAfterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationUntilAfterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) + } + m.WaitTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WaitTimeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, 
err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationUntilAfterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationUntilAfterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationUntilAfterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetReplicasRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetReplicasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetReplicasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetReplicasResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetReplicasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetReplicasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetReplicationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetReplicationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetReplicationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetReplicationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VReplicationExecRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VReplicationExecRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VReplicationExecRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VReplicationExecResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VReplicationExecResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VReplicationExecResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VReplicationWaitForPosRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VReplicationWaitForPosRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VReplicationWaitForPosRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VReplicationWaitForPosResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VReplicationWaitForPosResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VReplicationWaitForPosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitMasterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitMasterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitMasterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitMasterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitMasterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitMasterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PopulateReparentJournalRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PopulateReparentJournalRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PopulateReparentJournalRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + } + m.TimeCreatedNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreatedNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field MasterAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MasterAlias == nil { + m.MasterAlias = &topodata.TabletAlias{} + } + if err := m.MasterAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PopulateReparentJournalResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PopulateReparentJournalResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PopulateReparentJournalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitReplicaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitReplicaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} + } + if err := m.Parent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + } + m.TimeCreatedNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreatedNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitReplicaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitReplicaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DemoteMasterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DemoteMasterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DemoteMasterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DemoteMasterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DemoteMasterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DemoteMasterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MasterStatus == nil { + m.MasterStatus = &replicationdata.MasterStatus{} + } + if err := m.MasterStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UndoDemoteMasterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UndoDemoteMasterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UndoDemoteMasterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UndoDemoteMasterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UndoDemoteMasterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UndoDemoteMasterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaWasPromotedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaWasPromotedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaWasPromotedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaWasPromotedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaWasPromotedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaWasPromotedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetMasterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetMasterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetMasterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} + } + if err := m.Parent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + } + m.TimeCreatedNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreatedNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ForceStartReplication", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForceStartReplication = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WaitPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetMasterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetMasterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetMasterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaWasRestartedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaWasRestartedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaWasRestartedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} + } + if err := m.Parent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaWasRestartedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaWasRestartedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaWasRestartedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationAndGetStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopReplicationMode", wireType) + } + m.StopReplicationMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StopReplicationMode |= replicationdata.StopReplicationMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationAndGetStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HybridStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HybridStatus == nil { + m.HybridStatus = &replicationdata.Status{} + } + if err := m.HybridStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.StopReplicationStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PromoteReplicaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PromoteReplicaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PromoteReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PromoteReplicaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PromoteReplicaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PromoteReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowMaster", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowMaster = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestoreFromBackupRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestoreFromBackupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VExecRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VExecRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VExecRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VExecResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VExecResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VExecResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTabletmanagerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTabletmanagerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTabletmanagerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTabletmanagerdata(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTabletmanagerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTabletmanagerdata + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTabletmanagerdata + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTabletmanagerdata + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTabletmanagerdata = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTabletmanagerdata = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTabletmanagerdata = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index 338257b4306..5df76707ca2 100644 --- 
a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: tabletmanagerservice.proto package tabletmanagerservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - tabletmanagerdata "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) @@ -30,70 +29,72 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor_9ee75fe63cfd9360) } var fileDescriptor_9ee75fe63cfd9360 = []byte{ - // 998 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x6d, 0x6f, 0x23, 0x35, - 0x10, 0xc7, 0xa9, 0xc4, 0x9d, 0x84, 0x79, 0x36, 0x88, 0x93, 0x8a, 0x04, 0x07, 0xd7, 0x83, 0xe3, - 0x0e, 0x9a, 0x7b, 0xe0, 0x78, 0x9f, 0x7b, 0x68, 0xaf, 0xa8, 0x15, 0x21, 0xe9, 0x03, 0x02, 0x09, - 0xc9, 0x4d, 0xa6, 0x89, 0xe9, 0xc6, 0x5e, 0x6c, 0x27, 0xa2, 0xaf, 0x90, 0x78, 0x8b, 0xc4, 0x07, - 0xe2, 0xd3, 0x9d, 0xb2, 0x59, 0x7b, 0xc7, 0xbb, 0xb3, 0xce, 0xf6, 0x5d, 0x94, 0xf9, 0xcd, 0xfc, - 0xed, 0xd9, 0xf1, 0x8c, 0x77, 0xd9, 0xb6, 0x13, 0xe7, 0x19, 0xb8, 0xb9, 0x50, 0x62, 0x0a, 0xc6, - 0x82, 0x59, 0xca, 0x31, 0xec, 0xe6, 0x46, 0x3b, 0xcd, 0x3f, 0xa6, 0x6c, 0xdb, 0xb7, 0xa2, 0x7f, - 0x27, 0xc2, 0x89, 0x35, 0xfe, 0xf8, 0xff, 0x1d, 0xf6, 0xee, 0x71, 0x61, 0x3b, 0x5a, 0xdb, 0xf8, - 0x01, 0x7b, 0x73, 0x20, 0xd5, 0x94, 0x7f, 0xb6, 0xdb, 0xf4, 0x59, 0x19, 0x86, 0xf0, 0xe7, 0x02, - 0xac, 0xdb, 0xfe, 0xbc, 0xd5, 0x6e, 0x73, 0xad, 0x2c, 0x7c, 0xf9, 0x06, 0x3f, 0x64, 0x37, 0x46, - 0x19, 0x40, 0xce, 0x29, 0xb6, 0xb0, 0xf8, 0x60, 0xb7, 0xdb, 0x81, 0x10, 0xed, 0x77, 0xf6, 0xf6, - 0xcb, 0xbf, 0x60, 0xbc, 0x70, 0xf0, 0x4a, 0xeb, 0x4b, 0x7e, 0x97, 0x70, 0x41, 0x76, 0x1f, 0xf9, - 0xab, 0x4d, 0x58, 
0x88, 0xff, 0x0b, 0x7b, 0x6b, 0x1f, 0xdc, 0x68, 0x3c, 0x83, 0xb9, 0xe0, 0x77, - 0x08, 0xb7, 0x60, 0xf5, 0xb1, 0x77, 0xd2, 0x50, 0x88, 0x3c, 0x65, 0xef, 0xed, 0x83, 0x1b, 0x80, - 0x99, 0x4b, 0x6b, 0xa5, 0x56, 0x96, 0xdf, 0xa3, 0x3d, 0x11, 0xe2, 0x35, 0xbe, 0xe9, 0x40, 0xe2, - 0x14, 0x8d, 0xc0, 0x0d, 0x41, 0x4c, 0x7e, 0x52, 0xd9, 0x15, 0x99, 0x22, 0x64, 0x4f, 0xa5, 0x28, - 0xc2, 0x42, 0x7c, 0xc1, 0xde, 0x29, 0x0d, 0x67, 0x46, 0x3a, 0xe0, 0x09, 0xcf, 0x02, 0xf0, 0x0a, - 0x5f, 0x6f, 0xe4, 0x82, 0xc4, 0x6f, 0x8c, 0x3d, 0x9f, 0x09, 0x35, 0x85, 0xe3, 0xab, 0x1c, 0x38, - 0x95, 0xe1, 0xca, 0xec, 0xc3, 0xdf, 0xdd, 0x40, 0xe1, 0xf5, 0x0f, 0xe1, 0xc2, 0x80, 0x9d, 0x8d, - 0x9c, 0x68, 0x59, 0x3f, 0x06, 0x52, 0xeb, 0x8f, 0x39, 0xfc, 0xac, 0x87, 0x0b, 0xf5, 0x0a, 0x44, - 0xe6, 0x66, 0xcf, 0x67, 0x30, 0xbe, 0x24, 0x9f, 0x75, 0x8c, 0xa4, 0x9e, 0x75, 0x9d, 0x0c, 0x42, - 0x39, 0xfb, 0xf0, 0x60, 0xaa, 0xb4, 0x81, 0xb5, 0xf9, 0xa5, 0x31, 0xda, 0xf0, 0x07, 0x44, 0x84, - 0x06, 0xe5, 0xe5, 0xbe, 0xed, 0x06, 0xc7, 0xd9, 0xcb, 0xb4, 0x98, 0x94, 0x67, 0x84, 0xce, 0x5e, - 0x05, 0xa4, 0xb3, 0x87, 0xb9, 0x20, 0xf1, 0x07, 0x7b, 0x7f, 0x60, 0xe0, 0x22, 0x93, 0xd3, 0x99, - 0x3f, 0x89, 0x54, 0x52, 0x6a, 0x8c, 0x17, 0xba, 0xdf, 0x05, 0xc5, 0x87, 0xa5, 0x9f, 0xe7, 0xd9, - 0x55, 0xa9, 0x43, 0x15, 0x11, 0xb2, 0xa7, 0x0e, 0x4b, 0x84, 0xe1, 0x4a, 0x3e, 0xd4, 0xe3, 0xcb, - 0xa2, 0xbb, 0x5a, 0xb2, 0x92, 0x2b, 0x73, 0xaa, 0x92, 0x31, 0x85, 0x9f, 0xc5, 0x89, 0xca, 0xaa, - 0xf0, 0xd4, 0xb2, 0x30, 0x90, 0x7a, 0x16, 0x31, 0x87, 0x0b, 0xac, 0x6c, 0x94, 0x7b, 0xe0, 0xc6, - 0xb3, 0xbe, 0x7d, 0x71, 0x2e, 0xc8, 0x02, 0x6b, 0x50, 0xa9, 0x02, 0x23, 0xe0, 0xa0, 0xf8, 0x37, - 0xfb, 0x24, 0x36, 0xf7, 0xb3, 0x6c, 0x60, 0xe4, 0xd2, 0xf2, 0x87, 0x1b, 0x23, 0x79, 0xd4, 0x6b, - 0x3f, 0xba, 0x86, 0x47, 0xfb, 0x96, 0xfb, 0x79, 0xde, 0x61, 0xcb, 0xfd, 0x3c, 0xef, 0xbe, 0xe5, - 0x02, 0xc6, 0x8a, 0x43, 0xc8, 0x33, 0x39, 0x16, 0x4e, 0x6a, 0xb5, 0x6a, 0x26, 0x0b, 0x4b, 0x2a, - 0x36, 0xa8, 0x94, 0x22, 0x01, 0xe3, 0xca, 0x39, 0x12, 
0xd6, 0x81, 0x29, 0xc5, 0xa8, 0xca, 0xc1, - 0x40, 0xaa, 0x72, 0x62, 0x0e, 0xf7, 0xc0, 0xb5, 0x65, 0xa0, 0xad, 0x5c, 0x2d, 0x82, 0xec, 0x81, - 0x31, 0x92, 0xea, 0x81, 0x75, 0x12, 0xb7, 0x8b, 0x33, 0x21, 0xdd, 0x9e, 0xae, 0x94, 0x28, 0xff, - 0x1a, 0x93, 0x6a, 0x17, 0x0d, 0x14, 0x6b, 0x8d, 0x9c, 0xce, 0x51, 0x6a, 0x49, 0xad, 0x1a, 0x93, - 0xd2, 0x6a, 0xa0, 0xf8, 0x20, 0xd4, 0x8c, 0x47, 0x52, 0xc9, 0xf9, 0x62, 0x4e, 0x1e, 0x04, 0x1a, - 0x4d, 0x1d, 0x84, 0x36, 0x8f, 0xb0, 0x80, 0x39, 0xfb, 0x60, 0xe4, 0x84, 0x71, 0x78, 0xb7, 0xf4, - 0x16, 0x62, 0xc8, 0x8b, 0x3e, 0xe8, 0xc4, 0x06, 0xb9, 0x7f, 0xb7, 0xd8, 0x76, 0xdd, 0x7c, 0xa2, - 0x9c, 0xcc, 0xfa, 0x17, 0x0e, 0x0c, 0xff, 0xbe, 0x43, 0xb4, 0x0a, 0xf7, 0x6b, 0x78, 0x7a, 0x4d, - 0x2f, 0x3c, 0x18, 0xf6, 0xc1, 0x53, 0x96, 0x1c, 0x0c, 0xc8, 0x9e, 0x1a, 0x0c, 0x11, 0x86, 0x93, - 0x7b, 0x8a, 0xd6, 0xb0, 0x6a, 0x0f, 0x64, 0x72, 0xeb, 0x50, 0x2a, 0xb9, 0x4d, 0x16, 0x17, 0x13, - 0xb6, 0x56, 0x15, 0x4e, 0x16, 0x13, 0x8d, 0xa6, 0x8a, 0xa9, 0xcd, 0x03, 0xef, 0x77, 0x08, 0x16, - 0x36, 0x16, 0x53, 0x1d, 0x4a, 0xed, 0xb7, 0xc9, 0xe2, 0xb9, 0x7b, 0xa0, 0xa4, 0x5b, 0x37, 0x0d, - 0x72, 0xee, 0x56, 0xe6, 0xd4, 0xdc, 0xc5, 0x54, 0x08, 0xfe, 0xcf, 0x16, 0xbb, 0x35, 0xd0, 0xf9, - 0x22, 0x2b, 0x6e, 0x7d, 0xb9, 0x30, 0xa0, 0xdc, 0x8f, 0x7a, 0x61, 0x94, 0xc8, 0x38, 0x95, 0x9c, - 0x16, 0xd6, 0xeb, 0x3e, 0xbe, 0x8e, 0x0b, 0x2e, 0xd0, 0xd5, 0xe2, 0xca, 0xed, 0xf3, 0xb6, 0xc5, - 0x97, 0xf6, 0x54, 0x81, 0x46, 0x18, 0x1e, 0x11, 0x2f, 0x60, 0xae, 0x1d, 0x94, 0x39, 0xa4, 0x3c, - 0x31, 0x90, 0x1a, 0x11, 0x31, 0x87, 0x6b, 0xe2, 0x44, 0x4d, 0x74, 0x24, 0x73, 0x9f, 0xbc, 0x9b, - 0xc4, 0x50, 0xaa, 0x26, 0x9a, 0x6c, 0x90, 0xb3, 0x8c, 0x97, 0xdb, 0x3c, 0x13, 0x76, 0x60, 0xf4, - 0x0a, 0x9a, 0xf0, 0xc4, 0xe8, 0x44, 0x98, 0x97, 0xfc, 0xae, 0x23, 0x8d, 0x5f, 0x28, 0x47, 0xe0, - 0xeb, 0xf0, 0x0e, 0xfd, 0x0a, 0x14, 0xef, 0x6a, 0x27, 0x0d, 0x85, 0xc8, 0x4b, 0xf6, 0x51, 0xa5, - 0x3c, 0x04, 0xbb, 0xea, 0x6a, 0x30, 0xe1, 0xe9, 0x15, 0x06, 0xce, 0xab, 0xed, 0x76, 0xc5, 
0x83, - 0xee, 0x7f, 0x5b, 0xec, 0xd3, 0xda, 0xec, 0xe8, 0xab, 0xc9, 0xea, 0x95, 0x77, 0x7d, 0x97, 0x78, - 0xba, 0x79, 0xd6, 0x60, 0xde, 0x2f, 0xe4, 0x87, 0xeb, 0xba, 0xe1, 0x9b, 0x46, 0x99, 0x78, 0x7f, - 0x18, 0xee, 0x91, 0xef, 0x00, 0x18, 0x49, 0xdd, 0x34, 0xea, 0x64, 0x10, 0xfa, 0x99, 0xdd, 0x7c, - 0x26, 0xc6, 0x97, 0x8b, 0x9c, 0x53, 0x9f, 0x2a, 0xd6, 0x26, 0x1f, 0xf8, 0x8b, 0x04, 0xe1, 0x03, - 0x3e, 0xdc, 0xe2, 0x66, 0x75, 0xf5, 0xb3, 0x4e, 0x1b, 0xd8, 0x33, 0x7a, 0x5e, 0x46, 0x6f, 0xe9, - 0x75, 0x31, 0x95, 0xbe, 0xfa, 0x35, 0x60, 0xa4, 0x79, 0xc8, 0x6e, 0x9c, 0x16, 0xf3, 0x86, 0xfa, - 0x22, 0x73, 0x8a, 0x87, 0xcc, 0xed, 0x76, 0xc0, 0xc7, 0x7b, 0xf6, 0xe4, 0xd7, 0x47, 0x4b, 0xe9, - 0xc0, 0xda, 0x5d, 0xa9, 0x7b, 0xeb, 0x5f, 0xbd, 0xa9, 0xee, 0x2d, 0x5d, 0xaf, 0xf8, 0xb8, 0xd4, - 0xa3, 0x3e, 0x45, 0x9d, 0xdf, 0x2c, 0x6c, 0x4f, 0x5e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xba, 0xb0, - 0x28, 0x40, 0xc5, 0x12, 0x00, 0x00, + // 1025 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xdf, 0x6f, 0x1b, 0x45, + 0x10, 0xc7, 0x6b, 0x89, 0x56, 0x62, 0xf9, 0xbd, 0x20, 0x2a, 0x05, 0xc9, 0x14, 0x9a, 0x42, 0x69, + 0x21, 0x6e, 0x0b, 0xe5, 0xdd, 0x4d, 0x9b, 0x34, 0x28, 0x11, 0xc6, 0x6e, 0x12, 0x04, 0x12, 0xd2, + 0xc6, 0x9e, 0xd8, 0x47, 0xce, 0xb7, 0xc7, 0xee, 0xda, 0x22, 0x4f, 0x48, 0xbc, 0x22, 0xf1, 0xcc, + 0xdf, 0xc2, 0x5f, 0xc0, 0x23, 0x7f, 0x02, 0x0a, 0xff, 0x48, 0x75, 0xe7, 0xdb, 0xbd, 0xd9, 0xbb, + 0xb9, 0xf5, 0xf9, 0xcd, 0xf2, 0x7c, 0x66, 0xbe, 0xbb, 0x73, 0xb3, 0x33, 0x7b, 0xc7, 0xb6, 0x8c, + 0x38, 0x8b, 0xc1, 0xcc, 0x45, 0x22, 0xa6, 0xa0, 0x34, 0xa8, 0x65, 0x34, 0x86, 0x9d, 0x54, 0x49, + 0x23, 0xf9, 0x7b, 0x94, 0x6d, 0xeb, 0xa6, 0xf7, 0xef, 0x44, 0x18, 0xb1, 0xc2, 0x1f, 0xfd, 0xbd, + 0xcd, 0xde, 0x78, 0x91, 0xdb, 0x8e, 0x56, 0x36, 0x7e, 0xc0, 0x5e, 0x19, 0x44, 0xc9, 0x94, 0x77, + 0x77, 0xea, 0x3e, 0x99, 0x61, 0x08, 0xbf, 0x2c, 0x40, 0x9b, 0xad, 0x0f, 0x1b, 0xed, 0x3a, 0x95, + 0x89, 0x86, 0x8f, 0xaf, 0xf1, 0x43, 0x76, 
0x7d, 0x14, 0x03, 0xa4, 0x9c, 0x62, 0x73, 0x8b, 0x0d, + 0x76, 0xab, 0x19, 0x70, 0xd1, 0x7e, 0x62, 0xaf, 0x3d, 0xfb, 0x15, 0xc6, 0x0b, 0x03, 0xcf, 0xa5, + 0xbc, 0xe0, 0x77, 0x08, 0x17, 0x64, 0xb7, 0x91, 0x3f, 0x59, 0x87, 0xb9, 0xf8, 0xdf, 0xb3, 0x57, + 0xf7, 0xc1, 0x8c, 0xc6, 0x33, 0x98, 0x0b, 0x7e, 0x9b, 0x70, 0x73, 0x56, 0x1b, 0x7b, 0x3b, 0x0c, + 0xb9, 0xc8, 0x53, 0xf6, 0xe6, 0x3e, 0x98, 0x01, 0xa8, 0x79, 0xa4, 0x75, 0x24, 0x13, 0xcd, 0xef, + 0xd2, 0x9e, 0x08, 0xb1, 0x1a, 0x9f, 0xb5, 0x20, 0x71, 0x8a, 0x46, 0x60, 0x86, 0x20, 0x26, 0xdf, + 0x26, 0xf1, 0x25, 0x99, 0x22, 0x64, 0x0f, 0xa5, 0xc8, 0xc3, 0x5c, 0x7c, 0xc1, 0x5e, 0x2f, 0x0c, + 0xa7, 0x2a, 0x32, 0xc0, 0x03, 0x9e, 0x39, 0x60, 0x15, 0x3e, 0x5d, 0xcb, 0x39, 0x89, 0x1f, 0x19, + 0xdb, 0x9d, 0x89, 0x64, 0x0a, 0x2f, 0x2e, 0x53, 0xe0, 0x54, 0x86, 0x4b, 0xb3, 0x0d, 0x7f, 0x67, + 0x0d, 0x85, 0xd7, 0x3f, 0x84, 0x73, 0x05, 0x7a, 0x36, 0x32, 0xa2, 0x61, 0xfd, 0x18, 0x08, 0xad, + 0xdf, 0xe7, 0xf0, 0xb3, 0x1e, 0x2e, 0x92, 0xe7, 0x20, 0x62, 0x33, 0xdb, 0x9d, 0xc1, 0xf8, 0x82, + 0x7c, 0xd6, 0x3e, 0x12, 0x7a, 0xd6, 0x55, 0xd2, 0x09, 0xa5, 0xec, 0x9d, 0x83, 0x69, 0x22, 0x15, + 0xac, 0xcc, 0xcf, 0x94, 0x92, 0x8a, 0xdf, 0x27, 0x22, 0xd4, 0x28, 0x2b, 0xf7, 0x79, 0x3b, 0xd8, + 0xcf, 0x5e, 0x2c, 0xc5, 0xa4, 0x38, 0x23, 0x74, 0xf6, 0x4a, 0x20, 0x9c, 0x3d, 0xcc, 0x39, 0x89, + 0x9f, 0xd9, 0x5b, 0x03, 0x05, 0xe7, 0x71, 0x34, 0x9d, 0xd9, 0x93, 0x48, 0x25, 0xa5, 0xc2, 0x58, + 0xa1, 0x7b, 0x6d, 0x50, 0x7c, 0x58, 0xfa, 0x69, 0x1a, 0x5f, 0x16, 0x3a, 0x54, 0x11, 0x21, 0x7b, + 0xe8, 0xb0, 0x78, 0x18, 0xae, 0xe4, 0x43, 0x39, 0xbe, 0xc8, 0xbb, 0xab, 0x26, 0x2b, 0xb9, 0x34, + 0x87, 0x2a, 0x19, 0x53, 0xf8, 0x59, 0x1c, 0x27, 0x71, 0x19, 0x9e, 0x5a, 0x16, 0x06, 0x42, 0xcf, + 0xc2, 0xe7, 0x70, 0x81, 0x15, 0x8d, 0x72, 0x0f, 0xcc, 0x78, 0xd6, 0xd7, 0x4f, 0xcf, 0x04, 0x59, + 0x60, 0x35, 0x2a, 0x54, 0x60, 0x04, 0xec, 0x14, 0x7f, 0x63, 0xef, 0xfb, 0xe6, 0x7e, 0x1c, 0x0f, + 0x54, 0xb4, 0xd4, 0xfc, 0xc1, 0xda, 0x48, 0x16, 0xb5, 0xda, 0x0f, 0x37, 0xf0, 
0x68, 0xde, 0x72, + 0x3f, 0x4d, 0x5b, 0x6c, 0xb9, 0x9f, 0xa6, 0xed, 0xb7, 0x9c, 0xc3, 0x58, 0x71, 0x08, 0x69, 0x1c, + 0x8d, 0x85, 0x89, 0x64, 0x92, 0x35, 0x93, 0x85, 0x26, 0x15, 0x6b, 0x54, 0x48, 0x91, 0x80, 0x71, + 0xe5, 0x1c, 0x09, 0x6d, 0x40, 0x15, 0x62, 0x54, 0xe5, 0x60, 0x20, 0x54, 0x39, 0x3e, 0x87, 0x7b, + 0xe0, 0xca, 0x32, 0x90, 0x3a, 0xca, 0x16, 0x41, 0xf6, 0x40, 0x1f, 0x09, 0xf5, 0xc0, 0x2a, 0x89, + 0xdb, 0xc5, 0xa9, 0x88, 0xcc, 0x9e, 0x2c, 0x95, 0x28, 0xff, 0x0a, 0x13, 0x6a, 0x17, 0x35, 0x14, + 0x6b, 0x8d, 0x8c, 0x4c, 0x51, 0x6a, 0x49, 0xad, 0x0a, 0x13, 0xd2, 0xaa, 0xa1, 0xf8, 0x20, 0x54, + 0x8c, 0x47, 0x51, 0x12, 0xcd, 0x17, 0x73, 0xf2, 0x20, 0xd0, 0x68, 0xe8, 0x20, 0x34, 0x79, 0xb8, + 0x05, 0xcc, 0xd9, 0xdb, 0x23, 0x23, 0x94, 0xc1, 0xbb, 0xa5, 0xb7, 0xe0, 0x43, 0x56, 0xf4, 0x7e, + 0x2b, 0xd6, 0xc9, 0xfd, 0xd1, 0x61, 0x5b, 0x55, 0xf3, 0x71, 0x62, 0xa2, 0xb8, 0x7f, 0x6e, 0x40, + 0xf1, 0xaf, 0x5a, 0x44, 0x2b, 0x71, 0xbb, 0x86, 0xc7, 0x1b, 0x7a, 0xe1, 0xc1, 0xb0, 0x0f, 0x96, + 0xd2, 0xe4, 0x60, 0x40, 0xf6, 0xd0, 0x60, 0xf0, 0x30, 0x9c, 0xdc, 0x13, 0xb4, 0x86, 0xac, 0x3d, + 0x90, 0xc9, 0xad, 0x42, 0xa1, 0xe4, 0xd6, 0x59, 0x5c, 0x4c, 0xd8, 0x5a, 0x56, 0x38, 0x59, 0x4c, + 0x34, 0x1a, 0x2a, 0xa6, 0x26, 0x0f, 0xbc, 0xdf, 0x21, 0x68, 0x58, 0x5b, 0x4c, 0x55, 0x28, 0xb4, + 0xdf, 0x3a, 0x8b, 0xe7, 0xee, 0x41, 0x12, 0x99, 0x55, 0xd3, 0x20, 0xe7, 0x6e, 0x69, 0x0e, 0xcd, + 0x5d, 0x4c, 0xb9, 0xe0, 0xbf, 0x77, 0xd8, 0xcd, 0x81, 0x4c, 0x17, 0x71, 0x7e, 0xeb, 0x4b, 0x85, + 0x82, 0xc4, 0x7c, 0x23, 0x17, 0x2a, 0x11, 0x31, 0xa7, 0x92, 0xd3, 0xc0, 0x5a, 0xdd, 0x47, 0x9b, + 0xb8, 0xe0, 0x02, 0xcd, 0x16, 0x57, 0x6c, 0x9f, 0x37, 0x2d, 0xbe, 0xb0, 0x87, 0x0a, 0xd4, 0xc3, + 0xf0, 0x88, 0x78, 0x0a, 0x73, 0x69, 0xa0, 0xc8, 0x21, 0xe5, 0x89, 0x81, 0xd0, 0x88, 0xf0, 0x39, + 0x5c, 0x13, 0xc7, 0xc9, 0x44, 0x7a, 0x32, 0xf7, 0xc8, 0xbb, 0x89, 0x0f, 0x85, 0x6a, 0xa2, 0xce, + 0x3a, 0x39, 0xcd, 0x78, 0xb1, 0xcd, 0x53, 0xa1, 0x07, 0x4a, 0x66, 0xd0, 0x84, 0x07, 0x46, 0x27, + 0xc2, 0xac, 0xe4, 
0x17, 0x2d, 0x69, 0xfc, 0x42, 0x39, 0x02, 0x5b, 0x87, 0xb7, 0xe9, 0x57, 0x20, + 0x7f, 0x57, 0xdb, 0x61, 0xc8, 0x45, 0x5e, 0xb2, 0x77, 0x4b, 0xe5, 0x21, 0xe8, 0xac, 0xab, 0xc1, + 0x84, 0x87, 0x57, 0xe8, 0x38, 0xab, 0xb6, 0xd3, 0x16, 0x77, 0xba, 0x7f, 0x76, 0xd8, 0x07, 0x95, + 0xd9, 0xd1, 0x4f, 0x26, 0xd9, 0x2b, 0xef, 0xea, 0x2e, 0xf1, 0x78, 0xfd, 0xac, 0xc1, 0xbc, 0x5d, + 0xc8, 0xd7, 0x9b, 0xba, 0xe1, 0x9b, 0x46, 0x91, 0x78, 0x7b, 0x18, 0xee, 0x92, 0xef, 0x00, 0x18, + 0x09, 0xdd, 0x34, 0xaa, 0xa4, 0x13, 0xfa, 0x8e, 0xdd, 0x78, 0x22, 0xc6, 0x17, 0x8b, 0x94, 0x53, + 0x9f, 0x2a, 0x56, 0x26, 0x1b, 0xf8, 0xa3, 0x00, 0x61, 0x03, 0x3e, 0xe8, 0x70, 0x95, 0x5d, 0xfd, + 0xb4, 0x91, 0x0a, 0xf6, 0x94, 0x9c, 0x17, 0xd1, 0x1b, 0x7a, 0x9d, 0x4f, 0x85, 0xaf, 0x7e, 0x35, + 0x18, 0x69, 0x1e, 0xb2, 0xeb, 0x27, 0xf9, 0xbc, 0xa1, 0xbe, 0xc8, 0x9c, 0xe0, 0x21, 0x73, 0xab, + 0x19, 0xb0, 0xf1, 0x9e, 0xec, 0xfe, 0x73, 0xd5, 0xed, 0xfc, 0x7b, 0xd5, 0xed, 0xfc, 0x77, 0xd5, + 0xed, 0xfc, 0xf5, 0x7f, 0xf7, 0xda, 0x0f, 0x0f, 0x97, 0x91, 0x01, 0xad, 0x77, 0x22, 0xd9, 0x5b, + 0xfd, 0xea, 0x4d, 0x65, 0x6f, 0x69, 0x7a, 0xf9, 0xc7, 0xa6, 0x1e, 0xf5, 0x69, 0xea, 0xec, 0x46, + 0x6e, 0xfb, 0xf2, 0x65, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x51, 0x5d, 0x35, 0xd5, 0x12, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index aab974ebf86..c144a8f2a64 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -1,11 +1,14 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: throttlerdata.proto package throttlerdata import ( + encoding_binary "encoding/binary" fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -34,18 +37,26 @@ func (*MaxRatesRequest) ProtoMessage() {} func (*MaxRatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{0} } - func (m *MaxRatesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MaxRatesRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MaxRatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MaxRatesRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MaxRatesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MaxRatesRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_MaxRatesRequest.Merge(m, src) } func (m *MaxRatesRequest) XXX_Size() int { - return xxx_messageInfo_MaxRatesRequest.Size(m) + return m.Size() } func (m *MaxRatesRequest) XXX_DiscardUnknown() { xxx_messageInfo_MaxRatesRequest.DiscardUnknown(m) @@ -69,18 +80,26 @@ func (*MaxRatesResponse) ProtoMessage() {} func (*MaxRatesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{1} } - func (m *MaxRatesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MaxRatesResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *MaxRatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MaxRatesResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_MaxRatesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *MaxRatesResponse) XXX_Merge(src proto.Message) { 
xxx_messageInfo_MaxRatesResponse.Merge(m, src) } func (m *MaxRatesResponse) XXX_Size() int { - return xxx_messageInfo_MaxRatesResponse.Size(m) + return m.Size() } func (m *MaxRatesResponse) XXX_DiscardUnknown() { xxx_messageInfo_MaxRatesResponse.DiscardUnknown(m) @@ -109,18 +128,26 @@ func (*SetMaxRateRequest) ProtoMessage() {} func (*SetMaxRateRequest) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{2} } - func (m *SetMaxRateRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetMaxRateRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetMaxRateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetMaxRateRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetMaxRateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SetMaxRateRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SetMaxRateRequest.Merge(m, src) } func (m *SetMaxRateRequest) XXX_Size() int { - return xxx_messageInfo_SetMaxRateRequest.Size(m) + return m.Size() } func (m *SetMaxRateRequest) XXX_DiscardUnknown() { xxx_messageInfo_SetMaxRateRequest.DiscardUnknown(m) @@ -150,18 +177,26 @@ func (*SetMaxRateResponse) ProtoMessage() {} func (*SetMaxRateResponse) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{3} } - func (m *SetMaxRateResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetMaxRateResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SetMaxRateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetMaxRateResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SetMaxRateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil + } } func (m *SetMaxRateResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SetMaxRateResponse.Merge(m, src) } func (m *SetMaxRateResponse) XXX_Size() int { - return xxx_messageInfo_SetMaxRateResponse.Size(m) + return m.Size() } func (m *SetMaxRateResponse) XXX_DiscardUnknown() { xxx_messageInfo_SetMaxRateResponse.DiscardUnknown(m) @@ -268,18 +303,26 @@ func (*Configuration) ProtoMessage() {} func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{4} } - func (m *Configuration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Configuration.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Configuration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Configuration.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Configuration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Configuration) XXX_Merge(src proto.Message) { xxx_messageInfo_Configuration.Merge(m, src) } func (m *Configuration) XXX_Size() int { - return xxx_messageInfo_Configuration.Size(m) + return m.Size() } func (m *Configuration) XXX_DiscardUnknown() { xxx_messageInfo_Configuration.DiscardUnknown(m) @@ -401,18 +444,26 @@ func (*GetConfigurationRequest) ProtoMessage() {} func (*GetConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{5} } - func (m *GetConfigurationRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetConfigurationRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetConfigurationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetConfigurationRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetConfigurationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetConfigurationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetConfigurationRequest.Merge(m, src) } func (m *GetConfigurationRequest) XXX_Size() int { - return xxx_messageInfo_GetConfigurationRequest.Size(m) + return m.Size() } func (m *GetConfigurationRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetConfigurationRequest.DiscardUnknown(m) @@ -443,18 +494,26 @@ func (*GetConfigurationResponse) ProtoMessage() {} func (*GetConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{6} } - func (m *GetConfigurationResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetConfigurationResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetConfigurationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetConfigurationResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetConfigurationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetConfigurationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetConfigurationResponse.Merge(m, src) } func (m *GetConfigurationResponse) XXX_Size() int { - return xxx_messageInfo_GetConfigurationResponse.Size(m) + return m.Size() } func (m *GetConfigurationResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetConfigurationResponse.DiscardUnknown(m) @@ -490,18 +549,26 @@ func (*UpdateConfigurationRequest) ProtoMessage() {} func (*UpdateConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{7} } - func (m *UpdateConfigurationRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdateConfigurationRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *UpdateConfigurationRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdateConfigurationRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_UpdateConfigurationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *UpdateConfigurationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateConfigurationRequest.Merge(m, src) } func (m *UpdateConfigurationRequest) XXX_Size() int { - return xxx_messageInfo_UpdateConfigurationRequest.Size(m) + return m.Size() } func (m *UpdateConfigurationRequest) XXX_DiscardUnknown() { xxx_messageInfo_UpdateConfigurationRequest.DiscardUnknown(m) @@ -545,18 +612,26 @@ func (*UpdateConfigurationResponse) ProtoMessage() {} func (*UpdateConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{8} } - func (m *UpdateConfigurationResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdateConfigurationResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *UpdateConfigurationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdateConfigurationResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_UpdateConfigurationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *UpdateConfigurationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateConfigurationResponse.Merge(m, src) } func (m *UpdateConfigurationResponse) XXX_Size() int { - return xxx_messageInfo_UpdateConfigurationResponse.Size(m) + return m.Size() } func (m *UpdateConfigurationResponse) XXX_DiscardUnknown() { xxx_messageInfo_UpdateConfigurationResponse.DiscardUnknown(m) @@ -587,18 +662,26 @@ func (*ResetConfigurationRequest) ProtoMessage() {} func 
(*ResetConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{9} } - func (m *ResetConfigurationRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResetConfigurationRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ResetConfigurationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetConfigurationRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ResetConfigurationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ResetConfigurationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ResetConfigurationRequest.Merge(m, src) } func (m *ResetConfigurationRequest) XXX_Size() int { - return xxx_messageInfo_ResetConfigurationRequest.Size(m) + return m.Size() } func (m *ResetConfigurationRequest) XXX_DiscardUnknown() { xxx_messageInfo_ResetConfigurationRequest.DiscardUnknown(m) @@ -628,18 +711,26 @@ func (*ResetConfigurationResponse) ProtoMessage() {} func (*ResetConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_b67db2b008a2453d, []int{10} } - func (m *ResetConfigurationResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResetConfigurationResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ResetConfigurationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetConfigurationResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ResetConfigurationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ResetConfigurationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ResetConfigurationResponse.Merge(m, src) } func (m *ResetConfigurationResponse) 
XXX_Size() int { - return xxx_messageInfo_ResetConfigurationResponse.Size(m) + return m.Size() } func (m *ResetConfigurationResponse) XXX_DiscardUnknown() { xxx_messageInfo_ResetConfigurationResponse.DiscardUnknown(m) @@ -673,51 +764,2216 @@ func init() { func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor_b67db2b008a2453d) } var fileDescriptor_b67db2b008a2453d = []byte{ - // 734 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5f, 0x4f, 0x03, 0x45, - 0x10, 0xcf, 0x51, 0x8a, 0x30, 0xa5, 0x40, 0x17, 0x84, 0xa3, 0x18, 0x53, 0x2f, 0x31, 0x36, 0x8d, - 0xb6, 0x49, 0x89, 0x11, 0x25, 0x26, 0x50, 0x31, 0x46, 0xa3, 0x3c, 0x1c, 0xea, 0x03, 0x2f, 0x9b, - 0xed, 0xdd, 0x70, 0xbd, 0x70, 0x77, 0x7b, 0xee, 0x2e, 0xd0, 0xfa, 0x21, 0xfc, 0x20, 0xbe, 0xf9, - 0x8d, 0xfc, 0x28, 0xe6, 0x76, 0xb7, 0x7f, 0xae, 0x14, 0x30, 0xe1, 0x6d, 0x77, 0xe6, 0x37, 0xbf, - 0xf9, 0xcd, 0xde, 0xcc, 0x1c, 0xec, 0xab, 0x91, 0xe0, 0x4a, 0x25, 0x28, 0x42, 0xa6, 0x58, 0x37, - 0x17, 0x5c, 0x71, 0x52, 0x2f, 0x19, 0xbd, 0x06, 0xec, 0xfe, 0xc2, 0xc6, 0x3e, 0x53, 0x28, 0x7d, - 0xfc, 0xe3, 0x01, 0xa5, 0xf2, 0xfe, 0x72, 0x60, 0x6f, 0x6e, 0x93, 0x39, 0xcf, 0x24, 0x92, 0x0b, - 0xa8, 0x8a, 0xc2, 0xe0, 0x3a, 0xad, 0x4a, 0xbb, 0xd6, 0xef, 0x74, 0xcb, 0xdc, 0xcb, 0xf8, 0xae, - 0xbe, 0x7d, 0x9f, 0x29, 0x31, 0xf1, 0x4d, 0x60, 0xf3, 0x0c, 0x60, 0x6e, 0x24, 0x7b, 0x50, 0xb9, - 0xc7, 0x89, 0xeb, 0xb4, 0x9c, 0xf6, 0x96, 0x5f, 0x1c, 0xc9, 0x01, 0x54, 0x1f, 0x59, 0xf2, 0x80, - 0xee, 0x5a, 0xcb, 0x69, 0x57, 0x7c, 0x73, 0xf9, 0x66, 0xed, 0xcc, 0xf1, 0x3e, 0x83, 0xc6, 0x0d, - 0x2a, 0x9b, 0xc2, 0xaa, 0x24, 0x04, 0xd6, 0x0b, 0x5e, 0xcd, 0x50, 0xf1, 0xf5, 0xd9, 0xeb, 0x00, - 0x59, 0x04, 0x5a, 0xe9, 0x07, 0x50, 0xcd, 0x58, 0x6a, 0xa5, 0x6f, 0xf9, 0xe6, 0xe2, 0xfd, 0xbd, - 0x01, 0xf5, 0xef, 0x78, 0x76, 0x17, 0x47, 0x0f, 0x82, 0xa9, 0x98, 0x67, 0xe4, 0x1c, 0x9a, 0x8a, - 0x89, 0x08, 0x15, 0x15, 0x98, 0x27, 0x71, 0xa0, 0xad, 0x34, 0x61, 0x11, 0x95, 0x18, 
0xd8, 0x3c, - 0x47, 0x06, 0xe1, 0xcf, 0x01, 0x3f, 0xb3, 0xe8, 0x06, 0x03, 0xf2, 0x25, 0x1c, 0xa5, 0x6c, 0xbc, - 0x32, 0xd2, 0xd4, 0x73, 0x90, 0xb2, 0xf1, 0xf3, 0xb0, 0x4f, 0x60, 0x3b, 0xce, 0x62, 0x15, 0xb3, - 0x84, 0xea, 0x6a, 0x2a, 0x1a, 0x5b, 0xb3, 0xb6, 0xa2, 0x8c, 0x02, 0x52, 0x30, 0xc7, 0x59, 0x20, - 0x90, 0x49, 0x74, 0xd7, 0x5b, 0x4e, 0xdb, 0xf1, 0x6b, 0x29, 0x1b, 0xff, 0x68, 0x4d, 0xe4, 0x0b, - 0x20, 0x98, 0xa2, 0x88, 0x30, 0x0b, 0x26, 0x34, 0x44, 0x0b, 0xac, 0x6a, 0x60, 0x63, 0xe6, 0xb9, - 0xb2, 0x0e, 0xf2, 0x13, 0x78, 0x69, 0x9c, 0xd1, 0xd0, 0x16, 0x4e, 0x87, 0xa8, 0x9e, 0x10, 0xb3, - 0x59, 0x0a, 0xa9, 0x65, 0x6f, 0x68, 0x29, 0x1f, 0xa7, 0x71, 0x76, 0x65, 0x81, 0x03, 0x83, 0x9b, - 0xa6, 0x95, 0x45, 0x01, 0x05, 0x17, 0x1b, 0xbf, 0xc5, 0xf5, 0x81, 0xe5, 0x62, 0xe3, 0xb7, 0xb8, - 0x56, 0xe9, 0x9a, 0x56, 0x64, 0xb8, 0x36, 0x5f, 0xd2, 0x35, 0xad, 0x4f, 0x73, 0x7d, 0x0d, 0xc7, - 0x32, 0x17, 0xc8, 0x42, 0x3a, 0x64, 0xc1, 0x7d, 0xc2, 0x23, 0xca, 0x02, 0xc1, 0xa5, 0xa1, 0xd8, - 0xd2, 0x14, 0x87, 0x06, 0x30, 0x30, 0xfe, 0x4b, 0xed, 0xb6, 0xa1, 0x71, 0x94, 0x71, 0x81, 0x34, - 0xa3, 0x32, 0xe1, 0x4f, 0x28, 0x67, 0x1d, 0x21, 0x5d, 0x68, 0x39, 0xed, 0xaa, 0x7f, 0x68, 0x00, - 0xd7, 0x37, 0xc6, 0x6d, 0xbf, 0xab, 0x24, 0x5f, 0x81, 0xfb, 0x3c, 0x34, 0xe4, 0x59, 0x32, 0x91, - 0x6e, 0x4d, 0x47, 0x7e, 0xb8, 0x14, 0x69, 0x9c, 0xa4, 0x0f, 0x87, 0x2c, 0x42, 0x3a, 0x64, 0xa1, - 0xee, 0x03, 0xca, 0xee, 0x14, 0x0a, 0xad, 0x75, 0x5b, 0x6b, 0x25, 0x2c, 0xc2, 0x01, 0x0b, 0x8b, - 0x86, 0xb8, 0x2c, 0x5c, 0x85, 0xce, 0x0e, 0x34, 0x66, 0xf8, 0x59, 0x77, 0xd4, 0xf5, 0x47, 0xdf, - 0x1d, 0x1a, 0xec, 0xac, 0x43, 0xbe, 0x85, 0x13, 0xdd, 0x9e, 0x9a, 0x3b, 0xcf, 0x05, 0x67, 0xc1, - 0x88, 0xaa, 0x91, 0x40, 0x39, 0xe2, 0x49, 0xe8, 0xee, 0xe8, 0x28, 0x37, 0x35, 0x93, 0x73, 0x69, - 0x01, 0xbf, 0x4e, 0xfd, 0xde, 0x05, 0x1c, 0xfd, 0x80, 0xaa, 0x34, 0x2e, 0xd3, 0x39, 0xfc, 0x14, - 0x76, 0x66, 0xab, 0x80, 0x16, 0xa3, 0x65, 0x67, 0x7a, 0xbe, 0x67, 0xae, 0x59, 0x8a, 0xde, 0xbf, - 0x0e, 0xb8, 0xcf, 0x29, 
0xec, 0x84, 0x06, 0xb0, 0x13, 0x2c, 0x3a, 0xa6, 0x5b, 0xe6, 0x7c, 0x69, - 0xcb, 0xbc, 0x44, 0xd0, 0x2d, 0x59, 0xed, 0xda, 0x59, 0xa2, 0x6c, 0x52, 0xd8, 0x5f, 0x01, 0x5b, - 0xb1, 0x88, 0xfa, 0x8b, 0x8b, 0xa8, 0xd6, 0xff, 0x68, 0x49, 0x44, 0x59, 0xc1, 0xc2, 0x9a, 0xfa, - 0xc7, 0x81, 0xe6, 0x6f, 0x79, 0xc8, 0x14, 0xbe, 0xe3, 0xa1, 0xc8, 0x00, 0xea, 0x25, 0xe1, 0xff, - 0x4b, 0x45, 0x39, 0x84, 0xb4, 0x61, 0x2f, 0xe0, 0xf9, 0x84, 0xfe, 0x89, 0x82, 0x53, 0x2d, 0x50, - 0xea, 0xcd, 0xb2, 0x59, 0x3c, 0x4a, 0x3e, 0xb9, 0x45, 0xc1, 0x7f, 0xd7, 0x56, 0xef, 0x14, 0x4e, - 0x56, 0x4a, 0x7e, 0x75, 0x75, 0x0e, 0xe0, 0xd8, 0x47, 0xf9, 0xbe, 0x7e, 0xe8, 0x43, 0x73, 0x15, - 0xc7, 0x6b, 0x79, 0x07, 0x9f, 0xdf, 0x76, 0x1e, 0x63, 0x85, 0x52, 0x76, 0x63, 0xde, 0x33, 0xa7, - 0x5e, 0xc4, 0x7b, 0x8f, 0xaa, 0xa7, 0x7f, 0x6d, 0xbd, 0xd2, 0x0b, 0x0d, 0x37, 0xb4, 0xf1, 0xf4, - 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xe5, 0x12, 0x96, 0x06, 0x07, 0x00, 0x00, + // 761 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5f, 0x6f, 0xe3, 0x44, + 0x10, 0xc7, 0x97, 0x4b, 0xb9, 0x4e, 0x2e, 0xbd, 0x66, 0xaf, 0xb4, 0xbe, 0x1c, 0x8a, 0x82, 0x25, + 0x44, 0x14, 0x89, 0x44, 0xca, 0x09, 0x71, 0x50, 0x90, 0xda, 0x50, 0x84, 0x40, 0xd0, 0x07, 0x17, + 0x78, 0xe8, 0xcb, 0x6a, 0x63, 0x4f, 0x1d, 0xab, 0xb6, 0xd7, 0xec, 0x6e, 0xdb, 0x84, 0x0f, 0xc1, + 0x33, 0x9f, 0x81, 0x37, 0xbe, 0x05, 0x8f, 0x7c, 0x84, 0xaa, 0x7c, 0x11, 0xe4, 0xdd, 0xcd, 0x1f, + 0xa7, 0x69, 0x7b, 0x52, 0xdf, 0x76, 0x67, 0x7e, 0xf3, 0x9b, 0xdf, 0xac, 0x67, 0xc6, 0xf0, 0x52, + 0x8d, 0x05, 0x57, 0x2a, 0x41, 0x11, 0x32, 0xc5, 0x7a, 0xb9, 0xe0, 0x8a, 0x93, 0x7a, 0xc9, 0xe8, + 0x35, 0xe0, 0xc5, 0x4f, 0x6c, 0xe2, 0x33, 0x85, 0xd2, 0xc7, 0xdf, 0x2e, 0x50, 0x2a, 0xef, 0x0f, + 0x07, 0xb6, 0x17, 0x36, 0x99, 0xf3, 0x4c, 0x22, 0x39, 0x80, 0xaa, 0x28, 0x0c, 0xae, 0xd3, 0xae, + 0x74, 0x6a, 0x83, 0x6e, 0xaf, 0xcc, 0xbd, 0x8a, 0xef, 0xe9, 0xdb, 0xb7, 0x99, 0x12, 0x53, 0xdf, + 0x04, 0x36, 0xdf, 0x02, 
0x2c, 0x8c, 0x64, 0x1b, 0x2a, 0xe7, 0x38, 0x75, 0x9d, 0xb6, 0xd3, 0xd9, + 0xf4, 0x8b, 0x23, 0xd9, 0x81, 0xea, 0x25, 0x4b, 0x2e, 0xd0, 0x7d, 0xd2, 0x76, 0x3a, 0x15, 0xdf, + 0x5c, 0xbe, 0x7c, 0xf2, 0xd6, 0xf1, 0x3e, 0x81, 0xc6, 0x09, 0x2a, 0x9b, 0xc2, 0xaa, 0x24, 0x04, + 0x9e, 0x16, 0xbc, 0x9a, 0xa1, 0xe2, 0xeb, 0xb3, 0xd7, 0x05, 0xb2, 0x0c, 0xb4, 0xd2, 0x77, 0xa0, + 0x9a, 0xb1, 0xd4, 0x4a, 0xdf, 0xf4, 0xcd, 0xc5, 0xfb, 0x6b, 0x03, 0xea, 0xdf, 0xf0, 0xec, 0x2c, + 0x8e, 0x2e, 0x04, 0x53, 0x31, 0xcf, 0xc8, 0x3e, 0x34, 0x15, 0x13, 0x11, 0x2a, 0x2a, 0x30, 0x4f, + 0xe2, 0x40, 0x5b, 0x69, 0xc2, 0x22, 0x2a, 0x31, 0xb0, 0x79, 0xf6, 0x0c, 0xc2, 0x5f, 0x00, 0x7e, + 0x64, 0xd1, 0x09, 0x06, 0xe4, 0x33, 0xd8, 0x4b, 0xd9, 0x64, 0x6d, 0xa4, 0xa9, 0x67, 0x27, 0x65, + 0x93, 0xdb, 0x61, 0x1f, 0xc1, 0xf3, 0x38, 0x8b, 0x55, 0xcc, 0x12, 0xaa, 0xab, 0xa9, 0x68, 0x6c, + 0xcd, 0xda, 0x8a, 0x32, 0x0a, 0x48, 0xc1, 0x1c, 0x67, 0x81, 0x40, 0x26, 0xd1, 0x7d, 0xda, 0x76, + 0x3a, 0x8e, 0x5f, 0x4b, 0xd9, 0xe4, 0x7b, 0x6b, 0x22, 0x9f, 0x02, 0xc1, 0x14, 0x45, 0x84, 0x59, + 0x30, 0xa5, 0x21, 0x5a, 0x60, 0x55, 0x03, 0x1b, 0x73, 0xcf, 0x91, 0x75, 0x90, 0x1f, 0xc0, 0x4b, + 0xe3, 0x8c, 0x86, 0xb6, 0x70, 0x3a, 0x42, 0x75, 0x85, 0x98, 0xcd, 0x53, 0x48, 0x2d, 0x7b, 0x43, + 0x4b, 0x69, 0xa5, 0x71, 0x76, 0x64, 0x81, 0x43, 0x83, 0x9b, 0xa5, 0x95, 0x45, 0x01, 0x05, 0x17, + 0x9b, 0x3c, 0xc4, 0xf5, 0xbe, 0xe5, 0x62, 0x93, 0x87, 0xb8, 0xd6, 0xe9, 0x9a, 0x55, 0x64, 0xb8, + 0x9e, 0xdd, 0xa5, 0x6b, 0x56, 0x9f, 0xe6, 0xfa, 0x02, 0x5e, 0xc9, 0x5c, 0x20, 0x0b, 0xe9, 0x88, + 0x05, 0xe7, 0x09, 0x8f, 0x28, 0x0b, 0x04, 0x97, 0x86, 0x62, 0x53, 0x53, 0xec, 0x1a, 0xc0, 0xd0, + 0xf8, 0x0f, 0xb5, 0xdb, 0x86, 0xc6, 0x51, 0xc6, 0x05, 0xd2, 0x8c, 0xca, 0x84, 0x5f, 0xa1, 0x9c, + 0x77, 0x84, 0x74, 0xa1, 0xed, 0x74, 0xaa, 0xfe, 0xae, 0x01, 0x1c, 0x9f, 0x18, 0xb7, 0xfd, 0xae, + 0x92, 0x7c, 0x0e, 0xee, 0xed, 0xd0, 0x90, 0x67, 0xc9, 0x54, 0xba, 0x35, 0x1d, 0xf9, 0xc1, 0x4a, + 0xa4, 0x71, 0x92, 0x01, 0xec, 0xb2, 0x08, 0xe9, 0x88, 0x85, 
0xba, 0x0f, 0x28, 0x3b, 0x53, 0x28, + 0xb4, 0xd6, 0xe7, 0x5a, 0x2b, 0x61, 0x11, 0x0e, 0x59, 0x58, 0x34, 0xc4, 0x61, 0xe1, 0x2a, 0x74, + 0x76, 0xa1, 0x31, 0xc7, 0xcf, 0xbb, 0xa3, 0xae, 0x3f, 0xfa, 0x8b, 0x91, 0xc1, 0xce, 0x3b, 0xe4, + 0x6b, 0x78, 0xad, 0xdb, 0x53, 0x73, 0xe7, 0xb9, 0xe0, 0x2c, 0x18, 0x53, 0x35, 0x16, 0x28, 0xc7, + 0x3c, 0x09, 0xdd, 0x2d, 0x1d, 0xe5, 0xa6, 0x66, 0x72, 0x0e, 0x2d, 0xe0, 0xe7, 0x99, 0xdf, 0x3b, + 0x80, 0xbd, 0xef, 0x50, 0x95, 0xc6, 0x65, 0x36, 0x87, 0x1f, 0xc3, 0xd6, 0x7c, 0x15, 0xd0, 0x62, + 0xb4, 0xec, 0x4c, 0x2f, 0xf6, 0xcc, 0x31, 0x4b, 0xd1, 0xbb, 0x76, 0xc0, 0xbd, 0x4d, 0x61, 0x27, + 0x34, 0x80, 0xad, 0x60, 0xd9, 0x31, 0xdb, 0x32, 0xfb, 0x2b, 0x5b, 0xe6, 0x2e, 0x82, 0x5e, 0xc9, + 0x6a, 0xd7, 0xce, 0x0a, 0x65, 0x93, 0xc2, 0xcb, 0x35, 0xb0, 0x35, 0x8b, 0x68, 0xb0, 0xbc, 0x88, + 0x6a, 0x83, 0x0f, 0x57, 0x44, 0x94, 0x15, 0x2c, 0xad, 0xa9, 0xbf, 0x1d, 0x68, 0xfe, 0x92, 0x87, + 0x4c, 0xe1, 0x23, 0x1e, 0x8a, 0x0c, 0xa1, 0x5e, 0x12, 0xfe, 0x4e, 0x2a, 0xca, 0x21, 0xa4, 0x03, + 0xdb, 0x01, 0xcf, 0xa7, 0xf4, 0x77, 0x14, 0x9c, 0x6a, 0x81, 0x52, 0x6f, 0x96, 0x67, 0xc5, 0xa3, + 0xe4, 0xd3, 0x53, 0x14, 0xfc, 0x57, 0x6d, 0xf5, 0xde, 0xc0, 0xeb, 0xb5, 0x92, 0xef, 0x5d, 0x9d, + 0x43, 0x78, 0xe5, 0xa3, 0x7c, 0x5c, 0x3f, 0x0c, 0xa0, 0xb9, 0x8e, 0xe3, 0xbe, 0xbc, 0xc3, 0xaf, + 0xfe, 0xb9, 0x69, 0x39, 0xff, 0xde, 0xb4, 0x9c, 0xeb, 0x9b, 0x96, 0xf3, 0xe7, 0x7f, 0xad, 0xf7, + 0x4e, 0xbb, 0x97, 0xb1, 0x42, 0x29, 0x7b, 0x31, 0xef, 0x9b, 0x53, 0x3f, 0xe2, 0xfd, 0x4b, 0xd5, + 0xd7, 0xbf, 0xba, 0x7e, 0xe9, 0xc5, 0x46, 0x1b, 0xda, 0xf8, 0xe6, 0xff, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x85, 0x05, 0xbd, 0x73, 0x16, 0x07, 0x00, 0x00, +} + +func (m *MaxRatesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaxRatesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MaxRatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *MaxRatesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaxRatesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MaxRatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Rates) > 0 { + for k := range m.Rates { + v := m.Rates[k] + baseI := i + i = encodeVarintThrottlerdata(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintThrottlerdata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SetMaxRateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetMaxRateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetMaxRateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Rate != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.Rate)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, 
nil +} + +func (m *SetMaxRateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetMaxRateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetMaxRateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Configuration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Configuration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Configuration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MaxRateApproachThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.MaxRateApproachThreshold)))) + i-- + dAtA[i] = 0x71 + } + if m.BadRateIncrease != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.BadRateIncrease)))) + i-- + dAtA[i] = 0x69 + } + if m.AgeBadRateAfterSec != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.AgeBadRateAfterSec)) + i-- + dAtA[i] = 0x60 + } + if m.IgnoreNSlowestRdonlys != 0 { + i = 
encodeVarintThrottlerdata(dAtA, i, uint64(m.IgnoreNSlowestRdonlys)) + i-- + dAtA[i] = 0x58 + } + if m.IgnoreNSlowestReplicas != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.IgnoreNSlowestReplicas)) + i-- + dAtA[i] = 0x50 + } + if m.SpreadBacklogAcrossSec != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.SpreadBacklogAcrossSec)) + i-- + dAtA[i] = 0x48 + } + if m.MinDurationBetweenDecreasesSec != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.MinDurationBetweenDecreasesSec)) + i-- + dAtA[i] = 0x40 + } + if m.MaxDurationBetweenIncreasesSec != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.MaxDurationBetweenIncreasesSec)) + i-- + dAtA[i] = 0x38 + } + if m.MinDurationBetweenIncreasesSec != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.MinDurationBetweenIncreasesSec)) + i-- + dAtA[i] = 0x30 + } + if m.EmergencyDecrease != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.EmergencyDecrease)))) + i-- + dAtA[i] = 0x29 + } + if m.MaxIncrease != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.MaxIncrease)))) + i-- + dAtA[i] = 0x21 + } + if m.InitialRate != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.InitialRate)) + i-- + dAtA[i] = 0x18 + } + if m.MaxReplicationLagSec != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.MaxReplicationLagSec)) + i-- + dAtA[i] = 0x10 + } + if m.TargetReplicationLagSec != 0 { + i = encodeVarintThrottlerdata(dAtA, i, uint64(m.TargetReplicationLagSec)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetConfigurationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigurationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*GetConfigurationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ThrottlerName) > 0 { + i -= len(m.ThrottlerName) + copy(dAtA[i:], m.ThrottlerName) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(m.ThrottlerName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetConfigurationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigurationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetConfigurationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Configurations) > 0 { + for k := range m.Configurations { + v := m.Configurations[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintThrottlerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintThrottlerdata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpdateConfigurationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigurationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *UpdateConfigurationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CopyZeroValues { + i-- + if m.CopyZeroValues { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Configuration != nil { + { + size, err := m.Configuration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintThrottlerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ThrottlerName) > 0 { + i -= len(m.ThrottlerName) + copy(dAtA[i:], m.ThrottlerName) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(m.ThrottlerName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateConfigurationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigurationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateConfigurationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResetConfigurationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetConfigurationRequest) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetConfigurationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ThrottlerName) > 0 { + i -= len(m.ThrottlerName) + copy(dAtA[i:], m.ThrottlerName) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(m.ThrottlerName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResetConfigurationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetConfigurationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetConfigurationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintThrottlerdata(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintThrottlerdata(dAtA []byte, offset int, v uint64) int { + offset -= sovThrottlerdata(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MaxRatesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MaxRatesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rates) > 0 { + for k, v 
:= range m.Rates { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovThrottlerdata(uint64(len(k))) + 1 + sovThrottlerdata(uint64(v)) + n += mapEntrySize + 1 + sovThrottlerdata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetMaxRateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rate != 0 { + n += 1 + sovThrottlerdata(uint64(m.Rate)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SetMaxRateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovThrottlerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Configuration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TargetReplicationLagSec != 0 { + n += 1 + sovThrottlerdata(uint64(m.TargetReplicationLagSec)) + } + if m.MaxReplicationLagSec != 0 { + n += 1 + sovThrottlerdata(uint64(m.MaxReplicationLagSec)) + } + if m.InitialRate != 0 { + n += 1 + sovThrottlerdata(uint64(m.InitialRate)) + } + if m.MaxIncrease != 0 { + n += 9 + } + if m.EmergencyDecrease != 0 { + n += 9 + } + if m.MinDurationBetweenIncreasesSec != 0 { + n += 1 + sovThrottlerdata(uint64(m.MinDurationBetweenIncreasesSec)) + } + if m.MaxDurationBetweenIncreasesSec != 0 { + n += 1 + sovThrottlerdata(uint64(m.MaxDurationBetweenIncreasesSec)) + } + if m.MinDurationBetweenDecreasesSec != 0 { + n += 1 + sovThrottlerdata(uint64(m.MinDurationBetweenDecreasesSec)) + } + if m.SpreadBacklogAcrossSec != 0 { + n += 1 + sovThrottlerdata(uint64(m.SpreadBacklogAcrossSec)) + } + if m.IgnoreNSlowestReplicas != 0 { + n += 1 + sovThrottlerdata(uint64(m.IgnoreNSlowestReplicas)) + } + if m.IgnoreNSlowestRdonlys != 0 { + n += 1 + sovThrottlerdata(uint64(m.IgnoreNSlowestRdonlys)) + } + if m.AgeBadRateAfterSec != 0 { + 
n += 1 + sovThrottlerdata(uint64(m.AgeBadRateAfterSec)) + } + if m.BadRateIncrease != 0 { + n += 9 + } + if m.MaxRateApproachThreshold != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n } + +func (m *GetConfigurationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ThrottlerName) + if l > 0 { + n += 1 + l + sovThrottlerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetConfigurationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Configurations) > 0 { + for k, v := range m.Configurations { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovThrottlerdata(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovThrottlerdata(uint64(len(k))) + l + n += mapEntrySize + 1 + sovThrottlerdata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateConfigurationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ThrottlerName) + if l > 0 { + n += 1 + l + sovThrottlerdata(uint64(l)) + } + if m.Configuration != nil { + l = m.Configuration.Size() + n += 1 + l + sovThrottlerdata(uint64(l)) + } + if m.CopyZeroValues { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateConfigurationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovThrottlerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResetConfigurationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ThrottlerName) + if l > 0 { + n += 1 + l + sovThrottlerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } 
+ return n +} + +func (m *ResetConfigurationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovThrottlerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovThrottlerdata(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozThrottlerdata(x uint64) (n int) { + return sovThrottlerdata(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MaxRatesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaxRatesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaxRatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaxRatesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaxRatesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaxRatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rates == nil { + m.Rates = make(map[string]int64) + } + var mapkey string + var mapvalue int64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthThrottlerdata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthThrottlerdata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Rates[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetMaxRateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetMaxRateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetMaxRateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) + } + m.Rate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rate |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetMaxRateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetMaxRateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetMaxRateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Configuration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Configuration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Configuration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetReplicationLagSec", wireType) + } + m.TargetReplicationLagSec = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetReplicationLagSec |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicationLagSec", wireType) + } + m.MaxReplicationLagSec = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicationLagSec |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialRate", wireType) + } + m.InitialRate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialRate |= int64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxIncrease", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.MaxIncrease = float64(math.Float64frombits(v)) + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EmergencyDecrease", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.EmergencyDecrease = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDurationBetweenIncreasesSec", wireType) + } + m.MinDurationBetweenIncreasesSec = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDurationBetweenIncreasesSec |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxDurationBetweenIncreasesSec", wireType) + } + m.MaxDurationBetweenIncreasesSec = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxDurationBetweenIncreasesSec |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDurationBetweenDecreasesSec", wireType) + } + m.MinDurationBetweenDecreasesSec = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDurationBetweenDecreasesSec |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpreadBacklogAcrossSec", wireType) + } + m.SpreadBacklogAcrossSec = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpreadBacklogAcrossSec |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreNSlowestReplicas", wireType) + } + m.IgnoreNSlowestReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IgnoreNSlowestReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreNSlowestRdonlys", wireType) + } + m.IgnoreNSlowestRdonlys = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IgnoreNSlowestRdonlys |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AgeBadRateAfterSec", wireType) + } + m.AgeBadRateAfterSec = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AgeBadRateAfterSec |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field BadRateIncrease", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BadRateIncrease = 
float64(math.Float64frombits(v)) + case 14: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRateApproachThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.MaxRateApproachThreshold = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigurationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigurationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigurationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottlerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ThrottlerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigurationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigurationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigurationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configurations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Configurations == nil { + m.Configurations = make(map[string]*Configuration) + } + var mapkey string + var mapvalue *Configuration + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthThrottlerdata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthThrottlerdata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthThrottlerdata + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Configuration{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := 
skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Configurations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigurationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigurationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigurationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottlerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ThrottlerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Configuration == nil { + m.Configuration = &Configuration{} + } + if err := m.Configuration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CopyZeroValues", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CopyZeroValues = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigurationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigurationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigurationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetConfigurationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetConfigurationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetConfigurationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottlerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ThrottlerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetConfigurationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetConfigurationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetConfigurationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthThrottlerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthThrottlerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipThrottlerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthThrottlerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipThrottlerdata(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowThrottlerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthThrottlerdata + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupThrottlerdata + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthThrottlerdata + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthThrottlerdata = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowThrottlerdata = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupThrottlerdata = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go index 5e0289c73bc..769f7d3c2e1 100644 --- a/go/vt/proto/throttlerservice/throttlerservice.pb.go +++ 
b/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: throttlerservice.proto package throttlerservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - throttlerdata "vitess.io/vitess/go/vt/proto/throttlerdata" ) @@ -30,23 +29,24 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("throttlerservice.proto", fileDescriptor_33af55db6d07f810) } var fileDescriptor_33af55db6d07f810 = []byte{ - // 241 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x3d, 0x4b, 0xc4, 0x40, - 0x10, 0x86, 0x05, 0x41, 0x74, 0xaa, 0x63, 0x0f, 0x2c, 0xae, 0xf0, 0xab, 0x50, 0x4f, 0x30, 0x0b, - 0xfa, 0x0f, 0xb4, 0xb0, 0xba, 0x26, 0xa7, 0x8d, 0xdd, 0xea, 0x8d, 0x71, 0x51, 0x76, 0xe2, 0xce, - 0x24, 0xf8, 0xbf, 0xfd, 0x03, 0x42, 0xe2, 0xae, 0x64, 0xfc, 0xb8, 0x74, 0xe1, 0x7d, 0x9f, 0x7d, - 0x1f, 0x02, 0x03, 0xbb, 0xf2, 0x1c, 0x49, 0xe4, 0x15, 0x23, 0x63, 0x6c, 0xfd, 0x23, 0x16, 0x75, - 0x24, 0x21, 0x33, 0xd1, 0xf9, 0x6c, 0x9a, 0x93, 0x95, 0x13, 0xd7, 0x63, 0x17, 0x1f, 0x9b, 0xb0, - 0x73, 0x9b, 0x72, 0xb3, 0x80, 0xed, 0x85, 0x7b, 0x2f, 0x9d, 0x20, 0x9b, 0xbd, 0x62, 0xc8, 0xa7, - 0xa2, 0xc4, 0xb7, 0x06, 0x59, 0x66, 0xfb, 0x7f, 0xf6, 0x5c, 0x53, 0x60, 0x3c, 0xda, 0x30, 0x4b, - 0x80, 0x25, 0xca, 0x57, 0x61, 0x0e, 0xd4, 0x83, 0xef, 0x2a, 0x4d, 0x1e, 0xfe, 0x43, 0xe4, 0x51, - 0x84, 0xc9, 0x0d, 0xca, 0x35, 0x85, 0x27, 0x5f, 0x35, 0xd1, 0x89, 0xa7, 0x60, 0x8e, 0xd5, 0x43, - 0x0d, 0x24, 0xc1, 0xc9, 0x5a, 0x2e, 0x6b, 0x02, 0x4c, 0xef, 0xea, 0x95, 0x13, 0x1c, 0x9a, 0xe6, - 0x6a, 0xe1, 0x17, 0x26, 0xc9, 0xce, 0xc6, 0xa0, 0xd9, 0xf7, 0x02, 0xa6, 0x44, 0xd6, 0x3f, 0x76, - 0xaa, 0x36, 0x7e, 0x22, 0xc9, 0x36, 0x1f, 0x41, 0x26, 0xd9, 0x95, 0xbd, 0x3f, 0x6f, 0xbd, 0x20, - 0x73, 0xe1, 
0xc9, 0xf6, 0x5f, 0xb6, 0x22, 0xdb, 0x8a, 0xed, 0xae, 0xc2, 0xea, 0xdb, 0x79, 0xd8, - 0xea, 0xf2, 0xcb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x49, 0x64, 0xc0, 0xd9, 0x6e, 0x02, 0x00, - 0x00, + // 260 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2b, 0xc9, 0x28, 0xca, + 0x2f, 0x29, 0xc9, 0x49, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x12, 0x40, 0x17, 0x97, 0x12, 0x86, 0x8b, 0xa4, 0x24, 0x96, 0x24, 0x42, 0x94, + 0x19, 0x7d, 0x66, 0xe6, 0xe2, 0x0c, 0x81, 0x89, 0x0b, 0xf9, 0x72, 0x71, 0xf8, 0x26, 0x56, 0x04, + 0x25, 0x96, 0xa4, 0x16, 0x0b, 0xc9, 0xe9, 0xa1, 0xaa, 0x87, 0x49, 0x04, 0xa5, 0x16, 0x96, 0xa6, + 0x16, 0x97, 0x48, 0xc9, 0xe3, 0x94, 0x2f, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x55, 0x62, 0x10, 0x0a, + 0xe6, 0xe2, 0x0a, 0x4e, 0x2d, 0x81, 0x4a, 0x08, 0x29, 0xa0, 0x69, 0x40, 0x48, 0xc1, 0x8c, 0x54, + 0xc4, 0xa3, 0x02, 0x6e, 0x68, 0x2a, 0x97, 0x80, 0x7b, 0x6a, 0x89, 0x73, 0x7e, 0x5e, 0x5a, 0x66, + 0x7a, 0x69, 0x51, 0x62, 0x49, 0x66, 0x7e, 0x9e, 0x90, 0x1a, 0x9a, 0x46, 0x74, 0x05, 0x30, 0x0b, + 0xd4, 0x09, 0xaa, 0x83, 0x5b, 0x93, 0xc7, 0x25, 0x1c, 0x5a, 0x90, 0x92, 0x58, 0x92, 0x8a, 0x6a, + 0x93, 0x26, 0x9a, 0x09, 0x58, 0xd4, 0xc0, 0x2c, 0xd3, 0x22, 0x46, 0x29, 0xdc, 0xbe, 0x6c, 0x2e, + 0xa1, 0xa0, 0xd4, 0x62, 0x74, 0x8f, 0x69, 0xa0, 0x99, 0x81, 0xa9, 0x04, 0x66, 0x9b, 0x26, 0x11, + 0x2a, 0x61, 0x96, 0x39, 0xd9, 0x9f, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, + 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x44, 0xe9, 0x96, 0x65, 0x96, 0xa4, 0x16, 0x17, 0xeb, 0x65, + 0xe6, 0xeb, 0x43, 0x58, 0xfa, 0xe9, 0xf9, 0xfa, 0x65, 0x25, 0xfa, 0xe0, 0x54, 0xa2, 0x8f, 0x9e, + 0x96, 0x92, 0xd8, 0xc0, 0xe2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x62, 0x05, 0x96, + 0x7e, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/go/vt/proto/topodata/cached_size.go b/go/vt/proto/topodata/cached_size.go new file mode 100644 index 00000000000..a473c6f8a4b --- /dev/null +++ b/go/vt/proto/topodata/cached_size.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package topodata + +func (cached *KeyRange) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(76) + } + // field Start []byte + size += int64(cap(cached.Start)) + // field End []byte + size += int64(cap(cached.End)) + // field XXX_unrecognized []byte + size += int64(cap(cached.XXX_unrecognized)) + return size +} diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index 98306ac985e..7f8e815186d 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -1,14 +1,15 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: topodata.proto package topodata import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - vttime "vitess.io/vitess/go/vt/proto/vttime" ) @@ -173,18 +174,26 @@ func (*KeyRange) ProtoMessage() {} func (*KeyRange) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{0} } - func (m *KeyRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyRange.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *KeyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyRange.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_KeyRange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *KeyRange) XXX_Merge(src proto.Message) { xxx_messageInfo_KeyRange.Merge(m, src) } func (m *KeyRange) XXX_Size() int { - return xxx_messageInfo_KeyRange.Size(m) + return m.Size() } func (m *KeyRange) XXX_DiscardUnknown() { xxx_messageInfo_KeyRange.DiscardUnknown(m) @@ -224,18 +233,26 @@ func (*TabletAlias) ProtoMessage() {} func (*TabletAlias) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{1} } - func (m *TabletAlias) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TabletAlias.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *TabletAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TabletAlias.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_TabletAlias.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *TabletAlias) XXX_Merge(src proto.Message) { xxx_messageInfo_TabletAlias.Merge(m, src) } func (m *TabletAlias) XXX_Size() int { - return xxx_messageInfo_TabletAlias.Size(m) + return m.Size() } func (m 
*TabletAlias) XXX_DiscardUnknown() { xxx_messageInfo_TabletAlias.DiscardUnknown(m) @@ -311,18 +328,26 @@ func (*Tablet) ProtoMessage() {} func (*Tablet) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{2} } - func (m *Tablet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Tablet.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Tablet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Tablet.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Tablet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Tablet) XXX_Merge(src proto.Message) { xxx_messageInfo_Tablet.Merge(m, src) } func (m *Tablet) XXX_Size() int { - return xxx_messageInfo_Tablet.Size(m) + return m.Size() } func (m *Tablet) XXX_DiscardUnknown() { xxx_messageInfo_Tablet.DiscardUnknown(m) @@ -469,18 +494,26 @@ func (*Shard) ProtoMessage() {} func (*Shard) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{3} } - func (m *Shard) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Shard.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Shard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Shard.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Shard.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Shard) XXX_Merge(src proto.Message) { xxx_messageInfo_Shard.Merge(m, src) } func (m *Shard) XXX_Size() int { - return xxx_messageInfo_Shard.Size(m) + return m.Size() } func (m *Shard) XXX_DiscardUnknown() { xxx_messageInfo_Shard.DiscardUnknown(m) @@ -552,18 +585,26 @@ func (*Shard_ServedType) ProtoMessage() {} func (*Shard_ServedType) Descriptor() ([]byte, []int) { return 
fileDescriptor_52c350cb619f972e, []int{3, 0} } - func (m *Shard_ServedType) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Shard_ServedType.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Shard_ServedType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Shard_ServedType.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Shard_ServedType.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Shard_ServedType) XXX_Merge(src proto.Message) { xxx_messageInfo_Shard_ServedType.Merge(m, src) } func (m *Shard_ServedType) XXX_Size() int { - return xxx_messageInfo_Shard_ServedType.Size(m) + return m.Size() } func (m *Shard_ServedType) XXX_DiscardUnknown() { xxx_messageInfo_Shard_ServedType.DiscardUnknown(m) @@ -610,18 +651,26 @@ func (*Shard_SourceShard) ProtoMessage() {} func (*Shard_SourceShard) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{3, 1} } - func (m *Shard_SourceShard) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Shard_SourceShard.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Shard_SourceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Shard_SourceShard.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Shard_SourceShard.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Shard_SourceShard) XXX_Merge(src proto.Message) { xxx_messageInfo_Shard_SourceShard.Merge(m, src) } func (m *Shard_SourceShard) XXX_Size() int { - return xxx_messageInfo_Shard_SourceShard.Size(m) + return m.Size() } func (m *Shard_SourceShard) XXX_DiscardUnknown() { xxx_messageInfo_Shard_SourceShard.DiscardUnknown(m) @@ -684,18 +733,26 @@ func (*Shard_TabletControl) ProtoMessage() 
{} func (*Shard_TabletControl) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{3, 2} } - func (m *Shard_TabletControl) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Shard_TabletControl.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Shard_TabletControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Shard_TabletControl.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Shard_TabletControl.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Shard_TabletControl) XXX_Merge(src proto.Message) { xxx_messageInfo_Shard_TabletControl.Merge(m, src) } func (m *Shard_TabletControl) XXX_Size() int { - return xxx_messageInfo_Shard_TabletControl.Size(m) + return m.Size() } func (m *Shard_TabletControl) XXX_DiscardUnknown() { xxx_messageInfo_Shard_TabletControl.DiscardUnknown(m) @@ -765,18 +822,26 @@ func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{4} } - func (m *Keyspace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Keyspace.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Keyspace) XXX_Merge(src proto.Message) { xxx_messageInfo_Keyspace.Merge(m, src) } func (m *Keyspace) XXX_Size() int { - return xxx_messageInfo_Keyspace.Size(m) + return m.Size() } func (m *Keyspace) XXX_DiscardUnknown() { xxx_messageInfo_Keyspace.DiscardUnknown(m) @@ -846,18 +911,26 @@ func (*Keyspace_ServedFrom) ProtoMessage() {} func 
(*Keyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{4, 0} } - func (m *Keyspace_ServedFrom) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Keyspace_ServedFrom.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Keyspace_ServedFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Keyspace_ServedFrom.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Keyspace_ServedFrom.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Keyspace_ServedFrom) XXX_Merge(src proto.Message) { xxx_messageInfo_Keyspace_ServedFrom.Merge(m, src) } func (m *Keyspace_ServedFrom) XXX_Size() int { - return xxx_messageInfo_Keyspace_ServedFrom.Size(m) + return m.Size() } func (m *Keyspace_ServedFrom) XXX_DiscardUnknown() { xxx_messageInfo_Keyspace_ServedFrom.DiscardUnknown(m) @@ -903,18 +976,26 @@ func (*ShardReplication) ProtoMessage() {} func (*ShardReplication) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{5} } - func (m *ShardReplication) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShardReplication.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ShardReplication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShardReplication.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ShardReplication.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ShardReplication) XXX_Merge(src proto.Message) { xxx_messageInfo_ShardReplication.Merge(m, src) } func (m *ShardReplication) XXX_Size() int { - return xxx_messageInfo_ShardReplication.Size(m) + return m.Size() } func (m *ShardReplication) XXX_DiscardUnknown() { 
xxx_messageInfo_ShardReplication.DiscardUnknown(m) @@ -943,18 +1024,26 @@ func (*ShardReplication_Node) ProtoMessage() {} func (*ShardReplication_Node) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{5, 0} } - func (m *ShardReplication_Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShardReplication_Node.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ShardReplication_Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShardReplication_Node.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ShardReplication_Node.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ShardReplication_Node) XXX_Merge(src proto.Message) { xxx_messageInfo_ShardReplication_Node.Merge(m, src) } func (m *ShardReplication_Node) XXX_Size() int { - return xxx_messageInfo_ShardReplication_Node.Size(m) + return m.Size() } func (m *ShardReplication_Node) XXX_DiscardUnknown() { xxx_messageInfo_ShardReplication_Node.DiscardUnknown(m) @@ -985,18 +1074,26 @@ func (*ShardReference) ProtoMessage() {} func (*ShardReference) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{6} } - func (m *ShardReference) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShardReference.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ShardReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShardReference.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ShardReference.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ShardReference) XXX_Merge(src proto.Message) { xxx_messageInfo_ShardReference.Merge(m, src) } func (m *ShardReference) XXX_Size() int { - return 
xxx_messageInfo_ShardReference.Size(m) + return m.Size() } func (m *ShardReference) XXX_DiscardUnknown() { xxx_messageInfo_ShardReference.DiscardUnknown(m) @@ -1036,18 +1133,26 @@ func (*ShardTabletControl) ProtoMessage() {} func (*ShardTabletControl) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{7} } - func (m *ShardTabletControl) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShardTabletControl.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ShardTabletControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShardTabletControl.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ShardTabletControl.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ShardTabletControl) XXX_Merge(src proto.Message) { xxx_messageInfo_ShardTabletControl.Merge(m, src) } func (m *ShardTabletControl) XXX_Size() int { - return xxx_messageInfo_ShardTabletControl.Size(m) + return m.Size() } func (m *ShardTabletControl) XXX_DiscardUnknown() { xxx_messageInfo_ShardTabletControl.DiscardUnknown(m) @@ -1095,18 +1200,26 @@ func (*SrvKeyspace) ProtoMessage() {} func (*SrvKeyspace) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{8} } - func (m *SrvKeyspace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SrvKeyspace.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SrvKeyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SrvKeyspace.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SrvKeyspace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SrvKeyspace) XXX_Merge(src proto.Message) { xxx_messageInfo_SrvKeyspace.Merge(m, src) } func (m *SrvKeyspace) 
XXX_Size() int { - return xxx_messageInfo_SrvKeyspace.Size(m) + return m.Size() } func (m *SrvKeyspace) XXX_DiscardUnknown() { xxx_messageInfo_SrvKeyspace.DiscardUnknown(m) @@ -1160,18 +1273,26 @@ func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{8, 0} } - func (m *SrvKeyspace_KeyspacePartition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SrvKeyspace_KeyspacePartition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SrvKeyspace_KeyspacePartition) XXX_Merge(src proto.Message) { xxx_messageInfo_SrvKeyspace_KeyspacePartition.Merge(m, src) } func (m *SrvKeyspace_KeyspacePartition) XXX_Size() int { - return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Size(m) + return m.Size() } func (m *SrvKeyspace_KeyspacePartition) XXX_DiscardUnknown() { xxx_messageInfo_SrvKeyspace_KeyspacePartition.DiscardUnknown(m) @@ -1218,18 +1339,26 @@ func (*SrvKeyspace_ServedFrom) ProtoMessage() {} func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{8, 1} } - func (m *SrvKeyspace_ServedFrom) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SrvKeyspace_ServedFrom.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *SrvKeyspace_ServedFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SrvKeyspace_ServedFrom.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SrvKeyspace_ServedFrom.Marshal(b, m, deterministic) + } else { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SrvKeyspace_ServedFrom) XXX_Merge(src proto.Message) { xxx_messageInfo_SrvKeyspace_ServedFrom.Merge(m, src) } func (m *SrvKeyspace_ServedFrom) XXX_Size() int { - return xxx_messageInfo_SrvKeyspace_ServedFrom.Size(m) + return m.Size() } func (m *SrvKeyspace_ServedFrom) XXX_DiscardUnknown() { xxx_messageInfo_SrvKeyspace_ServedFrom.DiscardUnknown(m) @@ -1274,18 +1403,26 @@ func (*CellInfo) ProtoMessage() {} func (*CellInfo) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{9} } - func (m *CellInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CellInfo.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CellInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CellInfo.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CellInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CellInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_CellInfo.Merge(m, src) } func (m *CellInfo) XXX_Size() int { - return xxx_messageInfo_CellInfo.Size(m) + return m.Size() } func (m *CellInfo) XXX_DiscardUnknown() { xxx_messageInfo_CellInfo.DiscardUnknown(m) @@ -1322,18 +1459,26 @@ func (*CellsAlias) ProtoMessage() {} func (*CellsAlias) Descriptor() ([]byte, []int) { return fileDescriptor_52c350cb619f972e, []int{10} } - func (m *CellsAlias) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CellsAlias.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CellsAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CellsAlias.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CellsAlias.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != 
nil { + return nil, err + } + return b[:n], nil + } } func (m *CellsAlias) XXX_Merge(src proto.Message) { xxx_messageInfo_CellsAlias.Merge(m, src) } func (m *CellsAlias) XXX_Size() int { - return xxx_messageInfo_CellsAlias.Size(m) + return m.Size() } func (m *CellsAlias) XXX_DiscardUnknown() { xxx_messageInfo_CellsAlias.DiscardUnknown(m) @@ -1348,6 +1493,164 @@ func (m *CellsAlias) GetCells() []string { return nil } +type TopoConfig struct { + TopoType string `protobuf:"bytes,1,opt,name=topo_type,json=topoType,proto3" json:"topo_type,omitempty"` + Server string `protobuf:"bytes,2,opt,name=server,proto3" json:"server,omitempty"` + Root string `protobuf:"bytes,3,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopoConfig) Reset() { *m = TopoConfig{} } +func (m *TopoConfig) String() string { return proto.CompactTextString(m) } +func (*TopoConfig) ProtoMessage() {} +func (*TopoConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{11} +} +func (m *TopoConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopoConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopoConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopoConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopoConfig.Merge(m, src) +} +func (m *TopoConfig) XXX_Size() int { + return m.Size() +} +func (m *TopoConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TopoConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TopoConfig proto.InternalMessageInfo + +func (m *TopoConfig) GetTopoType() string { + if m != nil { + return m.TopoType + } + return "" +} + +func (m *TopoConfig) GetServer() string { + if m != nil { + return m.Server + } + return "" +} 
+ +func (m *TopoConfig) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +type ExternalVitessCluster struct { + TopoConfig *TopoConfig `protobuf:"bytes,1,opt,name=topo_config,json=topoConfig,proto3" json:"topo_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExternalVitessCluster) Reset() { *m = ExternalVitessCluster{} } +func (m *ExternalVitessCluster) String() string { return proto.CompactTextString(m) } +func (*ExternalVitessCluster) ProtoMessage() {} +func (*ExternalVitessCluster) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{12} +} +func (m *ExternalVitessCluster) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalVitessCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExternalVitessCluster.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExternalVitessCluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalVitessCluster.Merge(m, src) +} +func (m *ExternalVitessCluster) XXX_Size() int { + return m.Size() +} +func (m *ExternalVitessCluster) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalVitessCluster.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalVitessCluster proto.InternalMessageInfo + +func (m *ExternalVitessCluster) GetTopoConfig() *TopoConfig { + if m != nil { + return m.TopoConfig + } + return nil +} + +// ExternalClusters +type ExternalClusters struct { + VitessCluster []*ExternalVitessCluster `protobuf:"bytes,1,rep,name=vitess_cluster,json=vitessCluster,proto3" json:"vitess_cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExternalClusters) Reset() { *m = ExternalClusters{} } 
+func (m *ExternalClusters) String() string { return proto.CompactTextString(m) } +func (*ExternalClusters) ProtoMessage() {} +func (*ExternalClusters) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{13} +} +func (m *ExternalClusters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalClusters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExternalClusters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExternalClusters) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalClusters.Merge(m, src) +} +func (m *ExternalClusters) XXX_Size() int { + return m.Size() +} +func (m *ExternalClusters) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalClusters.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalClusters proto.InternalMessageInfo + +func (m *ExternalClusters) GetVitessCluster() []*ExternalVitessCluster { + if m != nil { + return m.VitessCluster + } + return nil +} + func init() { proto.RegisterEnum("topodata.KeyspaceType", KeyspaceType_name, KeyspaceType_value) proto.RegisterEnum("topodata.KeyspaceIdType", KeyspaceIdType_name, KeyspaceIdType_value) @@ -1372,95 +1675,5271 @@ func init() { proto.RegisterType((*SrvKeyspace_ServedFrom)(nil), "topodata.SrvKeyspace.ServedFrom") proto.RegisterType((*CellInfo)(nil), "topodata.CellInfo") proto.RegisterType((*CellsAlias)(nil), "topodata.CellsAlias") + proto.RegisterType((*TopoConfig)(nil), "topodata.TopoConfig") + proto.RegisterType((*ExternalVitessCluster)(nil), "topodata.ExternalVitessCluster") + proto.RegisterType((*ExternalClusters)(nil), "topodata.ExternalClusters") } func init() { proto.RegisterFile("topodata.proto", fileDescriptor_52c350cb619f972e) } var fileDescriptor_52c350cb619f972e = []byte{ - // 1349 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6e, 0xdb, 0x46, - 0x13, 0x0f, 0xf5, 0xcf, 0xd4, 0x88, 0x92, 0x99, 0x8d, 0x63, 0x10, 0xfa, 0xbe, 0xa0, 0x86, 0x8a, - 0xa0, 0x82, 0x8b, 0xca, 0xad, 0x93, 0xb4, 0x46, 0x8a, 0x02, 0x51, 0x64, 0xa5, 0x71, 0x6c, 0xcb, - 0xc2, 0x4a, 0x46, 0x9b, 0x5e, 0x08, 0x5a, 0x5a, 0x3b, 0x84, 0x25, 0x52, 0xd9, 0x5d, 0x0b, 0x50, - 0x5f, 0xa1, 0x87, 0xf6, 0xdc, 0x37, 0xe8, 0xfb, 0xf4, 0xd8, 0x4b, 0xfb, 0x1c, 0x3d, 0x14, 0x3b, - 0x4b, 0x52, 0x94, 0x14, 0xa7, 0x4e, 0xe1, 0xdb, 0xcc, 0xec, 0xcc, 0x70, 0xe6, 0xb7, 0xbf, 0x99, - 0x95, 0xa0, 0x22, 0xc3, 0x49, 0x38, 0xf4, 0xa4, 0xd7, 0x98, 0xf0, 0x50, 0x86, 0xc4, 0x8c, 0xf5, - 0xaa, 0x35, 0x95, 0xd2, 0x1f, 0x33, 0x6d, 0xaf, 0xed, 0x82, 0x79, 0xc8, 0x66, 0xd4, 0x0b, 0x2e, - 0x18, 0xd9, 0x80, 0xbc, 0x90, 0x1e, 0x97, 0x8e, 0xb1, 0x65, 0xd4, 0x2d, 0xaa, 0x15, 0x62, 0x43, - 0x96, 0x05, 0x43, 0x27, 0x83, 0x36, 0x25, 0xd6, 0x1e, 0x41, 0xa9, 0xef, 0x9d, 0x8d, 0x98, 0x6c, - 0x8e, 0x7c, 0x4f, 0x10, 0x02, 0xb9, 0x01, 0x1b, 0x8d, 0x30, 0xaa, 0x48, 0x51, 0x56, 0x41, 0x57, - 0xbe, 0x0e, 0x2a, 0x53, 0x25, 0xd6, 0xfe, 0xce, 0x41, 0x41, 0x47, 0x91, 0x4f, 0x21, 0xef, 0xa9, - 0x48, 0x8c, 0x28, 0xed, 0xde, 0x6f, 0x24, 0xb5, 0xa6, 0xd2, 0x52, 0xed, 0x43, 0xaa, 0x60, 0xbe, - 0x09, 0x85, 0x0c, 0xbc, 0x31, 0xc3, 0x74, 0x45, 0x9a, 0xe8, 0x64, 0x0f, 0xcc, 0x49, 0xc8, 0xa5, - 0x3b, 0xf6, 0x26, 0x4e, 0x6e, 0x2b, 0x5b, 0x2f, 0xed, 0x3e, 0x58, 0xce, 0xd5, 0xe8, 0x86, 0x5c, - 0x1e, 0x7b, 0x93, 0x76, 0x20, 0xf9, 0x8c, 0xae, 0x4d, 0xb4, 0xa6, 0xb2, 0x5e, 0xb2, 0x99, 0x98, - 0x78, 0x03, 0xe6, 0xe4, 0x75, 0xd6, 0x58, 0x47, 0x18, 0xde, 0x78, 0x7c, 0xe8, 0x14, 0xf0, 0x40, - 0x2b, 0x64, 0x07, 0x8a, 0x97, 0x6c, 0xe6, 0x72, 0x85, 0x94, 0xb3, 0x86, 0x85, 0x93, 0xf9, 0xc7, - 0x62, 0x0c, 0x31, 0x8d, 0x46, 0xb3, 0x0e, 0x39, 0x39, 0x9b, 0x30, 0xc7, 0xdc, 0x32, 0xea, 0x95, - 0xdd, 0x8d, 0xe5, 0xc2, 0xfa, 0xb3, 0x09, 0xa3, 0xe8, 0x41, 0xea, 0x60, 0x0f, 0xcf, 0x5c, 0xd5, - 0x91, 0x1b, 0x4e, 0x19, 0xe7, 0xfe, 0x90, 0x39, 0x45, 0xfc, 0x76, 
0x65, 0x78, 0xd6, 0xf1, 0xc6, - 0xec, 0x24, 0xb2, 0x92, 0x06, 0xe4, 0xa4, 0x77, 0x21, 0x1c, 0xc0, 0x66, 0xab, 0x2b, 0xcd, 0xf6, - 0xbd, 0x0b, 0xa1, 0x3b, 0x45, 0x3f, 0xf2, 0x10, 0x2a, 0xe3, 0x99, 0x78, 0x3b, 0x72, 0x13, 0x08, - 0x2d, 0xcc, 0x5b, 0x46, 0xeb, 0xcb, 0x18, 0xc7, 0x07, 0x00, 0xda, 0x4d, 0xc1, 0xe3, 0x94, 0xb7, - 0x8c, 0x7a, 0x9e, 0x16, 0xd1, 0xa2, 0xd0, 0x23, 0x4d, 0xd8, 0x1c, 0x7b, 0x42, 0x32, 0xee, 0x4a, - 0xc6, 0xc7, 0x2e, 0xd2, 0xc2, 0x55, 0x1c, 0x72, 0x2a, 0x88, 0x83, 0xd5, 0x88, 0x28, 0xd5, 0xf7, - 0xc7, 0x8c, 0xde, 0xd3, 0xbe, 0x7d, 0xc6, 0xc7, 0x3d, 0xe5, 0xa9, 0x8c, 0xd5, 0xa7, 0x60, 0xa5, - 0x2f, 0x42, 0xf1, 0xe3, 0x92, 0xcd, 0x22, 0xca, 0x28, 0x51, 0xa1, 0x3e, 0xf5, 0x46, 0x57, 0xfa, - 0x92, 0xf3, 0x54, 0x2b, 0x4f, 0x33, 0x7b, 0x46, 0xf5, 0x2b, 0x28, 0x26, 0x7d, 0xfd, 0x5b, 0x60, - 0x31, 0x15, 0xf8, 0x2a, 0x67, 0x66, 0xed, 0xdc, 0xab, 0x9c, 0x59, 0xb2, 0xad, 0xda, 0xef, 0x05, - 0xc8, 0xf7, 0xf0, 0x22, 0xf7, 0xc0, 0x8a, 0xba, 0xb9, 0x01, 0x09, 0x4b, 0xda, 0x55, 0x13, 0xfd, - 0x7a, 0x1c, 0xcc, 0x1b, 0xe2, 0xb0, 0xc8, 0xa2, 0xcc, 0x0d, 0x58, 0xf4, 0x0d, 0x58, 0x82, 0xf1, - 0x29, 0x1b, 0xba, 0x8a, 0x2a, 0xc2, 0xc9, 0x2e, 0xdf, 0x3c, 0x36, 0xd5, 0xe8, 0xa1, 0x0f, 0x72, - 0xaa, 0x24, 0x12, 0x59, 0x90, 0x67, 0x50, 0x16, 0xe1, 0x15, 0x1f, 0x30, 0x17, 0x59, 0x2c, 0xa2, - 0x31, 0xf9, 0xdf, 0x4a, 0x3c, 0x3a, 0xa1, 0x4c, 0x2d, 0x31, 0x57, 0x04, 0x79, 0x01, 0xeb, 0x12, - 0x01, 0x71, 0x07, 0x61, 0x20, 0x79, 0x38, 0x12, 0x4e, 0x61, 0x79, 0xd4, 0x74, 0x0e, 0x8d, 0x5b, - 0x4b, 0x7b, 0xd1, 0x8a, 0x4c, 0xab, 0x82, 0x6c, 0xc3, 0x5d, 0x5f, 0xb8, 0x11, 0x7e, 0xaa, 0x44, - 0x3f, 0xb8, 0xc0, 0x39, 0x32, 0xe9, 0xba, 0x2f, 0x8e, 0xd1, 0xde, 0xd3, 0xe6, 0xea, 0x6b, 0x80, - 0x79, 0x43, 0xe4, 0x09, 0x94, 0xa2, 0x0a, 0x70, 0x9e, 0x8c, 0xf7, 0xcc, 0x13, 0xc8, 0x44, 0x56, - 0xbc, 0x50, 0xab, 0x48, 0x38, 0x99, 0xad, 0xac, 0xe2, 0x05, 0x2a, 0xd5, 0x5f, 0x0d, 0x28, 0xa5, - 0x9a, 0x8d, 0x17, 0x95, 0x91, 0x2c, 0xaa, 0x85, 0xd5, 0x90, 0xb9, 0x6e, 0x35, 0x64, 0xaf, 0x5d, - 0x0d, 
0xb9, 0x1b, 0x5c, 0xea, 0x26, 0x14, 0xb0, 0x50, 0xe1, 0xe4, 0xb1, 0xb6, 0x48, 0xab, 0xfe, - 0x66, 0x40, 0x79, 0x01, 0xc5, 0x5b, 0xed, 0x9d, 0x7c, 0x06, 0xe4, 0x6c, 0xe4, 0x0d, 0x2e, 0x47, - 0xbe, 0x90, 0x8a, 0x50, 0xba, 0x84, 0x1c, 0xba, 0xdc, 0x4d, 0x9d, 0x60, 0x52, 0xa1, 0xaa, 0x3c, - 0xe7, 0xe1, 0x8f, 0x2c, 0xc0, 0x0d, 0x69, 0xd2, 0x48, 0x4b, 0xc6, 0x2a, 0x6f, 0x17, 0x6a, 0x7f, - 0x64, 0xf1, 0xfd, 0xd0, 0xe8, 0x7c, 0x0e, 0x1b, 0x08, 0x88, 0x1f, 0x5c, 0xb8, 0x83, 0x70, 0x74, - 0x35, 0x0e, 0x70, 0xa9, 0x45, 0xc3, 0x4a, 0xe2, 0xb3, 0x16, 0x1e, 0xa9, 0xbd, 0x46, 0x5e, 0xad, - 0x46, 0x60, 0x9f, 0x19, 0xec, 0xd3, 0x59, 0x00, 0x11, 0xbf, 0x71, 0xa0, 0x39, 0xbe, 0x94, 0x0b, - 0x7b, 0x7e, 0x96, 0x4c, 0xca, 0x39, 0x0f, 0xc7, 0x62, 0xf5, 0x41, 0x88, 0x73, 0x44, 0xc3, 0xf2, - 0x82, 0x87, 0xe3, 0x78, 0x58, 0x94, 0x2c, 0xc8, 0xd7, 0x50, 0x8e, 0x6f, 0x5a, 0x97, 0x91, 0xc7, - 0x32, 0x36, 0x57, 0x53, 0x60, 0x11, 0xd6, 0x65, 0x4a, 0x23, 0x1f, 0x43, 0xf9, 0xcc, 0x13, 0xcc, - 0x4d, 0xb8, 0xa3, 0x5f, 0x0f, 0x4b, 0x19, 0x13, 0x84, 0xbe, 0x80, 0xb2, 0x08, 0xbc, 0x89, 0x78, - 0x13, 0x46, 0x8b, 0x63, 0xed, 0x1d, 0x8b, 0xc3, 0x8a, 0x5d, 0x70, 0x73, 0x5e, 0xc5, 0xb3, 0xa0, - 0x6a, 0xbc, 0x5d, 0x3e, 0xa4, 0x99, 0x9e, 0x5d, 0x64, 0xba, 0xbe, 0xe4, 0xda, 0x4f, 0x06, 0xd8, - 0x7a, 0x29, 0xb0, 0xc9, 0xc8, 0x1f, 0x78, 0xd2, 0x0f, 0x03, 0xf2, 0x04, 0xf2, 0x41, 0x38, 0x64, - 0x6a, 0x73, 0x2a, 0x84, 0x3f, 0x5a, 0xda, 0x03, 0x29, 0xd7, 0x46, 0x27, 0x1c, 0x32, 0xaa, 0xbd, - 0xab, 0xcf, 0x20, 0xa7, 0x54, 0xb5, 0x7f, 0xa3, 0x16, 0x6e, 0xb2, 0x7f, 0xe5, 0x5c, 0xa9, 0x9d, - 0x42, 0x25, 0xfa, 0xc2, 0x39, 0xe3, 0x2c, 0x18, 0x30, 0xf5, 0xd3, 0x23, 0xc5, 0x30, 0x94, 0x3f, - 0x78, 0xc5, 0xd6, 0x7e, 0x36, 0x80, 0x60, 0xde, 0xc5, 0xd1, 0xbb, 0x8d, 0xdc, 0xe4, 0x31, 0x6c, - 0xbe, 0xbd, 0x62, 0x7c, 0xa6, 0x37, 0xde, 0x80, 0xb9, 0x43, 0x5f, 0xa8, 0xaf, 0xe8, 0x0d, 0x62, - 0xd2, 0x0d, 0x3c, 0xed, 0xe9, 0xc3, 0xfd, 0xe8, 0xac, 0xf6, 0x57, 0x0e, 0x4a, 0x3d, 0x3e, 0x4d, - 0x68, 0xf3, 0x2d, 0xc0, 0xc4, 0xe3, 0xd2, 
0x57, 0x98, 0xc6, 0xb0, 0x7f, 0x92, 0x82, 0x7d, 0xee, - 0x9a, 0x30, 0xb4, 0x1b, 0xfb, 0xd3, 0x54, 0xe8, 0xb5, 0x13, 0x9a, 0xf9, 0xe0, 0x09, 0xcd, 0xfe, - 0x87, 0x09, 0x6d, 0x42, 0x29, 0x35, 0xa1, 0xd1, 0x80, 0x6e, 0xbd, 0xbb, 0x8f, 0xd4, 0x8c, 0xc2, - 0x7c, 0x46, 0xab, 0x7f, 0x1a, 0x70, 0x77, 0xa5, 0x45, 0x35, 0x15, 0xa9, 0x47, 0xf2, 0xfd, 0x53, - 0x31, 0x7f, 0x1d, 0x49, 0x0b, 0x6c, 0xac, 0xd2, 0xe5, 0x31, 0xa1, 0xf4, 0x80, 0x94, 0xd2, 0x7d, - 0x2d, 0x32, 0x8e, 0xae, 0x8b, 0x05, 0x5d, 0x90, 0x2e, 0xdc, 0xd7, 0x49, 0x96, 0x5f, 0x49, 0xfd, - 0x52, 0xff, 0x7f, 0x29, 0xd3, 0xe2, 0x23, 0x79, 0x4f, 0xac, 0xd8, 0x44, 0xd5, 0xbd, 0x8d, 0x89, - 0x7f, 0xcf, 0x2b, 0x16, 0xad, 0xee, 0x43, 0x30, 0x5b, 0x6c, 0x34, 0x3a, 0x08, 0xce, 0x43, 0xf5, - 0x3b, 0x11, 0x71, 0xe1, 0xae, 0x37, 0x1c, 0x72, 0x26, 0x44, 0xc4, 0xfa, 0xb2, 0xb6, 0x36, 0xb5, - 0x51, 0x8d, 0x04, 0x0f, 0x43, 0x19, 0x25, 0x44, 0x39, 0x5a, 0x14, 0x35, 0x00, 0x95, 0x4c, 0xe8, - 0x1f, 0x4a, 0xef, 0x5c, 0x37, 0xdb, 0x75, 0xb0, 0xd2, 0xfb, 0x93, 0x00, 0x14, 0x3a, 0x27, 0xf4, - 0xb8, 0x79, 0x64, 0xdf, 0x21, 0x16, 0x98, 0xbd, 0x4e, 0xb3, 0xdb, 0x7b, 0x79, 0xd2, 0xb7, 0x8d, - 0xed, 0x5d, 0xa8, 0x2c, 0xd2, 0x89, 0x14, 0x21, 0x7f, 0xda, 0xe9, 0xb5, 0xfb, 0xf6, 0x1d, 0x15, - 0x76, 0x7a, 0xd0, 0xe9, 0x7f, 0xf9, 0xd8, 0x36, 0x94, 0xf9, 0xf9, 0xeb, 0x7e, 0xbb, 0x67, 0x67, - 0xb6, 0x7f, 0x31, 0x00, 0xe6, 0x58, 0x90, 0x12, 0xac, 0x9d, 0x76, 0x0e, 0x3b, 0x27, 0xdf, 0x75, - 0x74, 0xc8, 0x71, 0xb3, 0xd7, 0x6f, 0x53, 0xdb, 0x50, 0x07, 0xb4, 0xdd, 0x3d, 0x3a, 0x68, 0x35, - 0xed, 0x8c, 0x3a, 0xa0, 0xfb, 0x27, 0x9d, 0xa3, 0xd7, 0x76, 0x16, 0x73, 0x35, 0xfb, 0xad, 0x97, - 0x5a, 0xec, 0x75, 0x9b, 0xb4, 0x6d, 0xe7, 0x88, 0x0d, 0x56, 0xfb, 0xfb, 0x6e, 0x9b, 0x1e, 0x1c, - 0xb7, 0x3b, 0xfd, 0xe6, 0x91, 0x9d, 0x57, 0x31, 0xcf, 0x9b, 0xad, 0xc3, 0xd3, 0xae, 0x5d, 0xd0, - 0xc9, 0x7a, 0xfd, 0x13, 0xda, 0xb6, 0xd7, 0x94, 0xb2, 0x4f, 0x9b, 0x07, 0x9d, 0xf6, 0xbe, 0x6d, - 0x56, 0x33, 0xb6, 0xf1, 0x7c, 0x0f, 0xd6, 0xfd, 0xb0, 0x31, 0xf5, 0x25, 0x13, 
0x42, 0xff, 0xdd, - 0xfa, 0xe1, 0x61, 0xa4, 0xf9, 0xe1, 0x8e, 0x96, 0x76, 0x2e, 0xc2, 0x9d, 0xa9, 0xdc, 0xc1, 0xd3, - 0x9d, 0xf8, 0x52, 0xcf, 0x0a, 0xa8, 0x3f, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x51, 0xac, 0x2b, - 0xc1, 0xc6, 0x0d, 0x00, 0x00, + // 1470 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x6f, 0xdb, 0xc6, + 0x12, 0x0f, 0xf5, 0xcf, 0xd2, 0x88, 0x92, 0x99, 0x8d, 0x63, 0x10, 0xca, 0x8b, 0x9f, 0xa1, 0x87, + 0xe0, 0x19, 0x7e, 0x78, 0x72, 0xeb, 0x24, 0x6d, 0x90, 0xa2, 0x40, 0x14, 0x59, 0xa9, 0x1d, 0xdb, + 0xb2, 0xb0, 0x92, 0xdb, 0x26, 0x17, 0x82, 0x96, 0xd6, 0x0e, 0x61, 0x8a, 0x54, 0xb8, 0x2b, 0xa1, + 0xea, 0x57, 0xe8, 0xa1, 0x3d, 0x16, 0xfd, 0x06, 0xfd, 0x26, 0x3d, 0xf6, 0xd0, 0x63, 0x0f, 0xad, + 0xfb, 0x35, 0x7a, 0x28, 0x76, 0x96, 0xa4, 0x28, 0xc9, 0x4e, 0x9d, 0xc2, 0xb7, 0x9d, 0xd9, 0x99, + 0xd9, 0x99, 0xdf, 0xfe, 0x66, 0x96, 0x84, 0xb2, 0xf0, 0x87, 0x7e, 0xdf, 0x16, 0x76, 0x6d, 0x18, + 0xf8, 0xc2, 0x27, 0xf9, 0x48, 0xae, 0xe8, 0x63, 0x21, 0x9c, 0x01, 0x53, 0xfa, 0xea, 0x36, 0xe4, + 0xf7, 0xd9, 0x84, 0xda, 0xde, 0x19, 0x23, 0x2b, 0x90, 0xe5, 0xc2, 0x0e, 0x84, 0xa9, 0xad, 0x6b, + 0x1b, 0x3a, 0x55, 0x02, 0x31, 0x20, 0xcd, 0xbc, 0xbe, 0x99, 0x42, 0x9d, 0x5c, 0x56, 0x1f, 0x42, + 0xb1, 0x6b, 0x9f, 0xb8, 0x4c, 0xd4, 0x5d, 0xc7, 0xe6, 0x84, 0x40, 0xa6, 0xc7, 0x5c, 0x17, 0xbd, + 0x0a, 0x14, 0xd7, 0xd2, 0x69, 0xe4, 0x28, 0xa7, 0x12, 0x95, 0xcb, 0xea, 0x9f, 0x19, 0xc8, 0x29, + 0x2f, 0xf2, 0x3f, 0xc8, 0xda, 0xd2, 0x13, 0x3d, 0x8a, 0xdb, 0x77, 0x6b, 0x71, 0xae, 0x89, 0xb0, + 0x54, 0xd9, 0x90, 0x0a, 0xe4, 0xdf, 0xf8, 0x5c, 0x78, 0xf6, 0x80, 0x61, 0xb8, 0x02, 0x8d, 0x65, + 0xf2, 0x04, 0xf2, 0x43, 0x3f, 0x10, 0xd6, 0xc0, 0x1e, 0x9a, 0x99, 0xf5, 0xf4, 0x46, 0x71, 0xfb, + 0xfe, 0x7c, 0xac, 0x5a, 0xdb, 0x0f, 0xc4, 0xa1, 0x3d, 0x6c, 0x7a, 0x22, 0x98, 0xd0, 0xa5, 0xa1, + 0x92, 0x64, 0xd4, 0x73, 0x36, 0xe1, 0x43, 0xbb, 0xc7, 0xcc, 0xac, 0x8a, 0x1a, 0xc9, 0x08, 0xc3, + 0x1b, 0x3b, 0xe8, 0x9b, 0x39, 0xdc, 
0x50, 0x02, 0xd9, 0x82, 0xc2, 0x39, 0x9b, 0x58, 0x81, 0x44, + 0xca, 0x5c, 0xc2, 0xc4, 0xc9, 0xf4, 0xb0, 0x08, 0x43, 0x0c, 0xa3, 0xd0, 0xdc, 0x80, 0x8c, 0x98, + 0x0c, 0x99, 0x99, 0x5f, 0xd7, 0x36, 0xca, 0xdb, 0x2b, 0xf3, 0x89, 0x75, 0x27, 0x43, 0x46, 0xd1, + 0x82, 0x6c, 0x80, 0xd1, 0x3f, 0xb1, 0x64, 0x45, 0x96, 0x3f, 0x66, 0x41, 0xe0, 0xf4, 0x99, 0x59, + 0xc0, 0xb3, 0xcb, 0xfd, 0x93, 0x96, 0x3d, 0x60, 0x47, 0xa1, 0x96, 0xd4, 0x20, 0x23, 0xec, 0x33, + 0x6e, 0x02, 0x16, 0x5b, 0x59, 0x28, 0xb6, 0x6b, 0x9f, 0x71, 0x55, 0x29, 0xda, 0x91, 0x07, 0x50, + 0x1e, 0x4c, 0xf8, 0x5b, 0xd7, 0x8a, 0x21, 0xd4, 0x31, 0x6e, 0x09, 0xb5, 0xbb, 0x11, 0x8e, 0xf7, + 0x01, 0x94, 0x99, 0x84, 0xc7, 0x2c, 0xad, 0x6b, 0x1b, 0x59, 0x5a, 0x40, 0x8d, 0x44, 0x8f, 0xd4, + 0x61, 0x75, 0x60, 0x73, 0xc1, 0x02, 0x4b, 0xb0, 0x60, 0x60, 0x21, 0x2d, 0x2c, 0xc9, 0x21, 0xb3, + 0x8c, 0x38, 0xe8, 0xb5, 0x90, 0x52, 0x5d, 0x67, 0xc0, 0xe8, 0x1d, 0x65, 0xdb, 0x65, 0xc1, 0xa0, + 0x23, 0x2d, 0xa5, 0xb2, 0xf2, 0x14, 0xf4, 0xe4, 0x45, 0x48, 0x7e, 0x9c, 0xb3, 0x49, 0x48, 0x19, + 0xb9, 0x94, 0xa8, 0x8f, 0x6d, 0x77, 0xa4, 0x2e, 0x39, 0x4b, 0x95, 0xf0, 0x34, 0xf5, 0x44, 0xab, + 0x7c, 0x0c, 0x85, 0xb8, 0xae, 0xbf, 0x73, 0x2c, 0x24, 0x1c, 0x5f, 0x66, 0xf2, 0x69, 0x23, 0xf3, + 0x32, 0x93, 0x2f, 0x1a, 0x7a, 0xf5, 0x97, 0x1c, 0x64, 0x3b, 0x78, 0x91, 0x4f, 0x40, 0x0f, 0xab, + 0xb9, 0x06, 0x09, 0x8b, 0xca, 0x54, 0x11, 0xfd, 0x6a, 0x1c, 0xf2, 0xd7, 0xc4, 0x61, 0x96, 0x45, + 0xa9, 0x6b, 0xb0, 0xe8, 0x53, 0xd0, 0x39, 0x0b, 0xc6, 0xac, 0x6f, 0x49, 0xaa, 0x70, 0x33, 0x3d, + 0x7f, 0xf3, 0x58, 0x54, 0xad, 0x83, 0x36, 0xc8, 0xa9, 0x22, 0x8f, 0xd7, 0x9c, 0x3c, 0x83, 0x12, + 0xf7, 0x47, 0x41, 0x8f, 0x59, 0xc8, 0x62, 0x1e, 0xb6, 0xc9, 0xbd, 0x05, 0x7f, 0x34, 0xc2, 0x35, + 0xd5, 0xf9, 0x54, 0xe0, 0xe4, 0x05, 0x2c, 0x0b, 0x04, 0xc4, 0xea, 0xf9, 0x9e, 0x08, 0x7c, 0x97, + 0x9b, 0xb9, 0xf9, 0x56, 0x53, 0x31, 0x14, 0x6e, 0x0d, 0x65, 0x45, 0xcb, 0x22, 0x29, 0x72, 0xb2, + 0x09, 0xb7, 0x1d, 0x6e, 0x85, 0xf8, 0xc9, 0x14, 0x1d, 0xef, 0x0c, 0xfb, 
0x28, 0x4f, 0x97, 0x1d, + 0x7e, 0x88, 0xfa, 0x8e, 0x52, 0x57, 0x5e, 0x01, 0x4c, 0x0b, 0x22, 0x8f, 0xa1, 0x18, 0x66, 0x80, + 0xfd, 0xa4, 0xbd, 0xa3, 0x9f, 0x40, 0xc4, 0x6b, 0xc9, 0x0b, 0x39, 0x8a, 0xb8, 0x99, 0x5a, 0x4f, + 0x4b, 0x5e, 0xa0, 0x50, 0xf9, 0x41, 0x83, 0x62, 0xa2, 0xd8, 0x68, 0x50, 0x69, 0xf1, 0xa0, 0x9a, + 0x19, 0x0d, 0xa9, 0xab, 0x46, 0x43, 0xfa, 0xca, 0xd1, 0x90, 0xb9, 0xc6, 0xa5, 0xae, 0x42, 0x0e, + 0x13, 0xe5, 0x66, 0x16, 0x73, 0x0b, 0xa5, 0xca, 0x8f, 0x1a, 0x94, 0x66, 0x50, 0xbc, 0xd1, 0xda, + 0xc9, 0xff, 0x81, 0x9c, 0xb8, 0x76, 0xef, 0xdc, 0x75, 0xb8, 0x90, 0x84, 0x52, 0x29, 0x64, 0xd0, + 0xe4, 0x76, 0x62, 0x07, 0x83, 0x72, 0x99, 0xe5, 0x69, 0xe0, 0x7f, 0xcd, 0x3c, 0x9c, 0x90, 0x79, + 0x1a, 0x4a, 0x71, 0x5b, 0x65, 0x8d, 0x5c, 0xf5, 0xd7, 0x34, 0xbe, 0x1f, 0x0a, 0x9d, 0x0f, 0x60, + 0x05, 0x01, 0x71, 0xbc, 0x33, 0xab, 0xe7, 0xbb, 0xa3, 0x81, 0x87, 0x43, 0x2d, 0x6c, 0x56, 0x12, + 0xed, 0x35, 0x70, 0x4b, 0xce, 0x35, 0xf2, 0x72, 0xd1, 0x03, 0xeb, 0x4c, 0x61, 0x9d, 0xe6, 0x0c, + 0x88, 0x78, 0xc6, 0x9e, 0xe2, 0xf8, 0x5c, 0x2c, 0xac, 0xf9, 0x59, 0xdc, 0x29, 0xa7, 0x81, 0x3f, + 0xe0, 0x8b, 0x0f, 0x42, 0x14, 0x23, 0x6c, 0x96, 0x17, 0x81, 0x3f, 0x88, 0x9a, 0x45, 0xae, 0x39, + 0xf9, 0x04, 0x4a, 0xd1, 0x4d, 0xab, 0x34, 0xb2, 0x98, 0xc6, 0xea, 0x62, 0x08, 0x4c, 0x42, 0x3f, + 0x4f, 0x48, 0xe4, 0x3f, 0x50, 0x3a, 0xb1, 0x39, 0xb3, 0x62, 0xee, 0xa8, 0xd7, 0x43, 0x97, 0xca, + 0x18, 0xa1, 0x0f, 0xa1, 0xc4, 0x3d, 0x7b, 0xc8, 0xdf, 0xf8, 0xe1, 0xe0, 0x58, 0xba, 0x64, 0x70, + 0xe8, 0x91, 0x09, 0x4e, 0xce, 0x51, 0xd4, 0x0b, 0x32, 0xc7, 0x9b, 0xe5, 0x43, 0x92, 0xe9, 0xe9, + 0x59, 0xa6, 0xab, 0x4b, 0xae, 0x7e, 0xa3, 0x81, 0xa1, 0x86, 0x02, 0x1b, 0xba, 0x4e, 0xcf, 0x16, + 0x8e, 0xef, 0x91, 0xc7, 0x90, 0xf5, 0xfc, 0x3e, 0x93, 0x93, 0x53, 0x22, 0xfc, 0xef, 0xb9, 0x39, + 0x90, 0x30, 0xad, 0xb5, 0xfc, 0x3e, 0xa3, 0xca, 0xba, 0xf2, 0x0c, 0x32, 0x52, 0x94, 0xf3, 0x37, + 0x2c, 0xe1, 0x3a, 0xf3, 0x57, 0x4c, 0x85, 0xea, 0x31, 0x94, 0xc3, 0x13, 0x4e, 0x59, 0xc0, 0xbc, + 0x1e, 0x93, 
0x9f, 0x1e, 0x09, 0x86, 0xe1, 0xfa, 0xbd, 0x47, 0x6c, 0xf5, 0x5b, 0x0d, 0x08, 0xc6, + 0x9d, 0x6d, 0xbd, 0x9b, 0x88, 0x4d, 0x1e, 0xc1, 0xea, 0xdb, 0x11, 0x0b, 0x26, 0x6a, 0xe2, 0xf5, + 0x98, 0xd5, 0x77, 0xb8, 0x3c, 0x45, 0x4d, 0x90, 0x3c, 0x5d, 0xc1, 0xdd, 0x8e, 0xda, 0xdc, 0x09, + 0xf7, 0xaa, 0x17, 0x19, 0x28, 0x76, 0x82, 0x71, 0x4c, 0x9b, 0xcf, 0x00, 0x86, 0x76, 0x20, 0x1c, + 0x89, 0x69, 0x04, 0xfb, 0x7f, 0x13, 0xb0, 0x4f, 0x4d, 0x63, 0x86, 0xb6, 0x23, 0x7b, 0x9a, 0x70, + 0xbd, 0xb2, 0x43, 0x53, 0xef, 0xdd, 0xa1, 0xe9, 0x7f, 0xd0, 0xa1, 0x75, 0x28, 0x26, 0x3a, 0x34, + 0x6c, 0xd0, 0xf5, 0xcb, 0xeb, 0x48, 0xf4, 0x28, 0x4c, 0x7b, 0xb4, 0xf2, 0xbb, 0x06, 0xb7, 0x17, + 0x4a, 0x94, 0x5d, 0x91, 0x78, 0x24, 0xdf, 0xdd, 0x15, 0xd3, 0xd7, 0x91, 0x34, 0xc0, 0xc0, 0x2c, + 0xad, 0x20, 0x22, 0x94, 0x6a, 0x90, 0x62, 0xb2, 0xae, 0x59, 0xc6, 0xd1, 0x65, 0x3e, 0x23, 0x73, + 0xd2, 0x86, 0xbb, 0x2a, 0xc8, 0xfc, 0x2b, 0xa9, 0x5e, 0xea, 0x7f, 0xcd, 0x45, 0x9a, 0x7d, 0x24, + 0xef, 0xf0, 0x05, 0x1d, 0xaf, 0x58, 0x37, 0xd1, 0xf1, 0xef, 0x78, 0xc5, 0xc2, 0xd1, 0xbd, 0x0f, + 0xf9, 0x06, 0x73, 0xdd, 0x3d, 0xef, 0xd4, 0x97, 0xdf, 0x89, 0x88, 0x4b, 0x60, 0xd9, 0xfd, 0x7e, + 0xc0, 0x38, 0x0f, 0x59, 0x5f, 0x52, 0xda, 0xba, 0x52, 0xca, 0x96, 0x08, 0x7c, 0x5f, 0x84, 0x01, + 0x71, 0x1d, 0x0e, 0x8a, 0x2a, 0x80, 0x0c, 0xc6, 0xd5, 0x87, 0xd2, 0xa5, 0xe3, 0xa6, 0x7a, 0x0c, + 0xd0, 0xf5, 0x87, 0x7e, 0xc3, 0xf7, 0x4e, 0x9d, 0x33, 0x72, 0x0f, 0x0a, 0xb2, 0x86, 0x69, 0x55, + 0x05, 0x8a, 0xff, 0x28, 0x98, 0xfd, 0x2a, 0xe4, 0xd4, 0xc9, 0xe1, 0x51, 0xa1, 0x14, 0x27, 0x90, + 0x9e, 0x26, 0x50, 0x6d, 0xc1, 0xdd, 0xe6, 0x57, 0x82, 0x05, 0x9e, 0xed, 0x7e, 0xee, 0x08, 0xc6, + 0x79, 0xc3, 0x1d, 0xc9, 0x8f, 0x09, 0x44, 0x4e, 0x9e, 0xd0, 0xc3, 0x03, 0xc3, 0x39, 0x93, 0x44, + 0x2e, 0x4e, 0x86, 0x82, 0x88, 0xd7, 0xd5, 0xd7, 0x60, 0x44, 0xf1, 0xc2, 0x48, 0xf2, 0x23, 0xa8, + 0x3c, 0xc6, 0xd8, 0x56, 0x4f, 0xa9, 0x16, 0x67, 0xdf, 0xa5, 0x39, 0xd0, 0xd2, 0x38, 0x29, 0x6e, + 0x6e, 0x80, 0x9e, 0x7c, 0x42, 0x08, 0x40, 0xae, 
0x75, 0x44, 0x0f, 0xeb, 0x07, 0xc6, 0x2d, 0xa2, + 0x43, 0xbe, 0xd3, 0xaa, 0xb7, 0x3b, 0xbb, 0x47, 0x5d, 0x43, 0xdb, 0xdc, 0x86, 0xf2, 0x6c, 0x47, + 0x91, 0x02, 0x64, 0x8f, 0x5b, 0x9d, 0x66, 0xd7, 0xb8, 0x25, 0xdd, 0x8e, 0xf7, 0x5a, 0xdd, 0x8f, + 0x1e, 0x19, 0x9a, 0x54, 0x3f, 0x7f, 0xd5, 0x6d, 0x76, 0x8c, 0xd4, 0xe6, 0x77, 0x1a, 0xc0, 0x94, + 0x0e, 0xa4, 0x08, 0x4b, 0xc7, 0xad, 0xfd, 0xd6, 0xd1, 0x17, 0x2d, 0xe5, 0x72, 0x58, 0xef, 0x74, + 0x9b, 0xd4, 0xd0, 0xe4, 0x06, 0x6d, 0xb6, 0x0f, 0xf6, 0x1a, 0x75, 0x23, 0x25, 0x37, 0xe8, 0xce, + 0x51, 0xeb, 0xe0, 0x95, 0x91, 0xc6, 0x58, 0xf5, 0x6e, 0x63, 0x57, 0x2d, 0x3b, 0xed, 0x3a, 0x6d, + 0x1a, 0x19, 0x62, 0x80, 0xde, 0xfc, 0xb2, 0xdd, 0xa4, 0x7b, 0x87, 0xcd, 0x56, 0xb7, 0x7e, 0x60, + 0x64, 0xa5, 0xcf, 0xf3, 0x7a, 0x63, 0xff, 0xb8, 0x6d, 0xe4, 0x54, 0xb0, 0x4e, 0xf7, 0x88, 0x36, + 0x8d, 0x25, 0x29, 0xec, 0xd0, 0xfa, 0x5e, 0xab, 0xb9, 0x63, 0xe4, 0x2b, 0x29, 0x43, 0x7b, 0xbe, + 0xfb, 0xd3, 0xc5, 0x9a, 0xf6, 0xf3, 0xc5, 0x9a, 0xf6, 0xdb, 0xc5, 0x9a, 0xf6, 0xfd, 0x1f, 0x6b, + 0xb7, 0x60, 0xd9, 0xf1, 0x6b, 0x0a, 0x14, 0xf5, 0x07, 0xfa, 0xfa, 0x41, 0x28, 0x39, 0xfe, 0x96, + 0x5a, 0x6d, 0x9d, 0xf9, 0x5b, 0x63, 0xb1, 0x85, 0xbb, 0x5b, 0x11, 0xbe, 0x27, 0x39, 0x94, 0x1f, + 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xd6, 0x3a, 0x79, 0xd9, 0x0e, 0x00, 0x00, +} + +func (m *KeyRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.End) > 0 { + i -= len(m.End) + copy(dAtA[i:], m.End) + i = encodeVarintTopodata(dAtA, i, 
uint64(len(m.End))) + i-- + dAtA[i] = 0x12 + } + if len(m.Start) > 0 { + i -= len(m.Start) + copy(dAtA[i:], m.Start) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Start))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TabletAlias) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TabletAlias) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TabletAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Uid != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x10 + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tablet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tablet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Tablet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MasterTermStartTime != nil { + { + size, err := m.MasterTermStartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.MysqlPort != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.MysqlPort)) + i-- + dAtA[i] = 0x68 + } + if 
len(m.MysqlHostname) > 0 { + i -= len(m.MysqlHostname) + copy(dAtA[i:], m.MysqlHostname) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.MysqlHostname))) + i-- + dAtA[i] = 0x62 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintTopodata(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTopodata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTopodata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.DbNameOverride) > 0 { + i -= len(m.DbNameOverride) + copy(dAtA[i:], m.DbNameOverride) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.DbNameOverride))) + i-- + dAtA[i] = 0x4a + } + if m.Type != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x40 + } + if m.KeyRange != nil { + { + size, err := m.KeyRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x32 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x2a + } + if len(m.PortMap) > 0 { + for k := range m.PortMap { + v := m.PortMap[k] + baseI := i + i = encodeVarintTopodata(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTopodata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTopodata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + } + if m.Alias != nil { + { + size, err := m.Alias.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Shard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MasterTermStartTime != nil { + { + size, err := m.MasterTermStartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.IsMasterServing { + i-- + if m.IsMasterServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.TabletControls) > 0 { + for iNdEx := len(m.TabletControls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TabletControls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.SourceShards) > 0 { + for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SourceShards[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ServedTypes) > 0 { + for iNdEx := len(m.ServedTypes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServedTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.KeyRange != nil 
{ + { + size, err := m.KeyRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MasterAlias != nil { + { + size, err := m.MasterAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Shard_ServedType) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shard_ServedType) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shard_ServedType) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TabletType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Shard_SourceShard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shard_SourceShard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shard_SourceShard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.KeyRange != nil { + { + size, err := m.KeyRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if m.Uid != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Shard_TabletControl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shard_TabletControl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shard_TabletControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Frozen { + i-- + if m.Frozen { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.BlacklistedTables) > 0 { + for iNdEx := len(m.BlacklistedTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BlacklistedTables[iNdEx]) + copy(dAtA[i:], m.BlacklistedTables[iNdEx]) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.BlacklistedTables[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 
0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TabletType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Keyspace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Keyspace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Keyspace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SnapshotTime != nil { + { + size, err := m.SnapshotTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.BaseKeyspace) > 0 { + i -= len(m.BaseKeyspace) + copy(dAtA[i:], m.BaseKeyspace) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.BaseKeyspace))) + i-- + dAtA[i] = 0x32 + } + if m.KeyspaceType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.KeyspaceType)) + i-- + dAtA[i] = 0x28 + } + if len(m.ServedFroms) > 0 { + for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServedFroms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.ShardingColumnType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.ShardingColumnType)) + i-- + dAtA[i] = 0x10 + } + if len(m.ShardingColumnName) > 0 { + i -= len(m.ShardingColumnName) + copy(dAtA[i:], m.ShardingColumnName) + i = encodeVarintTopodata(dAtA, i, 
uint64(len(m.ShardingColumnName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Keyspace_ServedFrom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Keyspace_ServedFrom) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Keyspace_ServedFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x1a + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TabletType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ShardReplication) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplication) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShardReplication) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Nodes) > 0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ShardReplication_Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplication_Node) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShardReplication_Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShardReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.KeyRange != nil { + { + size, err := m.KeyRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*ShardTabletControl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardTabletControl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } + +func (m *ShardTabletControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.QueryServiceDisabled { + i-- + if m.QueryServiceDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.KeyRange != nil { + { + size, err := m.KeyRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SrvKeyspace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SrvKeyspace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SrvKeyspace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ServedFrom) > 0 { + for iNdEx := len(m.ServedFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServedFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if 
m.ShardingColumnType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.ShardingColumnType)) + i-- + dAtA[i] = 0x18 + } + if len(m.ShardingColumnName) > 0 { + i -= len(m.ShardingColumnName) + copy(dAtA[i:], m.ShardingColumnName) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.ShardingColumnName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Partitions) > 0 { + for iNdEx := len(m.Partitions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Partitions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SrvKeyspace_KeyspacePartition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SrvKeyspace_KeyspacePartition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SrvKeyspace_KeyspacePartition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ShardTabletControls) > 0 { + for iNdEx := len(m.ShardTabletControls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ShardTabletControls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ShardReferences) > 0 { + for iNdEx := len(m.ShardReferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ShardReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.ServedType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.ServedType)) + 
i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SrvKeyspace_ServedFrom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SrvKeyspace_ServedFrom) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SrvKeyspace_ServedFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if m.TabletType != 0 { + i = encodeVarintTopodata(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CellInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServerAddress) > 0 { + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.ServerAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CellsAlias) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellsAlias) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellsAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *TopoConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopoConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopoConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x1a + } + if len(m.Server) > 0 { + i -= len(m.Server) + copy(dAtA[i:], m.Server) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Server))) + i-- + dAtA[i] = 0x12 + } + if len(m.TopoType) > 0 { + i -= len(m.TopoType) + copy(dAtA[i:], m.TopoType) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.TopoType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalVitessCluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalVitessCluster) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalVitessCluster) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TopoConfig != nil { + { + size, err := m.TopoConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalClusters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalClusters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalClusters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.VitessCluster) > 0 { + for iNdEx := len(m.VitessCluster) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VitessCluster[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintTopodata(dAtA []byte, offset int, v uint64) int { + offset -= sovTopodata(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *KeyRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Start) + if l > 0 { + n += 1 + l + 
sovTopodata(uint64(l)) + } + l = len(m.End) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TabletAlias) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sovTopodata(uint64(m.Uid)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Tablet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Alias != nil { + l = m.Alias.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if len(m.PortMap) > 0 { + for k, v := range m.PortMap { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTopodata(uint64(len(k))) + 1 + sovTopodata(uint64(v)) + n += mapEntrySize + 1 + sovTopodata(uint64(mapEntrySize)) + } + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovTopodata(uint64(m.Type)) + } + l = len(m.DbNameOverride) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTopodata(uint64(len(k))) + 1 + len(v) + sovTopodata(uint64(len(v))) + n += mapEntrySize + 1 + sovTopodata(uint64(mapEntrySize)) + } + } + l = len(m.MysqlHostname) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.MysqlPort != 0 { + n += 1 + sovTopodata(uint64(m.MysqlPort)) + } + if m.MasterTermStartTime != nil { + l = m.MasterTermStartTime.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Shard) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if m.MasterAlias != nil { + l = m.MasterAlias.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if len(m.ServedTypes) > 0 { + for _, e := range m.ServedTypes { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if len(m.SourceShards) > 0 { + for _, e := range m.SourceShards { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if len(m.TabletControls) > 0 { + for _, e := range m.TabletControls { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.IsMasterServing { + n += 2 + } + if m.MasterTermStartTime != nil { + l = m.MasterTermStartTime.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Shard_ServedType) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletType != 0 { + n += 1 + sovTopodata(uint64(m.TabletType)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Shard_SourceShard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uid != 0 { + n += 1 + sovTopodata(uint64(m.Uid)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Shard_TabletControl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletType != 0 { + n += 1 + sovTopodata(uint64(m.TabletType)) + } + if 
len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sovTopodata(uint64(l)) + } + } + if len(m.BlacklistedTables) > 0 { + for _, s := range m.BlacklistedTables { + l = len(s) + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.Frozen { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Keyspace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ShardingColumnName) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.ShardingColumnType != 0 { + n += 1 + sovTopodata(uint64(m.ShardingColumnType)) + } + if len(m.ServedFroms) > 0 { + for _, e := range m.ServedFroms { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.KeyspaceType != 0 { + n += 1 + sovTopodata(uint64(m.KeyspaceType)) + } + l = len(m.BaseKeyspace) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.SnapshotTime != nil { + l = m.SnapshotTime.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Keyspace_ServedFrom) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletType != 0 { + n += 1 + sovTopodata(uint64(m.TabletType)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sovTopodata(uint64(l)) + } + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShardReplication) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShardReplication_Node) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 
+ l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShardReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShardTabletControl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.QueryServiceDisabled { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SrvKeyspace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Partitions) > 0 { + for _, e := range m.Partitions { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + l = len(m.ShardingColumnName) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.ShardingColumnType != 0 { + n += 1 + sovTopodata(uint64(m.ShardingColumnType)) + } + if len(m.ServedFrom) > 0 { + for _, e := range m.ServedFrom { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SrvKeyspace_KeyspacePartition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ServedType != 0 { + n += 1 + sovTopodata(uint64(m.ServedType)) + } + if len(m.ShardReferences) > 0 { + for _, e := range m.ShardReferences { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if len(m.ShardTabletControls) > 0 { + for _, e := range m.ShardTabletControls { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + 
+func (m *SrvKeyspace_ServedFrom) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletType != 0 { + n += 1 + sovTopodata(uint64(m.TabletType)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CellInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServerAddress) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CellsAlias) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TopoConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TopoType) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Server) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExternalVitessCluster) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TopoConfig != nil { + l = m.TopoConfig.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExternalClusters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.VitessCluster) > 0 { + for _, e := range m.VitessCluster { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTopodata(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozTopodata(x uint64) (n int) { + return sovTopodata(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Start = append(m.Start[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Start == nil { + m.Start = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.End = append(m.End[:0], dAtA[iNdEx:postIndex]...) + if m.End == nil { + m.End = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TabletAlias) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TabletAlias: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TabletAlias: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Tablet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Tablet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Tablet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alias == nil { + m.Alias = &TabletAlias{} + } + if err := m.Alias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortMap == nil { + m.PortMap = make(map[string]int32) + } + var mapkey string + var mapvalue int32 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTopodata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTopodata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PortMap[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &KeyRange{} + } + if err := m.KeyRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbNameOverride", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbNameOverride = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTopodata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTopodata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTopodata + } + 
postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthTopodata + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MysqlHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MysqlHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MysqlPort", wireType) + } + m.MysqlPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MysqlPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterTermStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MasterTermStartTime == nil { + m.MasterTermStartTime = &vttime.Time{} + } + if err := m.MasterTermStartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Shard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Shard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MasterAlias == nil { + m.MasterAlias = &TabletAlias{} + } + if err := m.MasterAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &KeyRange{} + } + if err := m.KeyRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServedTypes = append(m.ServedTypes, &Shard_ServedType{}) + if err := m.ServedTypes[len(m.ServedTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceShards = append(m.SourceShards, &Shard_SourceShard{}) + if err := m.SourceShards[len(m.SourceShards)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletControls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletControls = append(m.TabletControls, &Shard_TabletControl{}) + if err := m.TabletControls[len(m.TabletControls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsMasterServing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsMasterServing = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MasterTermStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MasterTermStartTime == nil { + m.MasterTermStartTime = &vttime.Time{} + } + if err := m.MasterTermStartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shard_ServedType) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServedType: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServedType: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shard_SourceShard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceShard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceShard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &KeyRange{} + } + if err := m.KeyRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shard_TabletControl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TabletControl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TabletControl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlacklistedTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlacklistedTables = append(m.BlacklistedTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Frozen", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Frozen = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardingColumnName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnType", wireType) + } + m.ShardingColumnType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShardingColumnType |= KeyspaceIdType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) 
+ } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServedFroms = append(m.ServedFroms, &Keyspace_ServedFrom{}) + if err := m.ServedFroms[len(m.ServedFroms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceType", wireType) + } + m.KeyspaceType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeyspaceType |= KeyspaceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaseKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BaseKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SnapshotTime == nil { + m.SnapshotTime = &vttime.Time{} + } + if err := m.SnapshotTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace_ServedFrom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServedFrom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServedFrom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= TabletType(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShardReplication) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardReplication: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardReplication: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &ShardReplication_Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShardReplication_Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShardReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &KeyRange{} + } + if err := m.KeyRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShardTabletControl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardTabletControl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardTabletControl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &KeyRange{} + } + if err := m.KeyRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryServiceDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.QueryServiceDisabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SrvKeyspace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SrvKeyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SrvKeyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Partitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Partitions = append(m.Partitions, &SrvKeyspace_KeyspacePartition{}) + if err := m.Partitions[len(m.Partitions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardingColumnName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnType", wireType) + } + m.ShardingColumnType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShardingColumnType |= KeyspaceIdType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServedFrom = append(m.ServedFrom, &SrvKeyspace_ServedFrom{}) + if err := m.ServedFrom[len(m.ServedFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SrvKeyspace_KeyspacePartition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyspacePartition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyspacePartition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedType", wireType) + } + m.ServedType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ServedType |= TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardReferences = append(m.ShardReferences, &ShardReference{}) + if err := m.ShardReferences[len(m.ShardReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field ShardTabletControls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardTabletControls = append(m.ShardTabletControls, &ShardTabletControl{}) + if err := m.ShardTabletControls[len(m.ShardTabletControls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SrvKeyspace_ServedFrom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServedFrom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServedFrom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CellInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CellInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CellsAlias) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CellsAlias: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellsAlias: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopoConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopoConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopoConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopoType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.TopoType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalVitessCluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalVitessCluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalVitessCluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopoConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TopoConfig == nil { + m.TopoConfig = &TopoConfig{} + } + if err := m.TopoConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalClusters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalClusters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalClusters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VitessCluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VitessCluster = append(m.VitessCluster, &ExternalVitessCluster{}) + if err := m.VitessCluster[len(m.VitessCluster)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTopodata(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTopodata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTopodata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTopodata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTopodata + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTopodata + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTopodata + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTopodata = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTopodata = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTopodata = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index 0cdf2bb471e..b399bc37a5c 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -1,14 +1,15 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
+// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vschema.proto package vschema import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - query "vitess.io/vitess/go/vt/proto/query" ) @@ -40,18 +41,26 @@ func (*RoutingRules) ProtoMessage() {} func (*RoutingRules) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{0} } - func (m *RoutingRules) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RoutingRules.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RoutingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RoutingRules.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RoutingRules.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RoutingRules) XXX_Merge(src proto.Message) { xxx_messageInfo_RoutingRules.Merge(m, src) } func (m *RoutingRules) XXX_Size() int { - return xxx_messageInfo_RoutingRules.Size(m) + return m.Size() } func (m *RoutingRules) XXX_DiscardUnknown() { xxx_messageInfo_RoutingRules.DiscardUnknown(m) @@ -81,18 +90,26 @@ func (*RoutingRule) ProtoMessage() {} func (*RoutingRule) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{1} } - func (m *RoutingRule) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RoutingRule.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RoutingRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RoutingRule.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_RoutingRule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RoutingRule) XXX_Merge(src proto.Message) { xxx_messageInfo_RoutingRule.Merge(m, src) } func (m 
*RoutingRule) XXX_Size() int { - return xxx_messageInfo_RoutingRule.Size(m) + return m.Size() } func (m *RoutingRule) XXX_DiscardUnknown() { xxx_messageInfo_RoutingRule.DiscardUnknown(m) @@ -133,18 +150,26 @@ func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{2} } - func (m *Keyspace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Keyspace.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Keyspace) XXX_Merge(src proto.Message) { xxx_messageInfo_Keyspace.Merge(m, src) } func (m *Keyspace) XXX_Size() int { - return xxx_messageInfo_Keyspace.Size(m) + return m.Size() } func (m *Keyspace) XXX_DiscardUnknown() { xxx_messageInfo_Keyspace.DiscardUnknown(m) @@ -206,18 +231,26 @@ func (*Vindex) ProtoMessage() {} func (*Vindex) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{3} } - func (m *Vindex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Vindex.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Vindex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Vindex.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Vindex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Vindex) XXX_Merge(src proto.Message) { xxx_messageInfo_Vindex.Merge(m, src) } func (m *Vindex) XXX_Size() int { - return xxx_messageInfo_Vindex.Size(m) + return m.Size() } func (m *Vindex) XXX_DiscardUnknown() { xxx_messageInfo_Vindex.DiscardUnknown(m) @@ 
-284,18 +317,26 @@ func (*Table) ProtoMessage() {} func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{4} } - func (m *Table) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Table.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Table) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Table.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Table.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Table) XXX_Merge(src proto.Message) { xxx_messageInfo_Table.Merge(m, src) } func (m *Table) XXX_Size() int { - return xxx_messageInfo_Table.Size(m) + return m.Size() } func (m *Table) XXX_DiscardUnknown() { xxx_messageInfo_Table.DiscardUnknown(m) @@ -364,18 +405,26 @@ func (*ColumnVindex) ProtoMessage() {} func (*ColumnVindex) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{5} } - func (m *ColumnVindex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ColumnVindex.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ColumnVindex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ColumnVindex.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ColumnVindex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ColumnVindex) XXX_Merge(src proto.Message) { xxx_messageInfo_ColumnVindex.Merge(m, src) } func (m *ColumnVindex) XXX_Size() int { - return xxx_messageInfo_ColumnVindex.Size(m) + return m.Size() } func (m *ColumnVindex) XXX_DiscardUnknown() { xxx_messageInfo_ColumnVindex.DiscardUnknown(m) @@ -420,18 +469,26 @@ func (*AutoIncrement) ProtoMessage() {} func (*AutoIncrement) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, 
[]int{6} } - func (m *AutoIncrement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AutoIncrement.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *AutoIncrement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AutoIncrement.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_AutoIncrement.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *AutoIncrement) XXX_Merge(src proto.Message) { xxx_messageInfo_AutoIncrement.Merge(m, src) } func (m *AutoIncrement) XXX_Size() int { - return xxx_messageInfo_AutoIncrement.Size(m) + return m.Size() } func (m *AutoIncrement) XXX_DiscardUnknown() { xxx_messageInfo_AutoIncrement.DiscardUnknown(m) @@ -468,18 +525,26 @@ func (*Column) ProtoMessage() {} func (*Column) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{7} } - func (m *Column) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Column.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Column.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Column.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Column) XXX_Merge(src proto.Message) { xxx_messageInfo_Column.Merge(m, src) } func (m *Column) XXX_Size() int { - return xxx_messageInfo_Column.Size(m) + return m.Size() } func (m *Column) XXX_DiscardUnknown() { xxx_messageInfo_Column.DiscardUnknown(m) @@ -517,18 +582,26 @@ func (*SrvVSchema) ProtoMessage() {} func (*SrvVSchema) Descriptor() ([]byte, []int) { return fileDescriptor_3f6849254fea3e77, []int{8} } - func (m *SrvVSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SrvVSchema.Unmarshal(m, b) + return 
m.Unmarshal(b) } func (m *SrvVSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SrvVSchema.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_SrvVSchema.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *SrvVSchema) XXX_Merge(src proto.Message) { xxx_messageInfo_SrvVSchema.Merge(m, src) } func (m *SrvVSchema) XXX_Size() int { - return xxx_messageInfo_SrvVSchema.Size(m) + return m.Size() } func (m *SrvVSchema) XXX_DiscardUnknown() { xxx_messageInfo_SrvVSchema.DiscardUnknown(m) @@ -569,48 +642,2562 @@ func init() { func init() { proto.RegisterFile("vschema.proto", fileDescriptor_3f6849254fea3e77) } var fileDescriptor_3f6849254fea3e77 = []byte{ - // 673 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xcf, 0x4e, 0xdb, 0x4e, - 0x10, 0x96, 0x13, 0x62, 0x92, 0x31, 0x09, 0xbf, 0xdf, 0x0a, 0xa8, 0x1b, 0x84, 0x88, 0x2c, 0xda, - 0xa6, 0x3d, 0x24, 0x52, 0x50, 0x25, 0x9a, 0x8a, 0xaa, 0x14, 0x71, 0x40, 0x45, 0x6a, 0x65, 0x10, - 0x87, 0x5e, 0x2c, 0xe3, 0x6c, 0x61, 0x45, 0xe2, 0x35, 0xbb, 0x6b, 0x97, 0x3c, 0x4a, 0xaf, 0x7d, - 0xad, 0x3e, 0x42, 0x5f, 0xa2, 0xf2, 0xfe, 0x31, 0x1b, 0x48, 0x6f, 0x3b, 0x3b, 0xf3, 0x7d, 0xf3, - 0xed, 0xec, 0xcc, 0x40, 0xbb, 0xe0, 0xc9, 0x0d, 0x9e, 0xc5, 0x83, 0x8c, 0x51, 0x41, 0xd1, 0xaa, - 0x36, 0xbb, 0xde, 0x5d, 0x8e, 0xd9, 0x5c, 0xdd, 0x06, 0x63, 0x58, 0x0b, 0x69, 0x2e, 0x48, 0x7a, - 0x1d, 0xe6, 0x53, 0xcc, 0xd1, 0x1b, 0x68, 0xb0, 0xf2, 0xe0, 0x3b, 0xbd, 0x7a, 0xdf, 0x1b, 0x6d, - 0x0c, 0x0c, 0x89, 0x15, 0x15, 0xaa, 0x90, 0xe0, 0x14, 0x3c, 0xeb, 0x16, 0xed, 0x00, 0x7c, 0x67, - 0x74, 0x16, 0x89, 0xf8, 0x6a, 0x8a, 0x7d, 0xa7, 0xe7, 0xf4, 0x5b, 0x61, 0xab, 0xbc, 0xb9, 0x28, - 0x2f, 0xd0, 0x36, 0xb4, 0x04, 0x55, 0x4e, 0xee, 0xd7, 0x7a, 0xf5, 0x7e, 0x2b, 0x6c, 0x0a, 0x2a, - 0x7d, 0x3c, 0xf8, 0x53, 0x83, 0xe6, 
0x67, 0x3c, 0xe7, 0x59, 0x9c, 0x60, 0xe4, 0xc3, 0x2a, 0xbf, - 0x89, 0xd9, 0x04, 0x4f, 0x24, 0x4b, 0x33, 0x34, 0x26, 0x7a, 0x0f, 0xcd, 0x82, 0xa4, 0x13, 0x7c, - 0xaf, 0x29, 0xbc, 0xd1, 0x6e, 0x25, 0xd0, 0xc0, 0x07, 0x97, 0x3a, 0xe2, 0x24, 0x15, 0x6c, 0x1e, - 0x56, 0x00, 0xf4, 0x16, 0x5c, 0x9d, 0xbd, 0x2e, 0xa1, 0x3b, 0x4f, 0xa1, 0x4a, 0x8d, 0x02, 0xea, - 0x60, 0x74, 0x00, 0x3e, 0xc3, 0x77, 0x39, 0x61, 0x38, 0xc2, 0xf7, 0xd9, 0x94, 0x24, 0x44, 0x44, - 0x4c, 0x3d, 0xdb, 0x5f, 0x91, 0xf2, 0xb6, 0xb4, 0xff, 0x44, 0xbb, 0x75, 0x51, 0xba, 0x67, 0xd0, - 0x5e, 0xd0, 0x82, 0xfe, 0x83, 0xfa, 0x2d, 0x9e, 0xeb, 0xd2, 0x94, 0x47, 0xf4, 0x02, 0x1a, 0x45, - 0x3c, 0xcd, 0xb1, 0x5f, 0xeb, 0x39, 0x7d, 0x6f, 0xb4, 0x5e, 0x49, 0x52, 0xc0, 0x50, 0x79, 0xc7, - 0xb5, 0x03, 0xa7, 0x7b, 0x0a, 0x9e, 0x25, 0x6f, 0x09, 0xd7, 0xde, 0x22, 0x57, 0xa7, 0xe2, 0x92, - 0x30, 0x8b, 0x2a, 0xf8, 0xe5, 0x80, 0xab, 0x12, 0x20, 0x04, 0x2b, 0x62, 0x9e, 0x99, 0xef, 0x92, - 0x67, 0xb4, 0x0f, 0x6e, 0x16, 0xb3, 0x78, 0x66, 0x6a, 0xbc, 0xfd, 0x48, 0xd5, 0xe0, 0xab, 0xf4, - 0xea, 0x32, 0xa9, 0x50, 0xb4, 0x01, 0x0d, 0xfa, 0x23, 0xc5, 0xcc, 0xaf, 0x4b, 0x26, 0x65, 0x74, - 0xdf, 0x81, 0x67, 0x05, 0x2f, 0x11, 0xbd, 0x61, 0x8b, 0x6e, 0xd9, 0x22, 0x7f, 0xd6, 0xa0, 0xa1, - 0x3a, 0x67, 0x99, 0xc6, 0x0f, 0xb0, 0x9e, 0xd0, 0x69, 0x3e, 0x4b, 0xa3, 0x47, 0x0d, 0xb1, 0x59, - 0x89, 0x3d, 0x96, 0x7e, 0x5d, 0xc8, 0x4e, 0x62, 0x59, 0x98, 0xa3, 0x43, 0xe8, 0xc4, 0xb9, 0xa0, - 0x11, 0x49, 0x13, 0x86, 0x67, 0x38, 0x15, 0x52, 0xb7, 0x37, 0xda, 0xaa, 0xe0, 0x47, 0xb9, 0xa0, - 0xa7, 0xc6, 0x1b, 0xb6, 0x63, 0xdb, 0x44, 0xaf, 0x61, 0x55, 0x11, 0x72, 0x7f, 0x45, 0xa6, 0x5d, - 0x7f, 0x94, 0x36, 0x34, 0x7e, 0xb4, 0x05, 0x6e, 0x46, 0xd2, 0x14, 0x4f, 0xfc, 0x86, 0xd4, 0xaf, - 0x2d, 0x34, 0x86, 0xe7, 0xfa, 0x05, 0x53, 0xc2, 0x45, 0x14, 0xe7, 0xe2, 0x86, 0x32, 0x22, 0x62, - 0x41, 0x0a, 0xec, 0xbb, 0xb2, 0xb1, 0x9e, 0xa9, 0x80, 0x33, 0xc2, 0xc5, 0x91, 0xed, 0x0e, 0x2e, - 0x60, 0xcd, 0x7e, 0x5d, 0x99, 0x43, 0x85, 0xea, 0x1a, 0x69, 0xab, 0xac, 
0x5c, 0x1a, 0xcf, 0x4c, - 0x71, 0xe5, 0xb9, 0x9c, 0x2e, 0x23, 0xbd, 0x2e, 0xa7, 0xd0, 0x98, 0xc1, 0x31, 0xb4, 0x17, 0x1e, - 0xfd, 0x4f, 0xda, 0x2e, 0x34, 0x39, 0xbe, 0xcb, 0x71, 0x9a, 0x18, 0xea, 0xca, 0x0e, 0x0e, 0xc1, - 0x3d, 0x5e, 0x4c, 0xee, 0x58, 0xc9, 0x77, 0xf5, 0x57, 0x96, 0xa8, 0xce, 0xc8, 0x1b, 0xa8, 0x55, - 0x74, 0x31, 0xcf, 0xb0, 0xfa, 0xd7, 0xe0, 0xb7, 0x03, 0x70, 0xce, 0x8a, 0xcb, 0x73, 0x59, 0x4c, - 0xf4, 0x11, 0x5a, 0xb7, 0x7a, 0x38, 0xcd, 0x4a, 0x0a, 0xaa, 0x4a, 0x3f, 0xc4, 0x55, 0x13, 0xac, - 0x9b, 0xf2, 0x01, 0x84, 0xc6, 0xd0, 0xd6, 0xd3, 0x1a, 0xa9, 0xc5, 0xa6, 0xa6, 0x63, 0x73, 0xd9, - 0x62, 0xe3, 0xe1, 0x1a, 0xb3, 0xac, 0xee, 0x17, 0xe8, 0x2c, 0x12, 0x2f, 0x69, 0xe0, 0x57, 0x8b, - 0x53, 0xf7, 0xff, 0x93, 0xa5, 0x62, 0xf5, 0xf4, 0xa7, 0x97, 0xdf, 0xf6, 0x0a, 0x22, 0x30, 0xe7, - 0x03, 0x42, 0x87, 0xea, 0x34, 0xbc, 0xa6, 0xc3, 0x42, 0x0c, 0xe5, 0x36, 0x1e, 0x6a, 0xec, 0x95, - 0x2b, 0xcd, 0xfd, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa7, 0x99, 0x19, 0xc3, 0x05, 0x00, - 0x00, + // 695 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xc1, 0x4e, 0xdb, 0x4c, + 0x10, 0xfe, 0x9d, 0x90, 0x90, 0x8c, 0x49, 0xf8, 0xbb, 0x02, 0xea, 0x06, 0x11, 0x22, 0x8b, 0xaa, + 0x69, 0x0f, 0x89, 0x14, 0xd4, 0x8a, 0xa6, 0xa2, 0x2a, 0x45, 0x1c, 0x50, 0x91, 0x5a, 0x19, 0xc4, + 0xa1, 0x17, 0xcb, 0x38, 0x5b, 0x58, 0x91, 0x78, 0xcd, 0xee, 0xda, 0x25, 0x6f, 0xd2, 0x5e, 0xfb, + 0x34, 0x3d, 0xf6, 0xde, 0x4b, 0x45, 0x8f, 0x7d, 0x89, 0xca, 0xbb, 0x6b, 0xb3, 0x81, 0xf4, 0xb6, + 0xdf, 0xce, 0xcc, 0x37, 0xdf, 0xce, 0xce, 0x0c, 0x34, 0x52, 0x1e, 0x5e, 0xe0, 0x49, 0xd0, 0x8b, + 0x19, 0x15, 0x14, 0x2d, 0x6a, 0xd8, 0xb2, 0xaf, 0x12, 0xcc, 0xa6, 0xea, 0xd6, 0x1d, 0xc2, 0x92, + 0x47, 0x13, 0x41, 0xa2, 0x73, 0x2f, 0x19, 0x63, 0x8e, 0x9e, 0x41, 0x85, 0x65, 0x07, 0xc7, 0xea, + 0x94, 0xbb, 0xf6, 0x60, 0xa5, 0x97, 0x93, 0x18, 0x5e, 0x9e, 0x72, 0x71, 0x0f, 0xc1, 0x36, 0x6e, + 0xd1, 0x06, 0xc0, 0x27, 0x46, 0x27, 0xbe, 0x08, 0xce, 
0xc6, 0xd8, 0xb1, 0x3a, 0x56, 0xb7, 0xee, + 0xd5, 0xb3, 0x9b, 0x93, 0xec, 0x02, 0xad, 0x43, 0x5d, 0x50, 0x65, 0xe4, 0x4e, 0xa9, 0x53, 0xee, + 0xd6, 0xbd, 0x9a, 0xa0, 0xd2, 0xc6, 0xdd, 0x3f, 0x25, 0xa8, 0xbd, 0xc3, 0x53, 0x1e, 0x07, 0x21, + 0x46, 0x0e, 0x2c, 0xf2, 0x8b, 0x80, 0x8d, 0xf0, 0x48, 0xb2, 0xd4, 0xbc, 0x1c, 0xa2, 0x57, 0x50, + 0x4b, 0x49, 0x34, 0xc2, 0xd7, 0x9a, 0xc2, 0x1e, 0x6c, 0x16, 0x02, 0xf3, 0xf0, 0xde, 0xa9, 0xf6, + 0x38, 0x88, 0x04, 0x9b, 0x7a, 0x45, 0x00, 0x7a, 0x0e, 0x55, 0x9d, 0xbd, 0x2c, 0x43, 0x37, 0xee, + 0x87, 0x2a, 0x35, 0x2a, 0x50, 0x3b, 0xa3, 0x1d, 0x70, 0x18, 0xbe, 0x4a, 0x08, 0xc3, 0x3e, 0xbe, + 0x8e, 0xc7, 0x24, 0x24, 0xc2, 0x67, 0xea, 0xd9, 0xce, 0x82, 0x94, 0xb7, 0xa6, 0xed, 0x07, 0xda, + 0xac, 0x8b, 0xd2, 0x3a, 0x82, 0xc6, 0x8c, 0x16, 0xf4, 0x3f, 0x94, 0x2f, 0xf1, 0x54, 0x97, 0x26, + 0x3b, 0xa2, 0xc7, 0x50, 0x49, 0x83, 0x71, 0x82, 0x9d, 0x52, 0xc7, 0xea, 0xda, 0x83, 0xe5, 0x42, + 0x92, 0x0a, 0xf4, 0x94, 0x75, 0x58, 0xda, 0xb1, 0x5a, 0x87, 0x60, 0x1b, 0xf2, 0xe6, 0x70, 0x6d, + 0xcd, 0x72, 0x35, 0x0b, 0x2e, 0x19, 0x66, 0x50, 0xb9, 0xdf, 0x2c, 0xa8, 0xaa, 0x04, 0x08, 0xc1, + 0x82, 0x98, 0xc6, 0xf9, 0x77, 0xc9, 0x33, 0xda, 0x86, 0x6a, 0x1c, 0xb0, 0x60, 0x92, 0xd7, 0x78, + 0xfd, 0x8e, 0xaa, 0xde, 0x07, 0x69, 0xd5, 0x65, 0x52, 0xae, 0x68, 0x05, 0x2a, 0xf4, 0x73, 0x84, + 0x99, 0x53, 0x96, 0x4c, 0x0a, 0xb4, 0x5e, 0x82, 0x6d, 0x38, 0xcf, 0x11, 0xbd, 0x62, 0x8a, 0xae, + 0x9b, 0x22, 0xbf, 0x96, 0xa0, 0xa2, 0x3a, 0x67, 0x9e, 0xc6, 0xd7, 0xb0, 0x1c, 0xd2, 0x71, 0x32, + 0x89, 0xfc, 0x3b, 0x0d, 0xb1, 0x5a, 0x88, 0xdd, 0x97, 0x76, 0x5d, 0xc8, 0x66, 0x68, 0x20, 0xcc, + 0xd1, 0x2e, 0x34, 0x83, 0x44, 0x50, 0x9f, 0x44, 0x21, 0xc3, 0x13, 0x1c, 0x09, 0xa9, 0xdb, 0x1e, + 0xac, 0x15, 0xe1, 0x7b, 0x89, 0xa0, 0x87, 0xb9, 0xd5, 0x6b, 0x04, 0x26, 0x44, 0x4f, 0x61, 0x51, + 0x11, 0x72, 0x67, 0x41, 0xa6, 0x5d, 0xbe, 0x93, 0xd6, 0xcb, 0xed, 0x68, 0x0d, 0xaa, 0x31, 0x89, + 0x22, 0x3c, 0x72, 0x2a, 0x52, 0xbf, 0x46, 0x68, 0x08, 0x8f, 0xf4, 0x0b, 0xc6, 0x84, 0x0b, 
0x3f, + 0x48, 0xc4, 0x05, 0x65, 0x44, 0x04, 0x82, 0xa4, 0xd8, 0xa9, 0xca, 0xc6, 0x7a, 0xa8, 0x1c, 0x8e, + 0x08, 0x17, 0x7b, 0xa6, 0xd9, 0x3d, 0x81, 0x25, 0xf3, 0x75, 0x59, 0x0e, 0xe5, 0xaa, 0x6b, 0xa4, + 0x51, 0x56, 0xb9, 0x28, 0x98, 0xe4, 0xc5, 0x95, 0xe7, 0x6c, 0xba, 0x72, 0xe9, 0x65, 0x39, 0x85, + 0x39, 0x74, 0xf7, 0xa1, 0x31, 0xf3, 0xe8, 0x7f, 0xd2, 0xb6, 0xa0, 0xc6, 0xf1, 0x55, 0x82, 0xa3, + 0x30, 0xa7, 0x2e, 0xb0, 0xbb, 0x0b, 0xd5, 0xfd, 0xd9, 0xe4, 0x96, 0x91, 0x7c, 0x53, 0x7f, 0x65, + 0x16, 0xd5, 0x1c, 0xd8, 0x3d, 0xb5, 0x8a, 0x4e, 0xa6, 0x31, 0x56, 0xff, 0xea, 0xfe, 0xb4, 0x00, + 0x8e, 0x59, 0x7a, 0x7a, 0x2c, 0x8b, 0x89, 0xde, 0x40, 0xfd, 0x52, 0x0f, 0x67, 0xbe, 0x92, 0xdc, + 0xa2, 0xd2, 0xb7, 0x7e, 0xc5, 0x04, 0xeb, 0xa6, 0xbc, 0x0d, 0x42, 0x43, 0x68, 0xe8, 0x69, 0xf5, + 0xd5, 0x62, 0x53, 0xd3, 0xb1, 0x3a, 0x6f, 0xb1, 0x71, 0x6f, 0x89, 0x19, 0xa8, 0xf5, 0x1e, 0x9a, + 0xb3, 0xc4, 0x73, 0x1a, 0xf8, 0xc9, 0xec, 0xd4, 0x3d, 0xb8, 0xb7, 0x54, 0x8c, 0x9e, 0x7e, 0xfb, + 0xe2, 0xfb, 0x4d, 0xdb, 0xfa, 0x71, 0xd3, 0xb6, 0x7e, 0xdd, 0xb4, 0xad, 0x2f, 0xbf, 0xdb, 0xff, + 0x7d, 0xdc, 0x4a, 0x89, 0xc0, 0x9c, 0xf7, 0x08, 0xed, 0xab, 0x53, 0xff, 0x9c, 0xf6, 0x53, 0xd1, + 0x97, 0xdb, 0xb9, 0xaf, 0xb9, 0xce, 0xaa, 0x12, 0x6e, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x68, + 0x39, 0x77, 0x25, 0xd3, 0x05, 0x00, 0x00, +} + +func (m *RoutingRules) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutingRules) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoutingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RoutingRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } + +func (m *RoutingRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoutingRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ToTables) > 0 { + for iNdEx := len(m.ToTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ToTables[iNdEx]) + copy(dAtA[i:], m.ToTables[iNdEx]) + i = encodeVarintVschema(dAtA, i, uint64(len(m.ToTables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.FromTable) > 0 { + i -= len(m.FromTable) + copy(dAtA[i:], m.FromTable) + i = encodeVarintVschema(dAtA, i, uint64(len(m.FromTable))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Keyspace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Keyspace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Keyspace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RequireExplicitRouting { + i-- + if m.RequireExplicitRouting { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Tables) > 0 { + for k := range 
m.Tables { + v := m.Tables[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVschema(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVschema(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Vindexes) > 0 { + for k := range m.Vindexes { + v := m.Vindexes[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVschema(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVschema(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Sharded { + i-- + if m.Sharded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Vindex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vindex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vindex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x1a + } + if len(m.Params) > 0 { + for k := range m.Params { + v := m.Params[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVschema(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = 
encodeVarintVschema(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVschema(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Table) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Table) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Table) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ColumnListAuthoritative { + i-- + if m.ColumnListAuthoritative { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.Pinned) > 0 { + i -= len(m.Pinned) + copy(dAtA[i:], m.Pinned) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Pinned))) + i-- + dAtA[i] = 0x2a + } + if len(m.Columns) > 0 { + for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Columns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.AutoIncrement != nil { + { + size, err := m.AutoIncrement.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ColumnVindexes) > 0 { + for iNdEx := len(m.ColumnVindexes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ColumnVindexes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } 
+ } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ColumnVindex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ColumnVindex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ColumnVindex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Columns) > 0 { + for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Columns[iNdEx]) + copy(dAtA[i:], m.Columns[iNdEx]) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Columns[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Column) > 0 { + i -= len(m.Column) + copy(dAtA[i:], m.Column) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Column))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AutoIncrement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AutoIncrement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AutoIncrement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Sequence) > 0 { + i -= len(m.Sequence) + copy(dAtA[i:], 
m.Sequence) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Sequence))) + i-- + dAtA[i] = 0x12 + } + if len(m.Column) > 0 { + i -= len(m.Column) + copy(dAtA[i:], m.Column) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Column))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Column) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Column) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Column) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Type != 0 { + i = encodeVarintVschema(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVschema(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SrvVSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SrvVSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SrvVSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RoutingRules != nil { + { + size, err := m.RoutingRules.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspaces) > 0 { + for k := range m.Keyspaces { + v := 
m.Keyspaces[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVschema(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVschema(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVschema(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintVschema(dAtA []byte, offset int, v uint64) int { + offset -= sovVschema(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RoutingRules) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovVschema(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RoutingRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromTable) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if len(m.ToTables) > 0 { + for _, s := range m.ToTables { + l = len(s) + n += 1 + l + sovVschema(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Keyspace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sharded { + n += 2 + } + if len(m.Vindexes) > 0 { + for k, v := range m.Vindexes { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVschema(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVschema(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVschema(uint64(mapEntrySize)) + } + } + if len(m.Tables) > 0 { + for k, v := range m.Tables { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVschema(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVschema(uint64(len(k))) + l + n += mapEntrySize + 1 + 
sovVschema(uint64(mapEntrySize)) + } + } + if m.RequireExplicitRouting { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Vindex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if len(m.Params) > 0 { + for k, v := range m.Params { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVschema(uint64(len(k))) + 1 + len(v) + sovVschema(uint64(len(v))) + n += mapEntrySize + 1 + sovVschema(uint64(mapEntrySize)) + } + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Table) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if len(m.ColumnVindexes) > 0 { + for _, e := range m.ColumnVindexes { + l = e.Size() + n += 1 + l + sovVschema(uint64(l)) + } + } + if m.AutoIncrement != nil { + l = m.AutoIncrement.Size() + n += 1 + l + sovVschema(uint64(l)) + } + if len(m.Columns) > 0 { + for _, e := range m.Columns { + l = e.Size() + n += 1 + l + sovVschema(uint64(l)) + } + } + l = len(m.Pinned) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if m.ColumnListAuthoritative { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ColumnVindex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Column) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if len(m.Columns) > 0 { + for _, s := range m.Columns { + l = len(s) + n += 1 + l + sovVschema(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AutoIncrement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Column) + if l > 0 { 
+ n += 1 + l + sovVschema(uint64(l)) + } + l = len(m.Sequence) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Column) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVschema(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovVschema(uint64(m.Type)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SrvVSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keyspaces) > 0 { + for k, v := range m.Keyspaces { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVschema(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVschema(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVschema(uint64(mapEntrySize)) + } + } + if m.RoutingRules != nil { + l = m.RoutingRules.Size() + n += 1 + l + sovVschema(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVschema(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVschema(x uint64) (n int) { + return sovVschema(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RoutingRules) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoutingRules: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoutingRules: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &RoutingRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoutingRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoutingRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoutingRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromTable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromTable = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToTables = append(m.ToTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sharded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sharded = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vindexes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vindexes == nil { + m.Vindexes = make(map[string]*Vindex) + } + var mapkey string + var mapvalue *Vindex + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVschema + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVschema + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVschema + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVschema + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Vindex{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } 
+ iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Vindexes[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tables == nil { + m.Tables = make(map[string]*Table) + } + var mapkey string + var mapvalue *Table + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVschema + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVschema + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVschema + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVschema + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Table{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tables[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequireExplicitRouting", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequireExplicitRouting = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vindex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vindex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vindex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
if m.Params == nil { + m.Params = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVschema + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVschema + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVschema + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVschema + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthVschema + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Params[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Table) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Table: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Table: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnVindexes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.ColumnVindexes = append(m.ColumnVindexes, &ColumnVindex{}) + if err := m.ColumnVindexes[len(m.ColumnVindexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoIncrement", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AutoIncrement == nil { + m.AutoIncrement = &AutoIncrement{} + } + if err := m.AutoIncrement.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Columns = append(m.Columns, &Column{}) + if err := m.Columns[len(m.Columns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pinned = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnListAuthoritative", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ColumnListAuthoritative = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ColumnVindex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ColumnVindex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ColumnVindex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Column = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Columns = append(m.Columns, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AutoIncrement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AutoIncrement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AutoIncrement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Column = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sequence = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Column) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Column: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Column: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= query.Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SrvVSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SrvVSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SrvVSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if m.Keyspaces == nil { + m.Keyspaces = make(map[string]*Keyspace) + } + var mapkey string + var mapvalue *Keyspace + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVschema + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVschema + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVschema + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVschema + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Keyspace{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + 
skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Keyspaces[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVschema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVschema + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVschema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RoutingRules == nil { + m.RoutingRules = &RoutingRules{} + } + if err := m.RoutingRules.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVschema(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVschema + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVschema(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVschema + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVschema + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVschema + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVschema + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVschema + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVschema + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVschema = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVschema = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVschema = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vtadmin/vtadmin.pb.go b/go/vt/proto/vtadmin/vtadmin.pb.go index c65c6cc7f42..90beb3595b4 100644 --- a/go/vt/proto/vtadmin/vtadmin.pb.go +++ b/go/vt/proto/vtadmin/vtadmin.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
+// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vtadmin.proto package vtadmin @@ -6,14 +6,18 @@ package vtadmin import ( context "context" fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - + tabletmanagerdata "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodata "vitess.io/vitess/go/vt/proto/topodata" + vschema "vitess.io/vitess/go/vt/proto/vschema" + vtctldata "vitess.io/vitess/go/vt/proto/vtctldata" ) // Reference imports to suppress errors if they are not otherwise used. @@ -52,7 +56,7 @@ func (x Tablet_ServingState) String() string { } func (Tablet_ServingState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{1, 0} + return fileDescriptor_609739e22a0a50b3, []int{4, 0} } // Cluster represents information about a Vitess cluster. @@ -70,18 +74,26 @@ func (*Cluster) ProtoMessage() {} func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor_609739e22a0a50b3, []int{0} } - func (m *Cluster) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cluster.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Cluster) XXX_Merge(src proto.Message) { xxx_messageInfo_Cluster.Merge(m, src) } func (m *Cluster) XXX_Size() int { - return xxx_messageInfo_Cluster.Size(m) + return m.Size() } func (m *Cluster) XXX_DiscardUnknown() { xxx_messageInfo_Cluster.DiscardUnknown(m) @@ -103,6 +115,320 @@ func (m *Cluster) GetName() string { return "" } +type ClusterWorkflows struct { + Workflows []*Workflow 
`protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` + // Warnings is a list of non-fatal errors encountered when fetching + // workflows for a particular cluster. + Warnings []string `protobuf:"bytes,2,rep,name=warnings,proto3" json:"warnings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflows) Reset() { *m = ClusterWorkflows{} } +func (m *ClusterWorkflows) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflows) ProtoMessage() {} +func (*ClusterWorkflows) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{1} +} +func (m *ClusterWorkflows) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflows.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflows) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflows.Merge(m, src) +} +func (m *ClusterWorkflows) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflows) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflows.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflows proto.InternalMessageInfo + +func (m *ClusterWorkflows) GetWorkflows() []*Workflow { + if m != nil { + return m.Workflows + } + return nil +} + +func (m *ClusterWorkflows) GetWarnings() []string { + if m != nil { + return m.Warnings + } + return nil +} + +// Keyspace represents information about a keyspace in a particular Vitess +// cluster. 
+type Keyspace struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace *vtctldata.Keyspace `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shards map[string]*vtctldata.Shard `protobuf:"bytes,3,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Keyspace) Reset() { *m = Keyspace{} } +func (m *Keyspace) String() string { return proto.CompactTextString(m) } +func (*Keyspace) ProtoMessage() {} +func (*Keyspace) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{2} +} +func (m *Keyspace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Keyspace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Keyspace.Merge(m, src) +} +func (m *Keyspace) XXX_Size() int { + return m.Size() +} +func (m *Keyspace) XXX_DiscardUnknown() { + xxx_messageInfo_Keyspace.DiscardUnknown(m) +} + +var xxx_messageInfo_Keyspace proto.InternalMessageInfo + +func (m *Keyspace) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *Keyspace) GetKeyspace() *vtctldata.Keyspace { + if m != nil { + return m.Keyspace + } + return nil +} + +func (m *Keyspace) GetShards() map[string]*vtctldata.Shard { + if m != nil { + return m.Shards + } + return nil +} + +type Schema struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" 
json:"keyspace,omitempty"` + TableDefinitions []*tabletmanagerdata.TableDefinition `protobuf:"bytes,3,rep,name=table_definitions,json=tableDefinitions,proto3" json:"table_definitions,omitempty"` + // TableSizes is a mapping of table name to TableSize information. + TableSizes map[string]*Schema_TableSize `protobuf:"bytes,4,rep,name=table_sizes,json=tableSizes,proto3" json:"table_sizes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{3} +} +func (m *Schema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return m.Size() +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo + +func (m *Schema) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *Schema) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *Schema) GetTableDefinitions() []*tabletmanagerdata.TableDefinition { + if m != nil { + return m.TableDefinitions + } + return nil +} + +func (m *Schema) GetTableSizes() map[string]*Schema_TableSize { + if m != nil { + return m.TableSizes + } + return nil +} + +type 
Schema_ShardTableSize struct { + RowCount uint64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` + DataLength uint64 `protobuf:"varint,2,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_ShardTableSize) Reset() { *m = Schema_ShardTableSize{} } +func (m *Schema_ShardTableSize) String() string { return proto.CompactTextString(m) } +func (*Schema_ShardTableSize) ProtoMessage() {} +func (*Schema_ShardTableSize) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{3, 1} +} +func (m *Schema_ShardTableSize) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Schema_ShardTableSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Schema_ShardTableSize.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Schema_ShardTableSize) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_ShardTableSize.Merge(m, src) +} +func (m *Schema_ShardTableSize) XXX_Size() int { + return m.Size() +} +func (m *Schema_ShardTableSize) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_ShardTableSize.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_ShardTableSize proto.InternalMessageInfo + +func (m *Schema_ShardTableSize) GetRowCount() uint64 { + if m != nil { + return m.RowCount + } + return 0 +} + +func (m *Schema_ShardTableSize) GetDataLength() uint64 { + if m != nil { + return m.DataLength + } + return 0 +} + +// TableSize aggregates table size information across all shards containing +// in the given keyspace and cluster, as well as per-shard size information. 
+type Schema_TableSize struct { + RowCount uint64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` + DataLength uint64 `protobuf:"varint,2,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"` + ByShard map[string]*Schema_ShardTableSize `protobuf:"bytes,3,rep,name=by_shard,json=byShard,proto3" json:"by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_TableSize) Reset() { *m = Schema_TableSize{} } +func (m *Schema_TableSize) String() string { return proto.CompactTextString(m) } +func (*Schema_TableSize) ProtoMessage() {} +func (*Schema_TableSize) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{3, 2} +} +func (m *Schema_TableSize) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Schema_TableSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Schema_TableSize.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Schema_TableSize) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_TableSize.Merge(m, src) +} +func (m *Schema_TableSize) XXX_Size() int { + return m.Size() +} +func (m *Schema_TableSize) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_TableSize.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_TableSize proto.InternalMessageInfo + +func (m *Schema_TableSize) GetRowCount() uint64 { + if m != nil { + return m.RowCount + } + return 0 +} + +func (m *Schema_TableSize) GetDataLength() uint64 { + if m != nil { + return m.DataLength + } + return 0 +} + +func (m *Schema_TableSize) GetByShard() map[string]*Schema_ShardTableSize { + if m != nil { + return m.ByShard + } + return nil +} + // 
Tablet groups the topo information of a tablet together with the Vitess // cluster it belongs to. type Tablet struct { @@ -118,20 +444,28 @@ func (m *Tablet) Reset() { *m = Tablet{} } func (m *Tablet) String() string { return proto.CompactTextString(m) } func (*Tablet) ProtoMessage() {} func (*Tablet) Descriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{1} + return fileDescriptor_609739e22a0a50b3, []int{4} } - func (m *Tablet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Tablet.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Tablet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Tablet.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Tablet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Tablet) XXX_Merge(src proto.Message) { xxx_messageInfo_Tablet.Merge(m, src) } func (m *Tablet) XXX_Size() int { - return xxx_messageInfo_Tablet.Size(m) + return m.Size() } func (m *Tablet) XXX_DiscardUnknown() { xxx_messageInfo_Tablet.DiscardUnknown(m) @@ -160,6 +494,127 @@ func (m *Tablet) GetState() Tablet_ServingState { return Tablet_UNKNOWN } +// VSchema represents the vschema for a keyspace in the cluster it belongs to. +type VSchema struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // Name is the name of the keyspace this VSchema is for. 
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + VSchema *vschema.Keyspace `protobuf:"bytes,3,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VSchema) Reset() { *m = VSchema{} } +func (m *VSchema) String() string { return proto.CompactTextString(m) } +func (*VSchema) ProtoMessage() {} +func (*VSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{5} +} +func (m *VSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VSchema.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_VSchema.Merge(m, src) +} +func (m *VSchema) XXX_Size() int { + return m.Size() +} +func (m *VSchema) XXX_DiscardUnknown() { + xxx_messageInfo_VSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_VSchema proto.InternalMessageInfo + +func (m *VSchema) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *VSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VSchema) GetVSchema() *vschema.Keyspace { + if m != nil { + return m.VSchema + } + return nil +} + +// Vtctld represents information about a single Vtctld host. 
+type Vtctld struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Vtctld) Reset() { *m = Vtctld{} } +func (m *Vtctld) String() string { return proto.CompactTextString(m) } +func (*Vtctld) ProtoMessage() {} +func (*Vtctld) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{6} +} +func (m *Vtctld) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Vtctld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Vtctld.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Vtctld) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vtctld.Merge(m, src) +} +func (m *Vtctld) XXX_Size() int { + return m.Size() +} +func (m *Vtctld) XXX_DiscardUnknown() { + xxx_messageInfo_Vtctld.DiscardUnknown(m) +} + +var xxx_messageInfo_Vtctld proto.InternalMessageInfo + +func (m *Vtctld) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *Vtctld) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + // VTGate represents information about a single VTGate host. type VTGate struct { // Hostname is the shortname of the VTGate. 
@@ -183,20 +638,28 @@ func (m *VTGate) Reset() { *m = VTGate{} } func (m *VTGate) String() string { return proto.CompactTextString(m) } func (*VTGate) ProtoMessage() {} func (*VTGate) Descriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{2} + return fileDescriptor_609739e22a0a50b3, []int{7} } - func (m *VTGate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VTGate.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VTGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VTGate.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VTGate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VTGate) XXX_Merge(src proto.Message) { xxx_messageInfo_VTGate.Merge(m, src) } func (m *VTGate) XXX_Size() int { - return xxx_messageInfo_VTGate.Size(m) + return m.Size() } func (m *VTGate) XXX_DiscardUnknown() { xxx_messageInfo_VTGate.DiscardUnknown(m) @@ -239,84 +702,626 @@ func (m *VTGate) GetKeyspaces() []string { return nil } -type GetGatesRequest struct { - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type Workflow struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow *vtctldata.Workflow `protobuf:"bytes,3,opt,name=workflow,proto3" json:"workflow,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetGatesRequest) Reset() { *m = GetGatesRequest{} } -func (m *GetGatesRequest) String() string { return proto.CompactTextString(m) } -func (*GetGatesRequest) 
ProtoMessage() {} -func (*GetGatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{3} +func (m *Workflow) Reset() { *m = Workflow{} } +func (m *Workflow) String() string { return proto.CompactTextString(m) } +func (*Workflow) ProtoMessage() {} +func (*Workflow) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{8} } - -func (m *GetGatesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetGatesRequest.Unmarshal(m, b) +func (m *Workflow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *GetGatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetGatesRequest.Marshal(b, m, deterministic) +func (m *Workflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Workflow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *GetGatesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetGatesRequest.Merge(m, src) +func (m *Workflow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow.Merge(m, src) } -func (m *GetGatesRequest) XXX_Size() int { - return xxx_messageInfo_GetGatesRequest.Size(m) +func (m *Workflow) XXX_Size() int { + return m.Size() } -func (m *GetGatesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetGatesRequest.DiscardUnknown(m) +func (m *Workflow) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow.DiscardUnknown(m) } -var xxx_messageInfo_GetGatesRequest proto.InternalMessageInfo +var xxx_messageInfo_Workflow proto.InternalMessageInfo -func (m *GetGatesRequest) GetClusterIds() []string { +func (m *Workflow) GetCluster() *Cluster { if m != nil { - return m.ClusterIds + return m.Cluster } return nil } -type GetGatesResponse struct { - Gates []*VTGate `protobuf:"bytes,1,rep,name=gates,proto3" json:"gates,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *Workflow) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" } -func (m *GetGatesResponse) Reset() { *m = GetGatesResponse{} } -func (m *GetGatesResponse) String() string { return proto.CompactTextString(m) } -func (*GetGatesResponse) ProtoMessage() {} -func (*GetGatesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{4} +func (m *Workflow) GetWorkflow() *vtctldata.Workflow { + if m != nil { + return m.Workflow + } + return nil } -func (m *GetGatesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetGatesResponse.Unmarshal(m, b) +type FindSchemaRequest struct { + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,3,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetGatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetGatesResponse.Marshal(b, m, deterministic) + +func (m *FindSchemaRequest) Reset() { *m = FindSchemaRequest{} } +func (m *FindSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*FindSchemaRequest) ProtoMessage() {} +func (*FindSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{9} } -func (m *GetGatesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetGatesResponse.Merge(m, src) +func (m *FindSchemaRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *GetGatesResponse) XXX_Size() int { - return xxx_messageInfo_GetGatesResponse.Size(m) 
+func (m *FindSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FindSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *GetGatesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetGatesResponse.DiscardUnknown(m) +func (m *FindSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FindSchemaRequest.Merge(m, src) +} +func (m *FindSchemaRequest) XXX_Size() int { + return m.Size() +} +func (m *FindSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FindSchemaRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetGatesResponse proto.InternalMessageInfo +var xxx_messageInfo_FindSchemaRequest proto.InternalMessageInfo -func (m *GetGatesResponse) GetGates() []*VTGate { +func (m *FindSchemaRequest) GetTable() string { if m != nil { - return m.Gates + return m.Table + } + return "" +} + +func (m *FindSchemaRequest) GetClusterIds() []string { + if m != nil { + return m.ClusterIds + } + return nil +} + +func (m *FindSchemaRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { + if m != nil { + return m.TableSizeOptions + } + return nil +} + +type GetClustersRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClustersRequest) Reset() { *m = GetClustersRequest{} } +func (m *GetClustersRequest) String() string { return proto.CompactTextString(m) } +func (*GetClustersRequest) ProtoMessage() {} +func (*GetClustersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{10} +} +func (m *GetClustersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetClustersRequest.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetClustersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClustersRequest.Merge(m, src) +} +func (m *GetClustersRequest) XXX_Size() int { + return m.Size() +} +func (m *GetClustersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClustersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClustersRequest proto.InternalMessageInfo + +type GetClustersResponse struct { + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClustersResponse) Reset() { *m = GetClustersResponse{} } +func (m *GetClustersResponse) String() string { return proto.CompactTextString(m) } +func (*GetClustersResponse) ProtoMessage() {} +func (*GetClustersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{11} +} +func (m *GetClustersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetClustersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetClustersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClustersResponse.Merge(m, src) +} +func (m *GetClustersResponse) XXX_Size() int { + return m.Size() +} +func (m *GetClustersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetClustersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClustersResponse proto.InternalMessageInfo + +func (m *GetClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +type GetGatesRequest struct 
{ + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGatesRequest) Reset() { *m = GetGatesRequest{} } +func (m *GetGatesRequest) String() string { return proto.CompactTextString(m) } +func (*GetGatesRequest) ProtoMessage() {} +func (*GetGatesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{12} +} +func (m *GetGatesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetGatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetGatesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetGatesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGatesRequest.Merge(m, src) +} +func (m *GetGatesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetGatesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetGatesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetGatesRequest proto.InternalMessageInfo + +func (m *GetGatesRequest) GetClusterIds() []string { + if m != nil { + return m.ClusterIds + } + return nil +} + +type GetGatesResponse struct { + Gates []*VTGate `protobuf:"bytes,1,rep,name=gates,proto3" json:"gates,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGatesResponse) Reset() { *m = GetGatesResponse{} } +func (m *GetGatesResponse) String() string { return proto.CompactTextString(m) } +func (*GetGatesResponse) ProtoMessage() {} +func (*GetGatesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{13} +} +func (m *GetGatesResponse) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *GetGatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetGatesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetGatesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGatesResponse.Merge(m, src) +} +func (m *GetGatesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetGatesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetGatesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetGatesResponse proto.InternalMessageInfo + +func (m *GetGatesResponse) GetGates() []*VTGate { + if m != nil { + return m.Gates + } + return nil +} + +type GetKeyspacesRequest struct { + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyspacesRequest) Reset() { *m = GetKeyspacesRequest{} } +func (m *GetKeyspacesRequest) String() string { return proto.CompactTextString(m) } +func (*GetKeyspacesRequest) ProtoMessage() {} +func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{14} +} +func (m *GetKeyspacesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetKeyspacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetKeyspacesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetKeyspacesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyspacesRequest.Merge(m, src) +} +func (m *GetKeyspacesRequest) XXX_Size() int { + return m.Size() +} +func (m 
*GetKeyspacesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyspacesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyspacesRequest proto.InternalMessageInfo + +func (m *GetKeyspacesRequest) GetClusterIds() []string { + if m != nil { + return m.ClusterIds + } + return nil +} + +type GetKeyspacesResponse struct { + Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyspacesResponse) Reset() { *m = GetKeyspacesResponse{} } +func (m *GetKeyspacesResponse) String() string { return proto.CompactTextString(m) } +func (*GetKeyspacesResponse) ProtoMessage() {} +func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{15} +} +func (m *GetKeyspacesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetKeyspacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetKeyspacesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetKeyspacesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyspacesResponse.Merge(m, src) +} +func (m *GetKeyspacesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetKeyspacesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyspacesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyspacesResponse proto.InternalMessageInfo + +func (m *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { + if m != nil { + return m.Keyspaces + } + return nil +} + +type GetSchemaRequest struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" 
json:"keyspace,omitempty"` + Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"` + TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,4,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemaRequest) Reset() { *m = GetSchemaRequest{} } +func (m *GetSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*GetSchemaRequest) ProtoMessage() {} +func (*GetSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{16} +} +func (m *GetSchemaRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemaRequest.Merge(m, src) +} +func (m *GetSchemaRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemaRequest proto.InternalMessageInfo + +func (m *GetSchemaRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetSchemaRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *GetSchemaRequest) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *GetSchemaRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { + if m != nil { + return m.TableSizeOptions + } + return nil +} + +type GetSchemasRequest struct { + ClusterIds []string 
`protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,2,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemasRequest) Reset() { *m = GetSchemasRequest{} } +func (m *GetSchemasRequest) String() string { return proto.CompactTextString(m) } +func (*GetSchemasRequest) ProtoMessage() {} +func (*GetSchemasRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{17} +} +func (m *GetSchemasRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSchemasRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSchemasRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSchemasRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemasRequest.Merge(m, src) +} +func (m *GetSchemasRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSchemasRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemasRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemasRequest proto.InternalMessageInfo + +func (m *GetSchemasRequest) GetClusterIds() []string { + if m != nil { + return m.ClusterIds + } + return nil +} + +func (m *GetSchemasRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { + if m != nil { + return m.TableSizeOptions + } + return nil +} + +type GetSchemasResponse struct { + Schemas []*Schema `protobuf:"bytes,1,rep,name=schemas,proto3" json:"schemas,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemasResponse) Reset() { *m = 
GetSchemasResponse{} } +func (m *GetSchemasResponse) String() string { return proto.CompactTextString(m) } +func (*GetSchemasResponse) ProtoMessage() {} +func (*GetSchemasResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{18} +} +func (m *GetSchemasResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSchemasResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSchemasResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSchemasResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemasResponse.Merge(m, src) +} +func (m *GetSchemasResponse) XXX_Size() int { + return m.Size() +} +func (m *GetSchemasResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemasResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemasResponse proto.InternalMessageInfo + +func (m *GetSchemasResponse) GetSchemas() []*Schema { + if m != nil { + return m.Schemas } return nil } +type GetSchemaTableSizeOptions struct { + AggregateSizes bool `protobuf:"varint,1,opt,name=aggregate_sizes,json=aggregateSizes,proto3" json:"aggregate_sizes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemaTableSizeOptions) Reset() { *m = GetSchemaTableSizeOptions{} } +func (m *GetSchemaTableSizeOptions) String() string { return proto.CompactTextString(m) } +func (*GetSchemaTableSizeOptions) ProtoMessage() {} +func (*GetSchemaTableSizeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{19} +} +func (m *GetSchemaTableSizeOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSchemaTableSizeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_GetSchemaTableSizeOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSchemaTableSizeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemaTableSizeOptions.Merge(m, src) +} +func (m *GetSchemaTableSizeOptions) XXX_Size() int { + return m.Size() +} +func (m *GetSchemaTableSizeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemaTableSizeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemaTableSizeOptions proto.InternalMessageInfo + +func (m *GetSchemaTableSizeOptions) GetAggregateSizes() bool { + if m != nil { + return m.AggregateSizes + } + return false +} + type GetTabletRequest struct { Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` // ClusterIDs is an optional parameter to narrow the scope of the search, if @@ -332,20 +1337,28 @@ func (m *GetTabletRequest) Reset() { *m = GetTabletRequest{} } func (m *GetTabletRequest) String() string { return proto.CompactTextString(m) } func (*GetTabletRequest) ProtoMessage() {} func (*GetTabletRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{5} + return fileDescriptor_609739e22a0a50b3, []int{20} } - func (m *GetTabletRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetTabletRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetTabletRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetTabletRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetTabletRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetTabletRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetTabletRequest.Merge(m, src) } func (m *GetTabletRequest) XXX_Size() int { - return 
xxx_messageInfo_GetTabletRequest.Size(m) + return m.Size() } func (m *GetTabletRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetTabletRequest.DiscardUnknown(m) @@ -378,20 +1391,28 @@ func (m *GetTabletsRequest) Reset() { *m = GetTabletsRequest{} } func (m *GetTabletsRequest) String() string { return proto.CompactTextString(m) } func (*GetTabletsRequest) ProtoMessage() {} func (*GetTabletsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{6} + return fileDescriptor_609739e22a0a50b3, []int{21} } - func (m *GetTabletsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetTabletsRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetTabletsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetTabletsRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetTabletsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetTabletsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetTabletsRequest.Merge(m, src) } func (m *GetTabletsRequest) XXX_Size() int { - return xxx_messageInfo_GetTabletsRequest.Size(m) + return m.Size() } func (m *GetTabletsRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetTabletsRequest.DiscardUnknown(m) @@ -417,20 +1438,28 @@ func (m *GetTabletsResponse) Reset() { *m = GetTabletsResponse{} } func (m *GetTabletsResponse) String() string { return proto.CompactTextString(m) } func (*GetTabletsResponse) ProtoMessage() {} func (*GetTabletsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_609739e22a0a50b3, []int{7} + return fileDescriptor_609739e22a0a50b3, []int{22} } - func (m *GetTabletsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetTabletsResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *GetTabletsResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { - return xxx_messageInfo_GetTabletsResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_GetTabletsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *GetTabletsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetTabletsResponse.Merge(m, src) } func (m *GetTabletsResponse) XXX_Size() int { - return xxx_messageInfo_GetTabletsResponse.Size(m) + return m.Size() } func (m *GetTabletsResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetTabletsResponse.DiscardUnknown(m) @@ -445,52 +1474,606 @@ func (m *GetTabletsResponse) GetTablets() []*Tablet { return nil } -func init() { - proto.RegisterEnum("vtadmin.Tablet_ServingState", Tablet_ServingState_name, Tablet_ServingState_value) - proto.RegisterType((*Cluster)(nil), "vtadmin.Cluster") - proto.RegisterType((*Tablet)(nil), "vtadmin.Tablet") - proto.RegisterType((*VTGate)(nil), "vtadmin.VTGate") - proto.RegisterType((*GetGatesRequest)(nil), "vtadmin.GetGatesRequest") - proto.RegisterType((*GetGatesResponse)(nil), "vtadmin.GetGatesResponse") - proto.RegisterType((*GetTabletRequest)(nil), "vtadmin.GetTabletRequest") +type GetVSchemaRequest struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVSchemaRequest) Reset() { *m = GetVSchemaRequest{} } +func (m *GetVSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*GetVSchemaRequest) ProtoMessage() {} +func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{23} +} +func (m *GetVSchemaRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *GetVSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetVSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetVSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVSchemaRequest.Merge(m, src) +} +func (m *GetVSchemaRequest) XXX_Size() int { + return m.Size() +} +func (m *GetVSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVSchemaRequest proto.InternalMessageInfo + +func (m *GetVSchemaRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetVSchemaRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +type GetVSchemasRequest struct { + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVSchemasRequest) Reset() { *m = GetVSchemasRequest{} } +func (m *GetVSchemasRequest) String() string { return proto.CompactTextString(m) } +func (*GetVSchemasRequest) ProtoMessage() {} +func (*GetVSchemasRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{24} +} +func (m *GetVSchemasRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetVSchemasRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetVSchemasRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetVSchemasRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVSchemasRequest.Merge(m, src) +} +func 
(m *GetVSchemasRequest) XXX_Size() int { + return m.Size() +} +func (m *GetVSchemasRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVSchemasRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVSchemasRequest proto.InternalMessageInfo + +func (m *GetVSchemasRequest) GetClusterIds() []string { + if m != nil { + return m.ClusterIds + } + return nil +} + +type GetVSchemasResponse struct { + VSchemas []*VSchema `protobuf:"bytes,1,rep,name=v_schemas,json=vSchemas,proto3" json:"v_schemas,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVSchemasResponse) Reset() { *m = GetVSchemasResponse{} } +func (m *GetVSchemasResponse) String() string { return proto.CompactTextString(m) } +func (*GetVSchemasResponse) ProtoMessage() {} +func (*GetVSchemasResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{25} +} +func (m *GetVSchemasResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetVSchemasResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetVSchemasResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetVSchemasResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVSchemasResponse.Merge(m, src) +} +func (m *GetVSchemasResponse) XXX_Size() int { + return m.Size() +} +func (m *GetVSchemasResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetVSchemasResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVSchemasResponse proto.InternalMessageInfo + +func (m *GetVSchemasResponse) GetVSchemas() []*VSchema { + if m != nil { + return m.VSchemas + } + return nil +} + +type GetWorkflowRequest struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string 
`protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + ActiveOnly bool `protobuf:"varint,4,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowRequest) Reset() { *m = GetWorkflowRequest{} } +func (m *GetWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowRequest) ProtoMessage() {} +func (*GetWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{26} +} +func (m *GetWorkflowRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetWorkflowRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowRequest.Merge(m, src) +} +func (m *GetWorkflowRequest) XXX_Size() int { + return m.Size() +} +func (m *GetWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowRequest proto.InternalMessageInfo + +func (m *GetWorkflowRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetWorkflowRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *GetWorkflowRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetWorkflowRequest) GetActiveOnly() bool { + if m != nil { + return m.ActiveOnly + } + return false +} + +type GetWorkflowsRequest struct { + ClusterIds []string 
`protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // ActiveOnly specifies whether to return workflows that are currently + // active (running or paused) instead of all workflows. + ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + // Keyspaces is a list of keyspaces to restrict the workflow search to. Note + // that the keyspaces list applies across all cluster IDs in the request. + // + // If, for example, you have two clusters, each with a keyspace called "foo" + // and want the workflows from "foo" in cluster1 but not from cluster2, you + // must make two requests. + // + // Keyspaces and IgnoreKeyspaces are mutually-exclusive, and Keyspaces takes + // precedence; if Keyspaces is a non-empty list, then IgnoreKeyspaces is + // ignored completely. + Keyspaces []string `protobuf:"bytes,3,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + // IgnoreKeyspaces is a list of keyspaces to skip during the workflow + // search. It has the same semantics as the Keyspaces parameter, so refer to + // that documentation for more details. 
+ IgnoreKeyspaces []string `protobuf:"bytes,4,rep,name=ignore_keyspaces,json=ignoreKeyspaces,proto3" json:"ignore_keyspaces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowsRequest) Reset() { *m = GetWorkflowsRequest{} } +func (m *GetWorkflowsRequest) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowsRequest) ProtoMessage() {} +func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{27} +} +func (m *GetWorkflowsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetWorkflowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetWorkflowsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetWorkflowsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowsRequest.Merge(m, src) +} +func (m *GetWorkflowsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetWorkflowsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowsRequest proto.InternalMessageInfo + +func (m *GetWorkflowsRequest) GetClusterIds() []string { + if m != nil { + return m.ClusterIds + } + return nil +} + +func (m *GetWorkflowsRequest) GetActiveOnly() bool { + if m != nil { + return m.ActiveOnly + } + return false +} + +func (m *GetWorkflowsRequest) GetKeyspaces() []string { + if m != nil { + return m.Keyspaces + } + return nil +} + +func (m *GetWorkflowsRequest) GetIgnoreKeyspaces() []string { + if m != nil { + return m.IgnoreKeyspaces + } + return nil +} + +type GetWorkflowsResponse struct { + WorkflowsByCluster map[string]*ClusterWorkflows `protobuf:"bytes,1,rep,name=workflows_by_cluster,json=workflowsByCluster,proto3" 
json:"workflows_by_cluster,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowsResponse) Reset() { *m = GetWorkflowsResponse{} } +func (m *GetWorkflowsResponse) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowsResponse) ProtoMessage() {} +func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{28} +} +func (m *GetWorkflowsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetWorkflowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetWorkflowsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetWorkflowsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowsResponse.Merge(m, src) +} +func (m *GetWorkflowsResponse) XXX_Size() int { + return m.Size() +} +func (m *GetWorkflowsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowsResponse proto.InternalMessageInfo + +func (m *GetWorkflowsResponse) GetWorkflowsByCluster() map[string]*ClusterWorkflows { + if m != nil { + return m.WorkflowsByCluster + } + return nil +} + +type VTExplainRequest struct { + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VTExplainRequest) Reset() { *m = VTExplainRequest{} } +func (m 
*VTExplainRequest) String() string { return proto.CompactTextString(m) } +func (*VTExplainRequest) ProtoMessage() {} +func (*VTExplainRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{29} +} +func (m *VTExplainRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VTExplainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VTExplainRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VTExplainRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VTExplainRequest.Merge(m, src) +} +func (m *VTExplainRequest) XXX_Size() int { + return m.Size() +} +func (m *VTExplainRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VTExplainRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VTExplainRequest proto.InternalMessageInfo + +func (m *VTExplainRequest) GetCluster() string { + if m != nil { + return m.Cluster + } + return "" +} + +func (m *VTExplainRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *VTExplainRequest) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +type VTExplainResponse struct { + Response string `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VTExplainResponse) Reset() { *m = VTExplainResponse{} } +func (m *VTExplainResponse) String() string { return proto.CompactTextString(m) } +func (*VTExplainResponse) ProtoMessage() {} +func (*VTExplainResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_609739e22a0a50b3, []int{30} +} +func (m *VTExplainResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VTExplainResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_VTExplainResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VTExplainResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VTExplainResponse.Merge(m, src) +} +func (m *VTExplainResponse) XXX_Size() int { + return m.Size() +} +func (m *VTExplainResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VTExplainResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VTExplainResponse proto.InternalMessageInfo + +func (m *VTExplainResponse) GetResponse() string { + if m != nil { + return m.Response + } + return "" +} + +func init() { + proto.RegisterEnum("vtadmin.Tablet_ServingState", Tablet_ServingState_name, Tablet_ServingState_value) + proto.RegisterType((*Cluster)(nil), "vtadmin.Cluster") + proto.RegisterType((*ClusterWorkflows)(nil), "vtadmin.ClusterWorkflows") + proto.RegisterType((*Keyspace)(nil), "vtadmin.Keyspace") + proto.RegisterMapType((map[string]*vtctldata.Shard)(nil), "vtadmin.Keyspace.ShardsEntry") + proto.RegisterType((*Schema)(nil), "vtadmin.Schema") + proto.RegisterMapType((map[string]*Schema_TableSize)(nil), "vtadmin.Schema.TableSizesEntry") + proto.RegisterType((*Schema_ShardTableSize)(nil), "vtadmin.Schema.ShardTableSize") + proto.RegisterType((*Schema_TableSize)(nil), "vtadmin.Schema.TableSize") + proto.RegisterMapType((map[string]*Schema_ShardTableSize)(nil), "vtadmin.Schema.TableSize.ByShardEntry") + proto.RegisterType((*Tablet)(nil), "vtadmin.Tablet") + proto.RegisterType((*VSchema)(nil), "vtadmin.VSchema") + proto.RegisterType((*Vtctld)(nil), "vtadmin.Vtctld") + proto.RegisterType((*VTGate)(nil), "vtadmin.VTGate") + proto.RegisterType((*Workflow)(nil), "vtadmin.Workflow") + proto.RegisterType((*FindSchemaRequest)(nil), "vtadmin.FindSchemaRequest") + proto.RegisterType((*GetClustersRequest)(nil), "vtadmin.GetClustersRequest") + 
proto.RegisterType((*GetClustersResponse)(nil), "vtadmin.GetClustersResponse") + proto.RegisterType((*GetGatesRequest)(nil), "vtadmin.GetGatesRequest") + proto.RegisterType((*GetGatesResponse)(nil), "vtadmin.GetGatesResponse") + proto.RegisterType((*GetKeyspacesRequest)(nil), "vtadmin.GetKeyspacesRequest") + proto.RegisterType((*GetKeyspacesResponse)(nil), "vtadmin.GetKeyspacesResponse") + proto.RegisterType((*GetSchemaRequest)(nil), "vtadmin.GetSchemaRequest") + proto.RegisterType((*GetSchemasRequest)(nil), "vtadmin.GetSchemasRequest") + proto.RegisterType((*GetSchemasResponse)(nil), "vtadmin.GetSchemasResponse") + proto.RegisterType((*GetSchemaTableSizeOptions)(nil), "vtadmin.GetSchemaTableSizeOptions") + proto.RegisterType((*GetTabletRequest)(nil), "vtadmin.GetTabletRequest") proto.RegisterType((*GetTabletsRequest)(nil), "vtadmin.GetTabletsRequest") proto.RegisterType((*GetTabletsResponse)(nil), "vtadmin.GetTabletsResponse") + proto.RegisterType((*GetVSchemaRequest)(nil), "vtadmin.GetVSchemaRequest") + proto.RegisterType((*GetVSchemasRequest)(nil), "vtadmin.GetVSchemasRequest") + proto.RegisterType((*GetVSchemasResponse)(nil), "vtadmin.GetVSchemasResponse") + proto.RegisterType((*GetWorkflowRequest)(nil), "vtadmin.GetWorkflowRequest") + proto.RegisterType((*GetWorkflowsRequest)(nil), "vtadmin.GetWorkflowsRequest") + proto.RegisterType((*GetWorkflowsResponse)(nil), "vtadmin.GetWorkflowsResponse") + proto.RegisterMapType((map[string]*ClusterWorkflows)(nil), "vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry") + proto.RegisterType((*VTExplainRequest)(nil), "vtadmin.VTExplainRequest") + proto.RegisterType((*VTExplainResponse)(nil), "vtadmin.VTExplainResponse") } func init() { proto.RegisterFile("vtadmin.proto", fileDescriptor_609739e22a0a50b3) } var fileDescriptor_609739e22a0a50b3 = []byte{ - // 474 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x5f, 0x8b, 0xd3, 0x4e, - 0x14, 0x6d, 0xb2, 0xdb, 
0x66, 0x73, 0xf3, 0xfb, 0xb5, 0xf5, 0x3e, 0xc5, 0xb8, 0x60, 0x19, 0x54, - 0xaa, 0x60, 0x03, 0xd1, 0x97, 0x3e, 0xc9, 0x2a, 0x52, 0x16, 0x21, 0x85, 0x69, 0xad, 0xe0, 0xcb, - 0x92, 0x6d, 0x86, 0x1a, 0xcc, 0x76, 0x62, 0x67, 0xb6, 0xe0, 0x17, 0xf1, 0x5b, 0x09, 0x7e, 0x24, - 0x99, 0x3f, 0x49, 0xbb, 0xed, 0xa2, 0xbe, 0xdd, 0x7b, 0xcf, 0x39, 0x77, 0xce, 0x3d, 0x6d, 0xe0, - 0xff, 0xad, 0xcc, 0xf2, 0x9b, 0x62, 0x3d, 0xaa, 0x36, 0x5c, 0x72, 0xf4, 0x6c, 0x1b, 0x75, 0x25, - 0xaf, 0x78, 0x9e, 0xc9, 0xcc, 0x00, 0xe4, 0x25, 0x78, 0xef, 0xca, 0x5b, 0x21, 0xd9, 0x06, 0xbb, - 0xe0, 0x16, 0x79, 0xe8, 0x0c, 0x9c, 0xa1, 0x4f, 0xdd, 0x22, 0x47, 0x84, 0xd3, 0x75, 0x76, 0xc3, - 0x42, 0x57, 0x4f, 0x74, 0x4d, 0x7e, 0x3a, 0xd0, 0x99, 0x67, 0xd7, 0x25, 0x93, 0xf8, 0x02, 0xbc, - 0xa5, 0x51, 0x6a, 0x4d, 0x90, 0xf4, 0x47, 0xf5, 0x9b, 0x76, 0x23, 0xad, 0x09, 0x38, 0x84, 0x8e, - 0xd4, 0x2a, 0xbd, 0x4c, 0x51, 0x1b, 0x1b, 0x66, 0x1b, 0xb5, 0x38, 0x26, 0xd0, 0x16, 0x32, 0x93, - 0x2c, 0x3c, 0x19, 0x38, 0xc3, 0x6e, 0x72, 0xde, 0xec, 0x34, 0xbc, 0xd1, 0x8c, 0x6d, 0xb6, 0xc5, - 0x7a, 0x35, 0x53, 0x1c, 0x6a, 0xa8, 0x64, 0x0c, 0xff, 0xed, 0x8f, 0x31, 0x00, 0xef, 0x63, 0xfa, - 0x21, 0x9d, 0x7e, 0x4a, 0xfb, 0x2d, 0xd5, 0xcc, 0xde, 0xd3, 0xc5, 0x65, 0x3a, 0xe9, 0x3b, 0xd8, - 0x83, 0x20, 0x9d, 0xce, 0xaf, 0xea, 0x81, 0x4b, 0x7e, 0x38, 0xd0, 0x59, 0xcc, 0x27, 0x4a, 0x15, - 0xc1, 0xd9, 0x17, 0x2e, 0xa4, 0x3e, 0xd9, 0x84, 0xd0, 0xf4, 0x2a, 0x8a, 0x8a, 0xf3, 0xb2, 0x8e, - 0x42, 0xd5, 0x6a, 0xb6, 0x64, 0x65, 0xa9, 0x8d, 0xfa, 0x54, 0xd7, 0xfb, 0x99, 0x9c, 0xfe, 0x2d, - 0x93, 0x73, 0xf0, 0xbf, 0xb2, 0xef, 0xa2, 0xca, 0x96, 0x4c, 0x84, 0xed, 0xc1, 0xc9, 0xd0, 0xa7, - 0xbb, 0x01, 0x49, 0xa0, 0x37, 0x61, 0x52, 0x19, 0x13, 0x94, 0x7d, 0xbb, 0x65, 0x42, 0xe2, 0x63, - 0x08, 0xac, 0xf6, 0xaa, 0xc8, 0x45, 0xe8, 0x68, 0x09, 0xd8, 0xd1, 0x65, 0x2e, 0xc8, 0x18, 0xfa, - 0x3b, 0x8d, 0xa8, 0xf8, 0x5a, 0x30, 0x7c, 0x0a, 0xed, 0x95, 0x1a, 0x68, 0x7a, 0x90, 0xf4, 0x1a, - 0x3f, 0xe6, 0x6a, 0x6a, 0x50, 0x32, 0xd5, 0x52, 0xfb, 0x5b, 
0xd8, 0xf7, 0xfe, 0x14, 0xc8, 0x81, - 0x17, 0xf7, 0xc8, 0xcb, 0x6b, 0x78, 0xd0, 0x2c, 0xfc, 0xf7, 0x0b, 0xde, 0x00, 0xee, 0xab, 0xec, - 0x0d, 0xcf, 0xc1, 0x33, 0xff, 0x8e, 0xe3, 0x2b, 0xac, 0xe3, 0x1a, 0x4f, 0x7e, 0x39, 0xe0, 0x2d, - 0xe6, 0x17, 0x0a, 0xc3, 0x0b, 0x38, 0xab, 0xe3, 0xc0, 0xb0, 0x51, 0x1c, 0xa4, 0x1a, 0x3d, 0xbc, - 0x07, 0x31, 0xef, 0x92, 0x16, 0x8e, 0xc1, 0x6f, 0xfc, 0xe0, 0x1d, 0xe6, 0x9d, 0xa8, 0xa2, 0x43, - 0x43, 0xa4, 0x85, 0x13, 0x80, 0xdd, 0x29, 0x18, 0x1d, 0x6b, 0x1b, 0x07, 0x8f, 0xee, 0xc5, 0x6a, - 0x0f, 0x6f, 0x9f, 0x7d, 0x7e, 0xb2, 0x2d, 0x24, 0x13, 0x62, 0x54, 0xf0, 0xd8, 0x54, 0xf1, 0x8a, - 0xc7, 0x5b, 0x19, 0xeb, 0x2f, 0x38, 0xb6, 0xe2, 0xeb, 0x8e, 0x6e, 0x5f, 0xfd, 0x0e, 0x00, 0x00, - 0xff, 0xff, 0x34, 0xe6, 0x6b, 0x4d, 0xfa, 0x03, 0x00, 0x00, + // 1465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x5f, 0x6f, 0xdb, 0x54, + 0x14, 0xaf, 0x93, 0x34, 0x89, 0x4f, 0xb6, 0x26, 0xbd, 0xab, 0xb4, 0xd4, 0xed, 0xba, 0xea, 0x0a, + 0x46, 0x87, 0x58, 0x22, 0x85, 0x6d, 0x62, 0x80, 0x34, 0xb6, 0x75, 0x44, 0x63, 0x90, 0x4e, 0x4e, + 0xc9, 0xd0, 0x5e, 0x8c, 0x9b, 0x78, 0xa9, 0xb5, 0xd4, 0xce, 0xec, 0xdb, 0x84, 0xf0, 0x0a, 0xe2, + 0x89, 0x67, 0xc4, 0x03, 0x0f, 0x7c, 0x09, 0xbe, 0x03, 0x2f, 0x48, 0x7c, 0x04, 0x18, 0x2f, 0x48, + 0x7c, 0x01, 0x1e, 0x91, 0xef, 0x3f, 0x5f, 0xdb, 0x49, 0xd7, 0xb2, 0xbd, 0xf9, 0x9e, 0x73, 0x7c, + 0xce, 0xef, 0x9e, 0xfb, 0xbb, 0xbf, 0xe3, 0x04, 0xce, 0x4f, 0x88, 0x3d, 0x38, 0x72, 0xbd, 0xc6, + 0x38, 0xf0, 0x89, 0x8f, 0x4a, 0x7c, 0x69, 0x5c, 0x24, 0xf6, 0xc1, 0xc8, 0x21, 0x47, 0xb6, 0x67, + 0x0f, 0x9d, 0x60, 0x60, 0x13, 0x9b, 0x45, 0x18, 0x2b, 0xc4, 0x1f, 0xfb, 0xca, 0xfa, 0xfc, 0x24, + 0xec, 0x1f, 0x3a, 0x47, 0x62, 0x59, 0x9d, 0x90, 0x3e, 0x19, 0xc5, 0x7e, 0x7c, 0x0d, 0x4a, 0xf7, + 0x46, 0xc7, 0x21, 0x71, 0x02, 0xb4, 0x02, 0x39, 0x77, 0x50, 0xd7, 0xb6, 0xb5, 0x1d, 0xdd, 0xcc, + 0xb9, 0x03, 0x84, 0xa0, 0xe0, 0xd9, 0x47, 0x4e, 0x3d, 0x47, 0x2d, 0xf4, 0x19, 0x5b, 
0x50, 0xe3, + 0xe1, 0x8f, 0xfd, 0xe0, 0xd9, 0xd3, 0x91, 0x3f, 0x0d, 0x51, 0x13, 0xf4, 0xa9, 0x58, 0xd4, 0xb5, + 0xed, 0xfc, 0x4e, 0xa5, 0xb5, 0xda, 0x10, 0xb8, 0x45, 0x98, 0x19, 0xc7, 0x20, 0x03, 0xca, 0x53, + 0x3b, 0xf0, 0x5c, 0x6f, 0x18, 0xd6, 0x73, 0xdb, 0xf9, 0x1d, 0xdd, 0x94, 0x6b, 0xfc, 0x8f, 0x06, + 0xe5, 0x87, 0xce, 0x2c, 0x1c, 0xdb, 0x7d, 0x07, 0xbd, 0x0d, 0xa5, 0x3e, 0xab, 0x46, 0x61, 0x55, + 0x5a, 0x35, 0x99, 0x97, 0xa3, 0x30, 0x45, 0x00, 0x6a, 0x42, 0xf9, 0x19, 0x7f, 0x8f, 0x22, 0xae, + 0xb4, 0x2e, 0x34, 0xe2, 0xcd, 0x8a, 0x94, 0xa6, 0x0c, 0x42, 0x37, 0xa0, 0x18, 0x1e, 0xda, 0xc1, + 0x20, 0xac, 0xe7, 0x29, 0xe6, 0x4b, 0x32, 0xb7, 0x08, 0x6e, 0x74, 0xa9, 0xff, 0xbe, 0x47, 0x82, + 0x99, 0xc9, 0x83, 0x8d, 0x87, 0x50, 0x51, 0xcc, 0xa8, 0x06, 0xf9, 0x67, 0xce, 0x8c, 0x77, 0x2d, + 0x7a, 0x44, 0x57, 0x60, 0x79, 0x62, 0x8f, 0x8e, 0x05, 0x8a, 0x9a, 0x82, 0x82, 0xbe, 0x68, 0x32, + 0xf7, 0xfb, 0xb9, 0xf7, 0x34, 0xfc, 0x6f, 0x01, 0x8a, 0x5d, 0x7a, 0x3e, 0x67, 0xda, 0xab, 0x91, + 0xda, 0xab, 0xae, 0x6c, 0x6b, 0x0f, 0x56, 0x29, 0x37, 0xac, 0x81, 0xf3, 0xd4, 0xf5, 0x5c, 0xe2, + 0xfa, 0x9e, 0xd8, 0x21, 0x6e, 0x64, 0x59, 0xb3, 0x1f, 0x59, 0x76, 0x65, 0xa8, 0x59, 0x23, 0x49, + 0x43, 0x88, 0x3e, 0x82, 0x0a, 0x4b, 0x18, 0xba, 0x5f, 0x3b, 0x61, 0xbd, 0x40, 0x53, 0x5d, 0x96, + 0xe0, 0x18, 0x7c, 0x96, 0xa7, 0x1b, 0x45, 0xb0, 0x76, 0x01, 0x91, 0x06, 0xe3, 0x0b, 0xa8, 0xa6, + 0xdc, 0x73, 0xda, 0xd6, 0x4c, 0xb6, 0x6d, 0x7d, 0x61, 0x01, 0xa5, 0x7f, 0x46, 0x07, 0x56, 0x68, + 0x4f, 0xa5, 0x13, 0x6d, 0x80, 0x1e, 0xf8, 0x53, 0xab, 0xef, 0x1f, 0x7b, 0x84, 0xa6, 0x2f, 0x98, + 0xe5, 0xc0, 0x9f, 0xde, 0x8b, 0xd6, 0xe8, 0x32, 0x54, 0xa2, 0x4d, 0x5b, 0x23, 0xc7, 0x1b, 0x92, + 0x43, 0x5a, 0xa9, 0x60, 0x42, 0x64, 0xfa, 0x94, 0x5a, 0x8c, 0xbf, 0x35, 0xd0, 0x5f, 0x53, 0x2e, + 0x74, 0x07, 0xca, 0x07, 0x33, 0x8b, 0xb2, 0x86, 0xf7, 0xff, 0xca, 0xc2, 0x3d, 0x35, 0xee, 0xce, + 0xe8, 0x3e, 0x58, 0xef, 0x4a, 0x07, 0x6c, 0x65, 0x3c, 0x81, 0x73, 0xaa, 0x63, 0x4e, 0xd7, 0xae, + 0x27, 0xbb, 0xb6, 0x95, 
0xae, 0x90, 0xec, 0x8e, 0x4a, 0xbd, 0xdf, 0x34, 0x28, 0x52, 0x07, 0x39, + 0x13, 0xf5, 0x76, 0xa0, 0xc8, 0x48, 0x24, 0xe9, 0x2d, 0x05, 0x87, 0x65, 0x33, 0xb9, 0x1f, 0xb5, + 0x60, 0x39, 0x24, 0x36, 0x71, 0xea, 0xf9, 0x6d, 0x6d, 0x67, 0xa5, 0xb5, 0x29, 0x73, 0xb2, 0xb8, + 0x46, 0xd7, 0x09, 0x26, 0xae, 0x37, 0xec, 0x46, 0x31, 0x26, 0x0b, 0xc5, 0xb7, 0xe0, 0x9c, 0x6a, + 0x46, 0x15, 0x28, 0x7d, 0xde, 0x79, 0xd8, 0xd9, 0x7b, 0xdc, 0xa9, 0x2d, 0x45, 0x8b, 0xee, 0x7d, + 0xb3, 0xf7, 0xa0, 0xd3, 0xae, 0x69, 0xa8, 0x0a, 0x95, 0xce, 0xde, 0xbe, 0x25, 0x0c, 0x39, 0x3c, + 0x85, 0x52, 0xef, 0x7f, 0x5c, 0xa5, 0x39, 0x22, 0x87, 0xde, 0x81, 0xf2, 0xc4, 0x62, 0xb2, 0x49, + 0xc1, 0x53, 0x3d, 0xe3, 0x32, 0x2a, 0x85, 0xa4, 0x34, 0x61, 0xd5, 0xf0, 0x23, 0x28, 0xf6, 0xe8, + 0x0d, 0x8f, 0xae, 0xe5, 0xa1, 0x1f, 0x12, 0x9a, 0x8f, 0x9d, 0x91, 0x5c, 0xab, 0x98, 0x72, 0x2f, + 0xc1, 0x84, 0x7f, 0xd0, 0xa0, 0xd8, 0xdb, 0x6f, 0x47, 0x0d, 0x38, 0x29, 0x25, 0x82, 0xc2, 0xd8, + 0xf7, 0x47, 0x02, 0x7a, 0xf4, 0x1c, 0xd9, 0xfa, 0xce, 0x68, 0x44, 0x61, 0xeb, 0x26, 0x7d, 0x56, + 0x4b, 0x17, 0x5e, 0xd6, 0x8e, 0x4d, 0xd0, 0x85, 0x92, 0x84, 0xf5, 0x65, 0xaa, 0xcd, 0xb1, 0x01, + 0x7f, 0xa3, 0x41, 0x59, 0x08, 0xfa, 0x6b, 0x13, 0xac, 0x26, 0x94, 0xc5, 0x68, 0xe0, 0xdd, 0x56, + 0x85, 0x5b, 0xce, 0x0f, 0x19, 0x84, 0x7f, 0xd2, 0x60, 0xf5, 0x63, 0xd7, 0x1b, 0xb0, 0xfe, 0x9b, + 0xce, 0xf3, 0x63, 0x27, 0x24, 0x68, 0x0d, 0x96, 0x29, 0xf1, 0x78, 0x9b, 0xd8, 0x22, 0xba, 0xa5, + 0x1c, 0x83, 0xe5, 0x0e, 0xc4, 0xb4, 0x01, 0x6e, 0x7a, 0x30, 0x08, 0xd1, 0x23, 0x40, 0xb1, 0xba, + 0x59, 0xfe, 0x58, 0xe8, 0xa5, 0x46, 0xf5, 0x52, 0x6c, 0xa8, 0xed, 0x10, 0x56, 0x4d, 0x5e, 0xa5, + 0x3d, 0x16, 0xc9, 0xf5, 0x52, 0xb1, 0xe0, 0x35, 0x40, 0x6d, 0x87, 0xf0, 0x16, 0x84, 0x1c, 0x1e, + 0xbe, 0x07, 0x17, 0x12, 0xd6, 0x70, 0xec, 0x7b, 0x21, 0xa5, 0x1a, 0x07, 0x23, 0x46, 0x67, 0xb6, + 0x8b, 0x32, 0x02, 0xb7, 0xa0, 0xda, 0x76, 0x48, 0x44, 0x0c, 0x91, 0x37, 0xbd, 0x41, 0x2d, 0xbd, + 0x41, 0x7c, 0x0b, 0x6a, 0xf1, 0x3b, 0xbc, 0xea, 0x9b, 0xb0, 
0x3c, 0x8c, 0x0c, 0xbc, 0x64, 0x55, + 0x96, 0x64, 0xac, 0x33, 0x99, 0x17, 0xdf, 0xa4, 0x98, 0x05, 0xe3, 0x4f, 0x5f, 0xb2, 0x0d, 0x6b, + 0xc9, 0xf7, 0x78, 0xd9, 0xa6, 0x4a, 0xae, 0xf4, 0x87, 0x82, 0xbc, 0x58, 0x0a, 0xdf, 0x7e, 0xd1, + 0x28, 0xf8, 0xe4, 0x41, 0x5f, 0x02, 0x88, 0xcb, 0xf3, 0xd3, 0xd6, 0x65, 0xf5, 0x13, 0xa9, 0x26, + 0x39, 0x92, 0x57, 0x39, 0x32, 0x9f, 0x02, 0x85, 0x57, 0xa0, 0xc0, 0x77, 0x1a, 0xac, 0xca, 0xf8, + 0x53, 0xf7, 0x6d, 0x01, 0x90, 0xdc, 0x2b, 0x00, 0xb9, 0x4d, 0xb9, 0x28, 0x71, 0xf0, 0x73, 0xb8, + 0x0a, 0x25, 0xa6, 0x66, 0x59, 0x02, 0xf0, 0x56, 0x0b, 0x3f, 0xde, 0x85, 0xf5, 0x85, 0xf5, 0xd0, + 0x5b, 0x50, 0xb5, 0x87, 0xc3, 0xc0, 0x89, 0xd8, 0xc2, 0xbf, 0x0e, 0xa2, 0xe3, 0x28, 0x9b, 0x2b, + 0xd2, 0x4c, 0x47, 0x3e, 0xde, 0xa3, 0xc7, 0xc8, 0xe7, 0x03, 0xef, 0xc6, 0x49, 0xca, 0xf6, 0xb2, + 0x5b, 0x8b, 0xaf, 0xd3, 0xfe, 0xb2, 0x84, 0xa7, 0xe7, 0x25, 0xeb, 0x86, 0x7c, 0x2b, 0xee, 0x06, + 0x9b, 0x58, 0xd9, 0x6e, 0x70, 0xc4, 0xc2, 0x8f, 0x3b, 0xb4, 0x6c, 0xef, 0x75, 0xf1, 0x11, 0xdf, + 0xa0, 0x80, 0x7a, 0x67, 0xe4, 0x09, 0xde, 0xa5, 0xf7, 0xb2, 0x97, 0x3e, 0xd6, 0x6b, 0xa0, 0x8b, + 0xb1, 0x95, 0x15, 0x13, 0x01, 0xba, 0xcc, 0xc7, 0x56, 0x88, 0xbf, 0xd5, 0x68, 0x75, 0x29, 0xb0, + 0xaf, 0x7e, 0xbd, 0xc4, 0x2c, 0xcd, 0x2b, 0xb3, 0xf4, 0x32, 0x54, 0xec, 0x3e, 0x71, 0x27, 0x8e, + 0xe5, 0x7b, 0xa3, 0x19, 0xbd, 0x55, 0x65, 0x13, 0x98, 0x69, 0xcf, 0x1b, 0xcd, 0xf0, 0xcf, 0x1a, + 0xdd, 0x8d, 0xfc, 0x39, 0x71, 0xea, 0xdb, 0x92, 0xca, 0x9c, 0x4b, 0x67, 0x4e, 0xce, 0xb2, 0x7c, + 0x6a, 0x96, 0xa1, 0xab, 0x50, 0x73, 0x87, 0x9e, 0x1f, 0x38, 0x56, 0x1c, 0x54, 0xa0, 0x41, 0x55, + 0x66, 0x97, 0xfa, 0x85, 0xff, 0xd4, 0xa8, 0xa0, 0x29, 0x10, 0x79, 0xc7, 0x87, 0xb0, 0x26, 0x7f, + 0xd5, 0x58, 0x07, 0x33, 0x2b, 0x9e, 0x87, 0x51, 0xf3, 0x6f, 0xa8, 0x57, 0x36, 0xf3, 0xb2, 0x9c, + 0x6c, 0xe1, 0xdd, 0x19, 0x57, 0x7a, 0xf6, 0xf5, 0x87, 0xa6, 0x19, 0x87, 0xf1, 0x25, 0x5c, 0x5c, + 0x10, 0x7e, 0x96, 0x2f, 0xe9, 0xf4, 0x2f, 0x37, 0xf5, 0x73, 0xf0, 0x09, 0xd4, 0x7a, 0xfb, 0xf7, + 
0xbf, 0x1a, 0x8f, 0x6c, 0xd7, 0x13, 0x47, 0x50, 0x4f, 0x4e, 0x78, 0xfd, 0x74, 0xf3, 0xbc, 0x06, + 0xf9, 0xf0, 0xb9, 0xf8, 0x02, 0x89, 0x1e, 0x71, 0x13, 0x56, 0x95, 0xdc, 0xbc, 0x77, 0x06, 0x94, + 0x03, 0xfe, 0x2c, 0xee, 0xbf, 0x58, 0xb7, 0xbe, 0x2f, 0x41, 0xa9, 0xb7, 0x7f, 0x27, 0x02, 0x8d, + 0x3e, 0x00, 0x88, 0x87, 0x3d, 0x32, 0xe4, 0x66, 0x32, 0x5f, 0x00, 0x46, 0x5a, 0xc5, 0xf0, 0x12, + 0xfa, 0x04, 0x2a, 0xca, 0xd4, 0x45, 0x1b, 0xea, 0x89, 0xa4, 0x26, 0xb4, 0xb1, 0x39, 0xdf, 0xc9, + 0x20, 0xe1, 0xa5, 0xe8, 0x7b, 0x5e, 0x0c, 0x52, 0x54, 0x57, 0x63, 0xd5, 0x79, 0x6c, 0xac, 0xcf, + 0xf1, 0xc8, 0x14, 0x9f, 0xc1, 0x39, 0x75, 0x30, 0xa2, 0x44, 0xc9, 0xf4, 0x9c, 0x35, 0x2e, 0x2d, + 0xf0, 0xca, 0x74, 0xb7, 0x40, 0x97, 0xe2, 0x8c, 0xd6, 0xb3, 0x03, 0xe2, 0x84, 0xc6, 0xb4, 0x01, + 0xe2, 0xc1, 0xa0, 0x74, 0x35, 0x33, 0xb5, 0x8c, 0x8d, 0xb9, 0xbe, 0x14, 0x06, 0xfe, 0x43, 0x22, + 0x81, 0x21, 0x21, 0xf7, 0x46, 0x5a, 0x54, 0x25, 0x06, 0x2e, 0xc7, 0x49, 0x0c, 0x49, 0x65, 0x4f, + 0x62, 0x48, 0xe9, 0x37, 0x5e, 0x42, 0x1f, 0xd2, 0x44, 0xbd, 0x0c, 0x45, 0x32, 0x5a, 0x6d, 0x64, + 0xf4, 0x50, 0x72, 0x44, 0xa8, 0x69, 0x92, 0x23, 0x29, 0x69, 0x4e, 0x72, 0xa4, 0x97, 0xed, 0xc6, + 0x6d, 0x9a, 0x4b, 0x7e, 0x22, 0x6f, 0xcc, 0x53, 0x00, 0x91, 0x2b, 0xfb, 0x1f, 0x89, 0x64, 0x48, + 0xfc, 0xdf, 0xca, 0xe6, 0x02, 0x0d, 0x99, 0xc3, 0x90, 0x8c, 0xc2, 0xe0, 0x25, 0xb4, 0x0b, 0xba, + 0xbc, 0x79, 0xca, 0xe9, 0xa4, 0x6f, 0xba, 0x61, 0xcc, 0x73, 0x89, 0x2c, 0x77, 0x6f, 0xfe, 0xfa, + 0x62, 0x4b, 0xfb, 0xfd, 0xc5, 0x96, 0xf6, 0xc7, 0x8b, 0x2d, 0xed, 0xc7, 0xbf, 0xb6, 0x96, 0x9e, + 0xbc, 0x31, 0x71, 0x89, 0x13, 0x86, 0x0d, 0xd7, 0x6f, 0xb2, 0xa7, 0xe6, 0xd0, 0x6f, 0x4e, 0x48, + 0x93, 0xfe, 0xa7, 0xd4, 0xe4, 0xb9, 0x0e, 0x8a, 0x74, 0xf9, 0xee, 0x7f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x30, 0xe9, 0x27, 0x5f, 0xc5, 0x12, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -505,13 +2088,41 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type VTAdminClient interface { + // FindSchema returns a single Schema that matches the provided table name + // across all specified clusters IDs. Not specifying a set of cluster IDs + // causes the search to span all configured clusters. + // + // An error occurs if either no table exists across any of the clusters with + // the specified table name, or if multiple tables exist with that name. + FindSchema(ctx context.Context, in *FindSchemaRequest, opts ...grpc.CallOption) (*Schema, error) + // GetClusters returns all configured clusters. + GetClusters(ctx context.Context, in *GetClustersRequest, opts ...grpc.CallOption) (*GetClustersResponse, error) // GetGates returns all gates across all the specified clusters. GetGates(ctx context.Context, in *GetGatesRequest, opts ...grpc.CallOption) (*GetGatesResponse, error) + // GetKeyspaces returns all keyspaces across the specified clusters. + GetKeyspaces(ctx context.Context, in *GetKeyspacesRequest, opts ...grpc.CallOption) (*GetKeyspacesResponse, error) + // GetSchema returns the schema for the specified (cluster, keyspace, table) + // tuple. + GetSchema(ctx context.Context, in *GetSchemaRequest, opts ...grpc.CallOption) (*Schema, error) + // GetSchemas returns all schemas across the specified clusters. + GetSchemas(ctx context.Context, in *GetSchemasRequest, opts ...grpc.CallOption) (*GetSchemasResponse, error) // GetTablet looks up a tablet by hostname across all clusters and returns // the result. GetTablet(ctx context.Context, in *GetTabletRequest, opts ...grpc.CallOption) (*Tablet, error) // GetTablets returns all tablets across all the specified clusters. 
GetTablets(ctx context.Context, in *GetTabletsRequest, opts ...grpc.CallOption) (*GetTabletsResponse, error) + // GetVSchema returns a VSchema for the specified keyspace in the specified + // cluster. + GetVSchema(ctx context.Context, in *GetVSchemaRequest, opts ...grpc.CallOption) (*VSchema, error) + // GetVSchemas returns the VSchemas for all specified clusters. + GetVSchemas(ctx context.Context, in *GetVSchemasRequest, opts ...grpc.CallOption) (*GetVSchemasResponse, error) + // GetWorkflow returns a single Workflow for a given cluster, keyspace, and + // workflow name. + GetWorkflow(ctx context.Context, in *GetWorkflowRequest, opts ...grpc.CallOption) (*Workflow, error) + // GetWorkflows returns the Workflows for all specified clusters. + GetWorkflows(ctx context.Context, in *GetWorkflowsRequest, opts ...grpc.CallOption) (*GetWorkflowsResponse, error) + // VTExplain provides information on how Vitess plans to execute a particular query. + VTExplain(ctx context.Context, in *VTExplainRequest, opts ...grpc.CallOption) (*VTExplainResponse, error) } type vTAdminClient struct { @@ -522,6 +2133,24 @@ func NewVTAdminClient(cc *grpc.ClientConn) VTAdminClient { return &vTAdminClient{cc} } +func (c *vTAdminClient) FindSchema(ctx context.Context, in *FindSchemaRequest, opts ...grpc.CallOption) (*Schema, error) { + out := new(Schema) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/FindSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) GetClusters(ctx context.Context, in *GetClustersRequest, opts ...grpc.CallOption) (*GetClustersResponse, error) { + out := new(GetClustersResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetClusters", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *vTAdminClient) GetGates(ctx context.Context, in *GetGatesRequest, opts ...grpc.CallOption) (*GetGatesResponse, error) { out := new(GetGatesResponse) err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetGates", in, out, opts...) @@ -531,6 +2160,33 @@ func (c *vTAdminClient) GetGates(ctx context.Context, in *GetGatesRequest, opts return out, nil } +func (c *vTAdminClient) GetKeyspaces(ctx context.Context, in *GetKeyspacesRequest, opts ...grpc.CallOption) (*GetKeyspacesResponse, error) { + out := new(GetKeyspacesResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetKeyspaces", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) GetSchema(ctx context.Context, in *GetSchemaRequest, opts ...grpc.CallOption) (*Schema, error) { + out := new(Schema) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) GetSchemas(ctx context.Context, in *GetSchemasRequest, opts ...grpc.CallOption) (*GetSchemasResponse, error) { + out := new(GetSchemasResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetSchemas", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vTAdminClient) GetTablet(ctx context.Context, in *GetTabletRequest, opts ...grpc.CallOption) (*Tablet, error) { out := new(Tablet) err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetTablet", in, out, opts...) @@ -549,35 +2205,174 @@ func (c *vTAdminClient) GetTablets(ctx context.Context, in *GetTabletsRequest, o return out, nil } -// VTAdminServer is the server API for VTAdmin service. -type VTAdminServer interface { - // GetGates returns all gates across all the specified clusters. - GetGates(context.Context, *GetGatesRequest) (*GetGatesResponse, error) - // GetTablet looks up a tablet by hostname across all clusters and returns - // the result. 
- GetTablet(context.Context, *GetTabletRequest) (*Tablet, error) - // GetTablets returns all tablets across all the specified clusters. - GetTablets(context.Context, *GetTabletsRequest) (*GetTabletsResponse, error) +func (c *vTAdminClient) GetVSchema(ctx context.Context, in *GetVSchemaRequest, opts ...grpc.CallOption) (*VSchema, error) { + out := new(VSchema) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetVSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -// UnimplementedVTAdminServer can be embedded to have forward compatible implementations. -type UnimplementedVTAdminServer struct { +func (c *vTAdminClient) GetVSchemas(ctx context.Context, in *GetVSchemasRequest, opts ...grpc.CallOption) (*GetVSchemasResponse, error) { + out := new(GetVSchemasResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetVSchemas", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (*UnimplementedVTAdminServer) GetGates(ctx context.Context, req *GetGatesRequest) (*GetGatesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetGates not implemented") -} -func (*UnimplementedVTAdminServer) GetTablet(ctx context.Context, req *GetTabletRequest) (*Tablet, error) { +func (c *vTAdminClient) GetWorkflow(ctx context.Context, in *GetWorkflowRequest, opts ...grpc.CallOption) (*Workflow, error) { + out := new(Workflow) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) GetWorkflows(ctx context.Context, in *GetWorkflowsRequest, opts ...grpc.CallOption) (*GetWorkflowsResponse, error) { + out := new(GetWorkflowsResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetWorkflows", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) VTExplain(ctx context.Context, in *VTExplainRequest, opts ...grpc.CallOption) (*VTExplainResponse, error) { + out := new(VTExplainResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/VTExplain", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// VTAdminServer is the server API for VTAdmin service. +type VTAdminServer interface { + // FindSchema returns a single Schema that matches the provided table name + // across all specified clusters IDs. Not specifying a set of cluster IDs + // causes the search to span all configured clusters. + // + // An error occurs if either no table exists across any of the clusters with + // the specified table name, or if multiple tables exist with that name. + FindSchema(context.Context, *FindSchemaRequest) (*Schema, error) + // GetClusters returns all configured clusters. + GetClusters(context.Context, *GetClustersRequest) (*GetClustersResponse, error) + // GetGates returns all gates across all the specified clusters. + GetGates(context.Context, *GetGatesRequest) (*GetGatesResponse, error) + // GetKeyspaces returns all keyspaces across the specified clusters. + GetKeyspaces(context.Context, *GetKeyspacesRequest) (*GetKeyspacesResponse, error) + // GetSchema returns the schema for the specified (cluster, keyspace, table) + // tuple. + GetSchema(context.Context, *GetSchemaRequest) (*Schema, error) + // GetSchemas returns all schemas across the specified clusters. + GetSchemas(context.Context, *GetSchemasRequest) (*GetSchemasResponse, error) + // GetTablet looks up a tablet by hostname across all clusters and returns + // the result. + GetTablet(context.Context, *GetTabletRequest) (*Tablet, error) + // GetTablets returns all tablets across all the specified clusters. 
+ GetTablets(context.Context, *GetTabletsRequest) (*GetTabletsResponse, error) + // GetVSchema returns a VSchema for the specified keyspace in the specified + // cluster. + GetVSchema(context.Context, *GetVSchemaRequest) (*VSchema, error) + // GetVSchemas returns the VSchemas for all specified clusters. + GetVSchemas(context.Context, *GetVSchemasRequest) (*GetVSchemasResponse, error) + // GetWorkflow returns a single Workflow for a given cluster, keyspace, and + // workflow name. + GetWorkflow(context.Context, *GetWorkflowRequest) (*Workflow, error) + // GetWorkflows returns the Workflows for all specified clusters. + GetWorkflows(context.Context, *GetWorkflowsRequest) (*GetWorkflowsResponse, error) + // VTExplain provides information on how Vitess plans to execute a particular query. + VTExplain(context.Context, *VTExplainRequest) (*VTExplainResponse, error) +} + +// UnimplementedVTAdminServer can be embedded to have forward compatible implementations. +type UnimplementedVTAdminServer struct { +} + +func (*UnimplementedVTAdminServer) FindSchema(ctx context.Context, req *FindSchemaRequest) (*Schema, error) { + return nil, status.Errorf(codes.Unimplemented, "method FindSchema not implemented") +} +func (*UnimplementedVTAdminServer) GetClusters(ctx context.Context, req *GetClustersRequest) (*GetClustersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetClusters not implemented") +} +func (*UnimplementedVTAdminServer) GetGates(ctx context.Context, req *GetGatesRequest) (*GetGatesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGates not implemented") +} +func (*UnimplementedVTAdminServer) GetKeyspaces(ctx context.Context, req *GetKeyspacesRequest) (*GetKeyspacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetKeyspaces not implemented") +} +func (*UnimplementedVTAdminServer) GetSchema(ctx context.Context, req *GetSchemaRequest) (*Schema, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedVTAdminServer) GetSchemas(ctx context.Context, req *GetSchemasRequest) (*GetSchemasResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchemas not implemented") +} +func (*UnimplementedVTAdminServer) GetTablet(ctx context.Context, req *GetTabletRequest) (*Tablet, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTablet not implemented") } func (*UnimplementedVTAdminServer) GetTablets(ctx context.Context, req *GetTabletsRequest) (*GetTabletsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTablets not implemented") } +func (*UnimplementedVTAdminServer) GetVSchema(ctx context.Context, req *GetVSchemaRequest) (*VSchema, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVSchema not implemented") +} +func (*UnimplementedVTAdminServer) GetVSchemas(ctx context.Context, req *GetVSchemasRequest) (*GetVSchemasResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVSchemas not implemented") +} +func (*UnimplementedVTAdminServer) GetWorkflow(ctx context.Context, req *GetWorkflowRequest) (*Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetWorkflow not implemented") +} +func (*UnimplementedVTAdminServer) GetWorkflows(ctx context.Context, req *GetWorkflowsRequest) (*GetWorkflowsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetWorkflows not implemented") +} +func (*UnimplementedVTAdminServer) VTExplain(ctx context.Context, req *VTExplainRequest) (*VTExplainResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VTExplain not implemented") +} func RegisterVTAdminServer(s *grpc.Server, srv VTAdminServer) { s.RegisterService(&_VTAdmin_serviceDesc, srv) } +func _VTAdmin_FindSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(FindSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).FindSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/FindSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).FindSchema(ctx, req.(*FindSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_GetClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetClusters(ctx, req.(*GetClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VTAdmin_GetGates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetGatesRequest) if err := dec(in); err != nil { @@ -596,6 +2391,60 @@ func _VTAdmin_GetGates_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _VTAdmin_GetKeyspaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetKeyspacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetKeyspaces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetKeyspaces", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(VTAdminServer).GetKeyspaces(ctx, req.(*GetKeyspacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetSchema(ctx, req.(*GetSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_GetSchemas_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSchemasRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetSchemas(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetSchemas", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetSchemas(ctx, req.(*GetSchemasRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VTAdmin_GetTablet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetTabletRequest) if err := dec(in); err != nil { @@ -632,14 +2481,124 @@ func _VTAdmin_GetTablets_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _VTAdmin_GetVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVSchemaRequest) + if err := dec(in); 
err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetVSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetVSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetVSchema(ctx, req.(*GetVSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_GetVSchemas_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVSchemasRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetVSchemas(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetVSchemas", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetVSchemas(ctx, req.(*GetVSchemasRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_GetWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetWorkflow(ctx, req.(*GetWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_GetWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(VTAdminServer).GetWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetWorkflows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetWorkflows(ctx, req.(*GetWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_VTExplain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VTExplainRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).VTExplain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/VTExplain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).VTExplain(ctx, req.(*VTExplainRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _VTAdmin_serviceDesc = grpc.ServiceDesc{ ServiceName: "vtadmin.VTAdmin", HandlerType: (*VTAdminServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "FindSchema", + Handler: _VTAdmin_FindSchema_Handler, + }, + { + MethodName: "GetClusters", + Handler: _VTAdmin_GetClusters_Handler, + }, { MethodName: "GetGates", Handler: _VTAdmin_GetGates_Handler, }, + { + MethodName: "GetKeyspaces", + Handler: _VTAdmin_GetKeyspaces_Handler, + }, + { + MethodName: "GetSchema", + Handler: _VTAdmin_GetSchema_Handler, + }, + { + MethodName: "GetSchemas", + Handler: _VTAdmin_GetSchemas_Handler, + }, { MethodName: "GetTablet", Handler: _VTAdmin_GetTablet_Handler, @@ -648,7 +2607,6817 @@ var _VTAdmin_serviceDesc = grpc.ServiceDesc{ MethodName: "GetTablets", Handler: _VTAdmin_GetTablets_Handler, }, + { + MethodName: "GetVSchema", + Handler: _VTAdmin_GetVSchema_Handler, + }, + { + MethodName: "GetVSchemas", + Handler: _VTAdmin_GetVSchemas_Handler, + }, + { + MethodName: "GetWorkflow", + Handler: 
_VTAdmin_GetWorkflow_Handler, + }, + { + MethodName: "GetWorkflows", + Handler: _VTAdmin_GetWorkflows_Handler, + }, + { + MethodName: "VTExplain", + Handler: _VTAdmin_VTExplain_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "vtadmin.proto", } + +func (m *Cluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Cluster) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflows) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflows) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflows) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if 
len(m.Workflows) > 0 { + for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Workflows[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Keyspace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Keyspace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Keyspace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Shards) > 0 { + for k := range m.Shards { + v := m.Shards[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtadmin(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtadmin(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.Keyspace != nil { + { + size, err := m.Keyspace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Schema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *Schema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Schema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.TableSizes) > 0 { + for k := range m.TableSizes { + v := m.TableSizes[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtadmin(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtadmin(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.TableDefinitions) > 0 { + for iNdEx := len(m.TableDefinitions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TableDefinitions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Schema_ShardTableSize) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Schema_ShardTableSize) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Schema_ShardTableSize) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.DataLength != 0 { + i = encodeVarintVtadmin(dAtA, i, uint64(m.DataLength)) + i-- + dAtA[i] = 0x10 + } + if m.RowCount != 0 { + i = encodeVarintVtadmin(dAtA, i, uint64(m.RowCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Schema_TableSize) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Schema_TableSize) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Schema_TableSize) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ByShard) > 0 { + for k := range m.ByShard { + v := m.ByShard[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtadmin(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtadmin(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.DataLength != 0 { + i = encodeVarintVtadmin(dAtA, i, uint64(m.DataLength)) + i-- + dAtA[i] = 0x10 + } + if m.RowCount != 0 { + i = encodeVarintVtadmin(dAtA, i, uint64(m.RowCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Tablet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tablet) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Tablet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.State != 0 { + i = encodeVarintVtadmin(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 + } + if m.Tablet != nil { + { + size, err := m.Tablet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.VSchema != nil { + { + size, err := m.VSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Vtctld) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vtctld) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vtctld) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VTGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VTGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VTGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspaces) > 0 { + for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Keyspaces[iNdEx]) + copy(dAtA[i:], m.Keyspaces[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x1a + } + if len(m.Pool) > 0 { + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Workflow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Workflow != nil { + { + size, err := m.Workflow.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FindSchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *FindSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FindSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TableSizeOptions != nil { + { + size, err := m.TableSizeOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Table))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetClustersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClustersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetClustersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *GetClustersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClustersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetClustersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Clusters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetGatesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetGatesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetGatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetGatesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetGatesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetGatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Gates) > 0 { + for iNdEx := len(m.Gates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Gates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetKeyspacesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspacesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetKeyspacesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetKeyspacesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspacesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetKeyspacesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspaces) > 0 { + for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Keyspaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TableSizeOptions != nil { + { + size, err := m.TableSizeOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Table))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSchemasRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemasRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSchemasRequest) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TableSizeOptions != nil { + { + size, err := m.TableSizeOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSchemasResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemasResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSchemasResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Schemas) > 0 { + for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Schemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSchemaTableSizeOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemaTableSizeOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*GetSchemaTableSizeOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AggregateSizes { + i-- + if m.AggregateSizes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetTabletRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTabletRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTabletRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTabletsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTabletsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTabletsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetTabletsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTabletsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTabletsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Tablets) > 0 { + for iNdEx := len(m.Tablets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tablets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetVSchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetVSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetVSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetVSchemasRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetVSchemasRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetVSchemasRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetVSchemasResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetVSchemasResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetVSchemasResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.VSchemas) > 0 { + for iNdEx := len(m.VSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} 
+ +func (m *GetWorkflowRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetWorkflowRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetWorkflowRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ActiveOnly { + i-- + if m.ActiveOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetWorkflowsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetWorkflowsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetWorkflowsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.IgnoreKeyspaces) > 0 { + for iNdEx := len(m.IgnoreKeyspaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IgnoreKeyspaces[iNdEx]) + copy(dAtA[i:], 
m.IgnoreKeyspaces[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.IgnoreKeyspaces[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Keyspaces) > 0 { + for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Keyspaces[iNdEx]) + copy(dAtA[i:], m.Keyspaces[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspaces[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.ActiveOnly { + i-- + if m.ActiveOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetWorkflowsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetWorkflowsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetWorkflowsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.WorkflowsByCluster) > 0 { + for k := range m.WorkflowsByCluster { + v := m.WorkflowsByCluster[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtadmin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtadmin(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtadmin(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VTExplainRequest) Marshal() (dAtA []byte, err error) { 
+ size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VTExplainRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VTExplainRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Cluster) > 0 { + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VTExplainResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VTExplainResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VTExplainResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Response) > 0 { + i -= len(m.Response) + copy(dAtA[i:], m.Response) + i = encodeVarintVtadmin(dAtA, i, uint64(len(m.Response))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintVtadmin(dAtA []byte, offset int, v uint64) int { + offset -= sovVtadmin(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + 
offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Cluster) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterWorkflows) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Workflows) > 0 { + for _, e := range m.Workflows { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Keyspace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.Keyspace != nil { + l = m.Keyspace.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if len(m.Shards) > 0 { + for k, v := range m.Shards { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtadmin(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtadmin(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtadmin(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Schema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if len(m.TableDefinitions) > 0 { + for _, e := range m.TableDefinitions { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if len(m.TableSizes) > 0 { + for k, v := range m.TableSizes { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtadmin(uint64(l)) + } + mapEntrySize := 1 + len(k) + 
sovVtadmin(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtadmin(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Schema_ShardTableSize) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RowCount != 0 { + n += 1 + sovVtadmin(uint64(m.RowCount)) + } + if m.DataLength != 0 { + n += 1 + sovVtadmin(uint64(m.DataLength)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Schema_TableSize) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RowCount != 0 { + n += 1 + sovVtadmin(uint64(m.RowCount)) + } + if m.DataLength != 0 { + n += 1 + sovVtadmin(uint64(m.DataLength)) + } + if len(m.ByShard) > 0 { + for k, v := range m.ByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtadmin(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtadmin(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtadmin(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Tablet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.Tablet != nil { + l = m.Tablet.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.State != 0 { + n += 1 + sovVtadmin(uint64(m.State)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.VSchema != nil { + l = m.VSchema.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Vtctld) Size() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VTGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Pool) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if len(m.Keyspaces) > 0 { + for _, s := range m.Keyspaces { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Workflow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.Workflow != nil { + l = m.Workflow.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FindSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Table) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.TableSizeOptions != nil { + l = m.TableSizeOptions.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetClustersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetClustersResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetGatesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetGatesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Gates) > 0 { + for _, e := range m.Gates { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetKeyspacesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetKeyspacesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keyspaces) > 0 { + for _, e := range m.Keyspaces { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Table) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.TableSizeOptions != nil { + l = m.TableSizeOptions.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*GetSchemasRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.TableSizeOptions != nil { + l = m.TableSizeOptions.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSchemasResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Schemas) > 0 { + for _, e := range m.Schemas { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSchemaTableSizeOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AggregateSizes { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetTabletRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetTabletsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetTabletsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tablets) > 0 { + for _, e := range m.Tablets { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetVSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ 
= l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetVSchemasRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetVSchemasResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.VSchemas) > 0 { + for _, e := range m.VSchemas { + l = e.Size() + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetWorkflowRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.ActiveOnly { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetWorkflowsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.ActiveOnly { + n += 2 + } + if len(m.Keyspaces) > 0 { + for _, s := range m.Keyspaces { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if len(m.IgnoreKeyspaces) > 0 { + for _, s := range m.IgnoreKeyspaces { + l = len(s) + n += 1 + l + sovVtadmin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetWorkflowsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
len(m.WorkflowsByCluster) > 0 { + for k, v := range m.WorkflowsByCluster { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtadmin(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtadmin(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtadmin(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VTExplainRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VTExplainResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Response) + if l > 0 { + n += 1 + l + sovVtadmin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVtadmin(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVtadmin(x uint64) (n int) { + return sovVtadmin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Cluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflows) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflows: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflows: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflows = append(m.Workflows, &Workflow{}) + if err := m.Workflows[len(m.Workflows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin 
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := 
m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &vtctldata.Keyspace{} + } + if err := m.Keyspace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shards == nil { + m.Shards = make(map[string]*vtctldata.Shard) + } + var mapkey string + var mapvalue *vtctldata.Shard + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtadmin + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtadmin + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtadmin + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &vtctldata.Shard{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Shards[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Schema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Schema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Schema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableDefinitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableDefinitions = append(m.TableDefinitions, &tabletmanagerdata.TableDefinition{}) + if err := m.TableDefinitions[len(m.TableDefinitions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizes == nil { + m.TableSizes = make(map[string]*Schema_TableSize) + } + var mapkey string + var mapvalue *Schema_TableSize + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtadmin + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtadmin + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtadmin + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Schema_TableSize{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.TableSizes[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Schema_ShardTableSize) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardTableSize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardTableSize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) + } + m.RowCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataLength", wireType) + } + m.DataLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataLength |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Schema_TableSize) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableSize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableSize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) + } + m.RowCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataLength", wireType) + } + m.DataLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataLength |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 
{ + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ByShard == nil { + m.ByShard = make(map[string]*Schema_ShardTableSize) + } + var mapkey string + var mapvalue *Schema_ShardTableSize + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtadmin + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtadmin + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtadmin + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Schema_ShardTableSize{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Tablet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Tablet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Tablet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := 
m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.Tablet{} + } + if err := m.Tablet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Tablet_ServingState(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vtctld) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vtctld: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vtctld: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VTGate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VTGate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VTGate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := 
m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspaces = append(m.Keyspaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Workflow == nil { + m.Workflow = &vtctldata.Workflow{} + } + if err := m.Workflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FindSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FindSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClustersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClustersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClustersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClustersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetGatesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetGatesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetGatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetGatesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetGatesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetGatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gates = append(m.Gates, &VTGate{}) + if err := m.Gates[len(m.Gates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspacesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspacesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex 
< 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemasRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemasResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Schemas = append(m.Schemas, &Schema{}) + if err := m.Schemas[len(m.Schemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaTableSizeOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaTableSizeOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaTableSizeOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregateSizes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AggregateSizes = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tablets = append(m.Tablets, &Tablet{}) + if err := 
m.Tablets[len(m.Tablets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetVSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetVSchemasRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetVSchemasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetVSchemasResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetVSchemasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VSchemas = append(m.VSchemas, &VSchema{}) + if err := m.VSchemas[len(m.VSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetWorkflowRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetWorkflowRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetWorkflowsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveOnly = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspaces = append(m.Keyspaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreKeyspaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IgnoreKeyspaces = append(m.IgnoreKeyspaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetWorkflowsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowsByCluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkflowsByCluster == nil { + m.WorkflowsByCluster = make(map[string]*ClusterWorkflows) + } + var mapkey string + var mapvalue *ClusterWorkflows + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtadmin + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtadmin + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtadmin + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ClusterWorkflows{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.WorkflowsByCluster[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VTExplainRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VTExplainRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VTExplainRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VTExplainResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VTExplainResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VTExplainResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtadmin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtadmin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtadmin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Response = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtadmin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtadmin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVtadmin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtadmin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtadmin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtadmin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVtadmin + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVtadmin + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVtadmin + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVtadmin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVtadmin = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVtadmin = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 3d648080931..40ec87c07e1 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -1,16 +1,23 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
+// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vtctldata.proto package vtctldata import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - + binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" logutil "vitess.io/vitess/go/vt/proto/logutil" + mysqlctl "vitess.io/vitess/go/vt/proto/mysqlctl" + replicationdata "vitess.io/vitess/go/vt/proto/replicationdata" + tabletmanagerdata "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodata "vitess.io/vitess/go/vt/proto/topodata" + vschema "vitess.io/vitess/go/vt/proto/vschema" + vttime "vitess.io/vitess/go/vt/proto/vttime" ) // Reference imports to suppress errors if they are not otherwise used. @@ -40,18 +47,26 @@ func (*ExecuteVtctlCommandRequest) ProtoMessage() {} func (*ExecuteVtctlCommandRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f41247b323a1ab2e, []int{0} } - func (m *ExecuteVtctlCommandRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteVtctlCommandRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteVtctlCommandRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteVtctlCommandRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteVtctlCommandRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteVtctlCommandRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteVtctlCommandRequest.Merge(m, src) } func (m *ExecuteVtctlCommandRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteVtctlCommandRequest.Size(m) + return m.Size() } func (m *ExecuteVtctlCommandRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteVtctlCommandRequest.DiscardUnknown(m) @@ -87,18 +102,26 @@ func (*ExecuteVtctlCommandResponse) ProtoMessage() {} func (*ExecuteVtctlCommandResponse) Descriptor() 
([]byte, []int) { return fileDescriptor_f41247b323a1ab2e, []int{1} } - func (m *ExecuteVtctlCommandResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteVtctlCommandResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteVtctlCommandResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteVtctlCommandResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteVtctlCommandResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteVtctlCommandResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteVtctlCommandResponse.Merge(m, src) } func (m *ExecuteVtctlCommandResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteVtctlCommandResponse.Size(m) + return m.Size() } func (m *ExecuteVtctlCommandResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteVtctlCommandResponse.DiscardUnknown(m) @@ -113,152 +136,181 @@ func (m *ExecuteVtctlCommandResponse) GetEvent() *logutil.Event { return nil } -type GetKeyspacesRequest struct { +// TableMaterializeSttings contains the settings for one table. +type TableMaterializeSettings struct { + TargetTable string `protobuf:"bytes,1,opt,name=target_table,json=targetTable,proto3" json:"target_table,omitempty"` + // source_expression is a select statement. + SourceExpression string `protobuf:"bytes,2,opt,name=source_expression,json=sourceExpression,proto3" json:"source_expression,omitempty"` + // create_ddl contains the DDL to create the target table. + // If empty, the target table must already exist. + // if "copy", the target table DDL is the same as the source table. 
+ CreateDdl string `protobuf:"bytes,3,opt,name=create_ddl,json=createDdl,proto3" json:"create_ddl,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetKeyspacesRequest) Reset() { *m = GetKeyspacesRequest{} } -func (m *GetKeyspacesRequest) String() string { return proto.CompactTextString(m) } -func (*GetKeyspacesRequest) ProtoMessage() {} -func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { +func (m *TableMaterializeSettings) Reset() { *m = TableMaterializeSettings{} } +func (m *TableMaterializeSettings) String() string { return proto.CompactTextString(m) } +func (*TableMaterializeSettings) ProtoMessage() {} +func (*TableMaterializeSettings) Descriptor() ([]byte, []int) { return fileDescriptor_f41247b323a1ab2e, []int{2} } - -func (m *GetKeyspacesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetKeyspacesRequest.Unmarshal(m, b) +func (m *TableMaterializeSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *GetKeyspacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetKeyspacesRequest.Marshal(b, m, deterministic) +func (m *TableMaterializeSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TableMaterializeSettings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *GetKeyspacesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetKeyspacesRequest.Merge(m, src) +func (m *TableMaterializeSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableMaterializeSettings.Merge(m, src) } -func (m *GetKeyspacesRequest) XXX_Size() int { - return xxx_messageInfo_GetKeyspacesRequest.Size(m) +func (m *TableMaterializeSettings) XXX_Size() int { + return m.Size() } -func (m *GetKeyspacesRequest) XXX_DiscardUnknown() { 
- xxx_messageInfo_GetKeyspacesRequest.DiscardUnknown(m) +func (m *TableMaterializeSettings) XXX_DiscardUnknown() { + xxx_messageInfo_TableMaterializeSettings.DiscardUnknown(m) } -var xxx_messageInfo_GetKeyspacesRequest proto.InternalMessageInfo - -type GetKeyspacesResponse struct { - Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var xxx_messageInfo_TableMaterializeSettings proto.InternalMessageInfo -func (m *GetKeyspacesResponse) Reset() { *m = GetKeyspacesResponse{} } -func (m *GetKeyspacesResponse) String() string { return proto.CompactTextString(m) } -func (*GetKeyspacesResponse) ProtoMessage() {} -func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{3} +func (m *TableMaterializeSettings) GetTargetTable() string { + if m != nil { + return m.TargetTable + } + return "" } -func (m *GetKeyspacesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetKeyspacesResponse.Unmarshal(m, b) -} -func (m *GetKeyspacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetKeyspacesResponse.Marshal(b, m, deterministic) -} -func (m *GetKeyspacesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetKeyspacesResponse.Merge(m, src) -} -func (m *GetKeyspacesResponse) XXX_Size() int { - return xxx_messageInfo_GetKeyspacesResponse.Size(m) -} -func (m *GetKeyspacesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetKeyspacesResponse.DiscardUnknown(m) +func (m *TableMaterializeSettings) GetSourceExpression() string { + if m != nil { + return m.SourceExpression + } + return "" } -var xxx_messageInfo_GetKeyspacesResponse proto.InternalMessageInfo - -func (m *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { +func (m *TableMaterializeSettings) GetCreateDdl() string { if m != nil { - return m.Keyspaces + 
return m.CreateDdl } - return nil + return "" } -type GetKeyspaceRequest struct { - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` +// MaterializeSettings contains the settings for the Materialize command. +type MaterializeSettings struct { + // workflow is the name of the workflow. + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + SourceKeyspace string `protobuf:"bytes,2,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + // stop_after_copy specifies if vreplication should be stopped after copying. + StopAfterCopy bool `protobuf:"varint,4,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + TableSettings []*TableMaterializeSettings `protobuf:"bytes,5,rep,name=table_settings,json=tableSettings,proto3" json:"table_settings,omitempty"` + // optional parameters. 
+ Cell string `protobuf:"bytes,6,opt,name=cell,proto3" json:"cell,omitempty"` + TabletTypes string `protobuf:"bytes,7,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` + // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow + // it is of the type + ExternalCluster string `protobuf:"bytes,8,opt,name=external_cluster,json=externalCluster,proto3" json:"external_cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetKeyspaceRequest) Reset() { *m = GetKeyspaceRequest{} } -func (m *GetKeyspaceRequest) String() string { return proto.CompactTextString(m) } -func (*GetKeyspaceRequest) ProtoMessage() {} -func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{4} +func (m *MaterializeSettings) Reset() { *m = MaterializeSettings{} } +func (m *MaterializeSettings) String() string { return proto.CompactTextString(m) } +func (*MaterializeSettings) ProtoMessage() {} +func (*MaterializeSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{3} } - -func (m *GetKeyspaceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetKeyspaceRequest.Unmarshal(m, b) +func (m *MaterializeSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *GetKeyspaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetKeyspaceRequest.Marshal(b, m, deterministic) +func (m *MaterializeSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MaterializeSettings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *GetKeyspaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetKeyspaceRequest.Merge(m, src) 
+func (m *MaterializeSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaterializeSettings.Merge(m, src) } -func (m *GetKeyspaceRequest) XXX_Size() int { - return xxx_messageInfo_GetKeyspaceRequest.Size(m) +func (m *MaterializeSettings) XXX_Size() int { + return m.Size() } -func (m *GetKeyspaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetKeyspaceRequest.DiscardUnknown(m) +func (m *MaterializeSettings) XXX_DiscardUnknown() { + xxx_messageInfo_MaterializeSettings.DiscardUnknown(m) } -var xxx_messageInfo_GetKeyspaceRequest proto.InternalMessageInfo +var xxx_messageInfo_MaterializeSettings proto.InternalMessageInfo -func (m *GetKeyspaceRequest) GetKeyspace() string { +func (m *MaterializeSettings) GetWorkflow() string { if m != nil { - return m.Keyspace + return m.Workflow } return "" } -type GetKeyspaceResponse struct { - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *MaterializeSettings) GetSourceKeyspace() string { + if m != nil { + return m.SourceKeyspace + } + return "" } -func (m *GetKeyspaceResponse) Reset() { *m = GetKeyspaceResponse{} } -func (m *GetKeyspaceResponse) String() string { return proto.CompactTextString(m) } -func (*GetKeyspaceResponse) ProtoMessage() {} -func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{5} +func (m *MaterializeSettings) GetTargetKeyspace() string { + if m != nil { + return m.TargetKeyspace + } + return "" } -func (m *GetKeyspaceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetKeyspaceResponse.Unmarshal(m, b) -} -func (m *GetKeyspaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetKeyspaceResponse.Marshal(b, m, deterministic) -} -func (m *GetKeyspaceResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetKeyspaceResponse.Merge(m, src) +func (m *MaterializeSettings) GetStopAfterCopy() bool { + if m != nil { + return m.StopAfterCopy + } + return false } -func (m *GetKeyspaceResponse) XXX_Size() int { - return xxx_messageInfo_GetKeyspaceResponse.Size(m) + +func (m *MaterializeSettings) GetTableSettings() []*TableMaterializeSettings { + if m != nil { + return m.TableSettings + } + return nil } -func (m *GetKeyspaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetKeyspaceResponse.DiscardUnknown(m) + +func (m *MaterializeSettings) GetCell() string { + if m != nil { + return m.Cell + } + return "" } -var xxx_messageInfo_GetKeyspaceResponse proto.InternalMessageInfo +func (m *MaterializeSettings) GetTabletTypes() string { + if m != nil { + return m.TabletTypes + } + return "" +} -func (m *GetKeyspaceResponse) GetKeyspace() *Keyspace { +func (m *MaterializeSettings) GetExternalCluster() string { if m != nil { - return m.Keyspace + return m.ExternalCluster } - return nil + return "" } type Keyspace struct { @@ -273,20 +325,28 @@ func (m *Keyspace) Reset() { *m = Keyspace{} } func (m *Keyspace) String() string { return proto.CompactTextString(m) } func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{6} + return fileDescriptor_f41247b323a1ab2e, []int{4} } - func (m *Keyspace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Keyspace.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Keyspace) XXX_Merge(src proto.Message) { xxx_messageInfo_Keyspace.Merge(m, src) } func (m *Keyspace) XXX_Size() int { - return 
xxx_messageInfo_Keyspace.Size(m) + return m.Size() } func (m *Keyspace) XXX_DiscardUnknown() { xxx_messageInfo_Keyspace.DiscardUnknown(m) @@ -308,348 +368,18359 @@ func (m *Keyspace) GetKeyspace() *topodata.Keyspace { return nil } -type FindAllShardsInKeyspaceRequest struct { - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type Shard struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Shard *topodata.Shard `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FindAllShardsInKeyspaceRequest) Reset() { *m = FindAllShardsInKeyspaceRequest{} } -func (m *FindAllShardsInKeyspaceRequest) String() string { return proto.CompactTextString(m) } -func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} -func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{7} +func (m *Shard) Reset() { *m = Shard{} } +func (m *Shard) String() string { return proto.CompactTextString(m) } +func (*Shard) ProtoMessage() {} +func (*Shard) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{5} } - -func (m *FindAllShardsInKeyspaceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FindAllShardsInKeyspaceRequest.Unmarshal(m, b) +func (m *Shard) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *FindAllShardsInKeyspaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FindAllShardsInKeyspaceRequest.Marshal(b, m, deterministic) +func (m *Shard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + 
return xxx_messageInfo_Shard.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *FindAllShardsInKeyspaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FindAllShardsInKeyspaceRequest.Merge(m, src) +func (m *Shard) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shard.Merge(m, src) } -func (m *FindAllShardsInKeyspaceRequest) XXX_Size() int { - return xxx_messageInfo_FindAllShardsInKeyspaceRequest.Size(m) +func (m *Shard) XXX_Size() int { + return m.Size() } -func (m *FindAllShardsInKeyspaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FindAllShardsInKeyspaceRequest.DiscardUnknown(m) +func (m *Shard) XXX_DiscardUnknown() { + xxx_messageInfo_Shard.DiscardUnknown(m) } -var xxx_messageInfo_FindAllShardsInKeyspaceRequest proto.InternalMessageInfo +var xxx_messageInfo_Shard proto.InternalMessageInfo -func (m *FindAllShardsInKeyspaceRequest) GetKeyspace() string { +func (m *Shard) GetKeyspace() string { if m != nil { return m.Keyspace } return "" } -type FindAllShardsInKeyspaceResponse struct { - Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *Shard) GetName() string { + if m != nil { + return m.Name + } + return "" } -func (m *FindAllShardsInKeyspaceResponse) Reset() { *m = FindAllShardsInKeyspaceResponse{} } -func (m *FindAllShardsInKeyspaceResponse) String() string { return proto.CompactTextString(m) } -func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} -func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{8} +func (m *Shard) GetShard() *topodata.Shard { + if m != nil { + return m.Shard + } + return nil } -func (m 
*FindAllShardsInKeyspaceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FindAllShardsInKeyspaceResponse.Unmarshal(m, b) +// TODO: comment the hell out of this. +type Workflow struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Source *Workflow_ReplicationLocation `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + Target *Workflow_ReplicationLocation `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + MaxVReplicationLag int64 `protobuf:"varint,4,opt,name=max_v_replication_lag,json=maxVReplicationLag,proto3" json:"max_v_replication_lag,omitempty"` + ShardStreams map[string]*Workflow_ShardStream `protobuf:"bytes,5,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FindAllShardsInKeyspaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FindAllShardsInKeyspaceResponse.Marshal(b, m, deterministic) + +func (m *Workflow) Reset() { *m = Workflow{} } +func (m *Workflow) String() string { return proto.CompactTextString(m) } +func (*Workflow) ProtoMessage() {} +func (*Workflow) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{6} } -func (m *FindAllShardsInKeyspaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_FindAllShardsInKeyspaceResponse.Merge(m, src) +func (m *Workflow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *FindAllShardsInKeyspaceResponse) XXX_Size() int { - return xxx_messageInfo_FindAllShardsInKeyspaceResponse.Size(m) +func (m *Workflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Workflow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *FindAllShardsInKeyspaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_FindAllShardsInKeyspaceResponse.DiscardUnknown(m) +func (m *Workflow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow.Merge(m, src) +} +func (m *Workflow) XXX_Size() int { + return m.Size() +} +func (m *Workflow) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow.DiscardUnknown(m) } -var xxx_messageInfo_FindAllShardsInKeyspaceResponse proto.InternalMessageInfo +var xxx_messageInfo_Workflow proto.InternalMessageInfo -func (m *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { +func (m *Workflow) GetName() string { if m != nil { - return m.Shards + return m.Name + } + return "" +} + +func (m *Workflow) GetSource() *Workflow_ReplicationLocation { + if m != nil { + return m.Source } return nil } -type Shard struct { - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Shard *topodata.Shard `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *Workflow) GetTarget() *Workflow_ReplicationLocation { + if m != nil { + return m.Target + } + return nil } -func (m *Shard) Reset() { *m = Shard{} } -func (m *Shard) String() string { return proto.CompactTextString(m) } -func (*Shard) ProtoMessage() {} -func (*Shard) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{9} +func (m *Workflow) GetMaxVReplicationLag() int64 { + if m != nil { + return m.MaxVReplicationLag + } + return 0 } -func (m *Shard) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Shard.Unmarshal(m, b) +func (m *Workflow) GetShardStreams() map[string]*Workflow_ShardStream { + if m != nil { + return m.ShardStreams + } + return nil } 
-func (m *Shard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Shard.Marshal(b, m, deterministic) + +type Workflow_ReplicationLocation struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Shard) XXX_Merge(src proto.Message) { - xxx_messageInfo_Shard.Merge(m, src) + +func (m *Workflow_ReplicationLocation) Reset() { *m = Workflow_ReplicationLocation{} } +func (m *Workflow_ReplicationLocation) String() string { return proto.CompactTextString(m) } +func (*Workflow_ReplicationLocation) ProtoMessage() {} +func (*Workflow_ReplicationLocation) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{6, 1} } -func (m *Shard) XXX_Size() int { - return xxx_messageInfo_Shard.Size(m) +func (m *Workflow_ReplicationLocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *Shard) XXX_DiscardUnknown() { - xxx_messageInfo_Shard.DiscardUnknown(m) +func (m *Workflow_ReplicationLocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Workflow_ReplicationLocation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Workflow_ReplicationLocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow_ReplicationLocation.Merge(m, src) +} +func (m *Workflow_ReplicationLocation) XXX_Size() int { + return m.Size() +} +func (m *Workflow_ReplicationLocation) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow_ReplicationLocation.DiscardUnknown(m) } -var xxx_messageInfo_Shard proto.InternalMessageInfo +var xxx_messageInfo_Workflow_ReplicationLocation proto.InternalMessageInfo -func (m 
*Shard) GetKeyspace() string { +func (m *Workflow_ReplicationLocation) GetKeyspace() string { if m != nil { return m.Keyspace } return "" } -func (m *Shard) GetName() string { +func (m *Workflow_ReplicationLocation) GetShards() []string { if m != nil { - return m.Name + return m.Shards } - return "" + return nil } -func (m *Shard) GetShard() *topodata.Shard { +type Workflow_ShardStream struct { + Streams []*Workflow_Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"` + TabletControls []*topodata.Shard_TabletControl `protobuf:"bytes,2,rep,name=tablet_controls,json=tabletControls,proto3" json:"tablet_controls,omitempty"` + IsPrimaryServing bool `protobuf:"varint,3,opt,name=is_primary_serving,json=isPrimaryServing,proto3" json:"is_primary_serving,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Workflow_ShardStream) Reset() { *m = Workflow_ShardStream{} } +func (m *Workflow_ShardStream) String() string { return proto.CompactTextString(m) } +func (*Workflow_ShardStream) ProtoMessage() {} +func (*Workflow_ShardStream) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{6, 2} +} +func (m *Workflow_ShardStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Workflow_ShardStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Workflow_ShardStream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Workflow_ShardStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow_ShardStream.Merge(m, src) +} +func (m *Workflow_ShardStream) XXX_Size() int { + return m.Size() +} +func (m *Workflow_ShardStream) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow_ShardStream.DiscardUnknown(m) +} + +var xxx_messageInfo_Workflow_ShardStream 
proto.InternalMessageInfo + +func (m *Workflow_ShardStream) GetStreams() []*Workflow_Stream { if m != nil { - return m.Shard + return m.Streams } return nil } -// TableMaterializeSttings contains the settings for one table. -type TableMaterializeSettings struct { - TargetTable string `protobuf:"bytes,1,opt,name=target_table,json=targetTable,proto3" json:"target_table,omitempty"` - // source_expression is a select statement. - SourceExpression string `protobuf:"bytes,2,opt,name=source_expression,json=sourceExpression,proto3" json:"source_expression,omitempty"` - // create_ddl contains the DDL to create the target table. - // If empty, the target table must already exist. - // if "copy", the target table DDL is the same as the source table. - CreateDdl string `protobuf:"bytes,3,opt,name=create_ddl,json=createDdl,proto3" json:"create_ddl,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *Workflow_ShardStream) GetTabletControls() []*topodata.Shard_TabletControl { + if m != nil { + return m.TabletControls + } + return nil } -func (m *TableMaterializeSettings) Reset() { *m = TableMaterializeSettings{} } -func (m *TableMaterializeSettings) String() string { return proto.CompactTextString(m) } -func (*TableMaterializeSettings) ProtoMessage() {} -func (*TableMaterializeSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{10} +func (m *Workflow_ShardStream) GetIsPrimaryServing() bool { + if m != nil { + return m.IsPrimaryServing + } + return false } -func (m *TableMaterializeSettings) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TableMaterializeSettings.Unmarshal(m, b) +type Workflow_Stream struct { + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Tablet *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet,proto3" 
json:"tablet,omitempty"` + BinlogSource *binlogdata.BinlogSource `protobuf:"bytes,4,opt,name=binlog_source,json=binlogSource,proto3" json:"binlog_source,omitempty"` + Position string `protobuf:"bytes,5,opt,name=position,proto3" json:"position,omitempty"` + StopPosition string `protobuf:"bytes,6,opt,name=stop_position,json=stopPosition,proto3" json:"stop_position,omitempty"` + State string `protobuf:"bytes,7,opt,name=state,proto3" json:"state,omitempty"` + DbName string `protobuf:"bytes,8,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + TransactionTimestamp *vttime.Time `protobuf:"bytes,9,opt,name=transaction_timestamp,json=transactionTimestamp,proto3" json:"transaction_timestamp,omitempty"` + TimeUpdated *vttime.Time `protobuf:"bytes,10,opt,name=time_updated,json=timeUpdated,proto3" json:"time_updated,omitempty"` + Message string `protobuf:"bytes,11,opt,name=message,proto3" json:"message,omitempty"` + CopyStates []*Workflow_Stream_CopyState `protobuf:"bytes,12,rep,name=copy_states,json=copyStates,proto3" json:"copy_states,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *TableMaterializeSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TableMaterializeSettings.Marshal(b, m, deterministic) + +func (m *Workflow_Stream) Reset() { *m = Workflow_Stream{} } +func (m *Workflow_Stream) String() string { return proto.CompactTextString(m) } +func (*Workflow_Stream) ProtoMessage() {} +func (*Workflow_Stream) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{6, 3} } -func (m *TableMaterializeSettings) XXX_Merge(src proto.Message) { - xxx_messageInfo_TableMaterializeSettings.Merge(m, src) +func (m *Workflow_Stream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *TableMaterializeSettings) XXX_Size() int { - return xxx_messageInfo_TableMaterializeSettings.Size(m) +func (m 
*Workflow_Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Workflow_Stream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *TableMaterializeSettings) XXX_DiscardUnknown() { - xxx_messageInfo_TableMaterializeSettings.DiscardUnknown(m) +func (m *Workflow_Stream) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow_Stream.Merge(m, src) +} +func (m *Workflow_Stream) XXX_Size() int { + return m.Size() +} +func (m *Workflow_Stream) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow_Stream.DiscardUnknown(m) } -var xxx_messageInfo_TableMaterializeSettings proto.InternalMessageInfo +var xxx_messageInfo_Workflow_Stream proto.InternalMessageInfo -func (m *TableMaterializeSettings) GetTargetTable() string { +func (m *Workflow_Stream) GetId() int64 { if m != nil { - return m.TargetTable + return m.Id + } + return 0 +} + +func (m *Workflow_Stream) GetShard() string { + if m != nil { + return m.Shard } return "" } -func (m *TableMaterializeSettings) GetSourceExpression() string { +func (m *Workflow_Stream) GetTablet() *topodata.TabletAlias { if m != nil { - return m.SourceExpression + return m.Tablet + } + return nil +} + +func (m *Workflow_Stream) GetBinlogSource() *binlogdata.BinlogSource { + if m != nil { + return m.BinlogSource + } + return nil +} + +func (m *Workflow_Stream) GetPosition() string { + if m != nil { + return m.Position } return "" } -func (m *TableMaterializeSettings) GetCreateDdl() string { +func (m *Workflow_Stream) GetStopPosition() string { if m != nil { - return m.CreateDdl + return m.StopPosition } return "" } -// MaterializeSettings contains the settings for the Materialize command. -type MaterializeSettings struct { - // workflow is the name of the workflow. 
- Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` - SourceKeyspace string `protobuf:"bytes,2,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` - TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` - // stop_after_copy specifies if vreplication should be stopped after copying. - StopAfterCopy bool `protobuf:"varint,4,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` - TableSettings []*TableMaterializeSettings `protobuf:"bytes,5,rep,name=table_settings,json=tableSettings,proto3" json:"table_settings,omitempty"` - // optional parameters. - Cell string `protobuf:"bytes,6,opt,name=cell,proto3" json:"cell,omitempty"` - TabletTypes string `protobuf:"bytes,7,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` +func (m *Workflow_Stream) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *Workflow_Stream) GetDbName() string { + if m != nil { + return m.DbName + } + return "" +} + +func (m *Workflow_Stream) GetTransactionTimestamp() *vttime.Time { + if m != nil { + return m.TransactionTimestamp + } + return nil +} + +func (m *Workflow_Stream) GetTimeUpdated() *vttime.Time { + if m != nil { + return m.TimeUpdated + } + return nil +} + +func (m *Workflow_Stream) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Workflow_Stream) GetCopyStates() []*Workflow_Stream_CopyState { + if m != nil { + return m.CopyStates + } + return nil +} + +type Workflow_Stream_CopyState struct { + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + LastPk string `protobuf:"bytes,2,opt,name=last_pk,json=lastPk,proto3" json:"last_pk,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *MaterializeSettings) Reset() { *m 
= MaterializeSettings{} } -func (m *MaterializeSettings) String() string { return proto.CompactTextString(m) } -func (*MaterializeSettings) ProtoMessage() {} -func (*MaterializeSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_f41247b323a1ab2e, []int{11} +func (m *Workflow_Stream_CopyState) Reset() { *m = Workflow_Stream_CopyState{} } +func (m *Workflow_Stream_CopyState) String() string { return proto.CompactTextString(m) } +func (*Workflow_Stream_CopyState) ProtoMessage() {} +func (*Workflow_Stream_CopyState) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{6, 3, 0} } - -func (m *MaterializeSettings) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MaterializeSettings.Unmarshal(m, b) +func (m *Workflow_Stream_CopyState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *MaterializeSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MaterializeSettings.Marshal(b, m, deterministic) +func (m *Workflow_Stream_CopyState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Workflow_Stream_CopyState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (m *MaterializeSettings) XXX_Merge(src proto.Message) { - xxx_messageInfo_MaterializeSettings.Merge(m, src) +func (m *Workflow_Stream_CopyState) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow_Stream_CopyState.Merge(m, src) } -func (m *MaterializeSettings) XXX_Size() int { - return xxx_messageInfo_MaterializeSettings.Size(m) +func (m *Workflow_Stream_CopyState) XXX_Size() int { + return m.Size() } -func (m *MaterializeSettings) XXX_DiscardUnknown() { - xxx_messageInfo_MaterializeSettings.DiscardUnknown(m) +func (m *Workflow_Stream_CopyState) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow_Stream_CopyState.DiscardUnknown(m) } -var 
xxx_messageInfo_MaterializeSettings proto.InternalMessageInfo +var xxx_messageInfo_Workflow_Stream_CopyState proto.InternalMessageInfo -func (m *MaterializeSettings) GetWorkflow() string { +func (m *Workflow_Stream_CopyState) GetTable() string { if m != nil { - return m.Workflow + return m.Table } return "" } -func (m *MaterializeSettings) GetSourceKeyspace() string { +func (m *Workflow_Stream_CopyState) GetLastPk() string { if m != nil { - return m.SourceKeyspace + return m.LastPk } return "" } -func (m *MaterializeSettings) GetTargetKeyspace() string { +type ChangeTabletTypeRequest struct { + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + DbType topodata.TabletType `protobuf:"varint,2,opt,name=db_type,json=dbType,proto3,enum=topodata.TabletType" json:"db_type,omitempty"` + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangeTabletTypeRequest) Reset() { *m = ChangeTabletTypeRequest{} } +func (m *ChangeTabletTypeRequest) String() string { return proto.CompactTextString(m) } +func (*ChangeTabletTypeRequest) ProtoMessage() {} +func (*ChangeTabletTypeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{7} +} +func (m *ChangeTabletTypeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChangeTabletTypeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChangeTabletTypeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChangeTabletTypeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangeTabletTypeRequest.Merge(m, src) +} +func (m 
*ChangeTabletTypeRequest) XXX_Size() int { + return m.Size() +} +func (m *ChangeTabletTypeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ChangeTabletTypeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangeTabletTypeRequest proto.InternalMessageInfo + +func (m *ChangeTabletTypeRequest) GetTabletAlias() *topodata.TabletAlias { if m != nil { - return m.TargetKeyspace + return m.TabletAlias } - return "" + return nil } -func (m *MaterializeSettings) GetStopAfterCopy() bool { +func (m *ChangeTabletTypeRequest) GetDbType() topodata.TabletType { if m != nil { - return m.StopAfterCopy + return m.DbType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ChangeTabletTypeRequest) GetDryRun() bool { + if m != nil { + return m.DryRun } return false } -func (m *MaterializeSettings) GetTableSettings() []*TableMaterializeSettings { +type ChangeTabletTypeResponse struct { + BeforeTablet *topodata.Tablet `protobuf:"bytes,1,opt,name=before_tablet,json=beforeTablet,proto3" json:"before_tablet,omitempty"` + AfterTablet *topodata.Tablet `protobuf:"bytes,2,opt,name=after_tablet,json=afterTablet,proto3" json:"after_tablet,omitempty"` + WasDryRun bool `protobuf:"varint,3,opt,name=was_dry_run,json=wasDryRun,proto3" json:"was_dry_run,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangeTabletTypeResponse) Reset() { *m = ChangeTabletTypeResponse{} } +func (m *ChangeTabletTypeResponse) String() string { return proto.CompactTextString(m) } +func (*ChangeTabletTypeResponse) ProtoMessage() {} +func (*ChangeTabletTypeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{8} +} +func (m *ChangeTabletTypeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChangeTabletTypeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChangeTabletTypeResponse.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChangeTabletTypeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangeTabletTypeResponse.Merge(m, src) +} +func (m *ChangeTabletTypeResponse) XXX_Size() int { + return m.Size() +} +func (m *ChangeTabletTypeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ChangeTabletTypeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangeTabletTypeResponse proto.InternalMessageInfo + +func (m *ChangeTabletTypeResponse) GetBeforeTablet() *topodata.Tablet { if m != nil { - return m.TableSettings + return m.BeforeTablet } return nil } -func (m *MaterializeSettings) GetCell() string { +func (m *ChangeTabletTypeResponse) GetAfterTablet() *topodata.Tablet { if m != nil { - return m.Cell + return m.AfterTablet } - return "" + return nil } -func (m *MaterializeSettings) GetTabletTypes() string { +func (m *ChangeTabletTypeResponse) GetWasDryRun() bool { if m != nil { - return m.TabletTypes + return m.WasDryRun } - return "" + return false } -func init() { - proto.RegisterType((*ExecuteVtctlCommandRequest)(nil), "vtctldata.ExecuteVtctlCommandRequest") - proto.RegisterType((*ExecuteVtctlCommandResponse)(nil), "vtctldata.ExecuteVtctlCommandResponse") - proto.RegisterType((*GetKeyspacesRequest)(nil), "vtctldata.GetKeyspacesRequest") - proto.RegisterType((*GetKeyspacesResponse)(nil), "vtctldata.GetKeyspacesResponse") - proto.RegisterType((*GetKeyspaceRequest)(nil), "vtctldata.GetKeyspaceRequest") - proto.RegisterType((*GetKeyspaceResponse)(nil), "vtctldata.GetKeyspaceResponse") - proto.RegisterType((*Keyspace)(nil), "vtctldata.Keyspace") - proto.RegisterType((*FindAllShardsInKeyspaceRequest)(nil), "vtctldata.FindAllShardsInKeyspaceRequest") - proto.RegisterType((*FindAllShardsInKeyspaceResponse)(nil), "vtctldata.FindAllShardsInKeyspaceResponse") - proto.RegisterMapType((map[string]*Shard)(nil), 
"vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry") - proto.RegisterType((*Shard)(nil), "vtctldata.Shard") - proto.RegisterType((*TableMaterializeSettings)(nil), "vtctldata.TableMaterializeSettings") - proto.RegisterType((*MaterializeSettings)(nil), "vtctldata.MaterializeSettings") +type CreateKeyspaceRequest struct { + // Name is the name of the keyspace. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Force proceeds with the request even if the keyspace already exists. + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + // AllowEmptyVSchema allows a keyspace to be created with no vschema. + AllowEmptyVSchema bool `protobuf:"varint,3,opt,name=allow_empty_v_schema,json=allowEmptyVSchema,proto3" json:"allow_empty_v_schema,omitempty"` + // ShardingColumnName specifies the column to use for sharding operations. + ShardingColumnName string `protobuf:"bytes,4,opt,name=sharding_column_name,json=shardingColumnName,proto3" json:"sharding_column_name,omitempty"` + // ShardingColumnType specifies the type of the column to use for sharding + // operations. + ShardingColumnType topodata.KeyspaceIdType `protobuf:"varint,5,opt,name=sharding_column_type,json=shardingColumnType,proto3,enum=topodata.KeyspaceIdType" json:"sharding_column_type,omitempty"` + // ServedFroms specifies a set of db_type:keyspace pairs used to serve + // traffic for the keyspace. + ServedFroms []*topodata.Keyspace_ServedFrom `protobuf:"bytes,6,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` + // Type is the type of the keyspace to create. + Type topodata.KeyspaceType `protobuf:"varint,7,opt,name=type,proto3,enum=topodata.KeyspaceType" json:"type,omitempty"` + // BaseKeyspace specifies the base keyspace for SNAPSHOT keyspaces. It is + // required to create a SNAPSHOT keyspace. 
+ BaseKeyspace string `protobuf:"bytes,8,opt,name=base_keyspace,json=baseKeyspace,proto3" json:"base_keyspace,omitempty"` + // SnapshotTime specifies the snapshot time for this keyspace. It is required + // to create a SNAPSHOT keyspace. + SnapshotTime *vttime.Time `protobuf:"bytes,9,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_f41247b323a1ab2e) } - -var fileDescriptor_f41247b323a1ab2e = []byte{ - // 629 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdf, 0x6e, 0xd3, 0x30, - 0x14, 0xc6, 0x95, 0x76, 0x1d, 0xed, 0x29, 0x6d, 0x87, 0x07, 0x52, 0x54, 0x04, 0x94, 0xc0, 0xb6, - 0x4a, 0x48, 0x29, 0x0c, 0x09, 0x21, 0xc4, 0xcd, 0x18, 0x1d, 0x1a, 0x13, 0xbb, 0xc8, 0x26, 0x90, - 0xb8, 0x20, 0xf2, 0x92, 0xb3, 0x12, 0xcd, 0x8d, 0x43, 0x7c, 0xda, 0xad, 0xbc, 0x01, 0x2f, 0xc3, - 0x23, 0xf0, 0x6c, 0x28, 0x76, 0x92, 0x66, 0x68, 0x03, 0x71, 0xe7, 0xfc, 0xce, 0xbf, 0xef, 0x7c, - 0xb6, 0x02, 0xbd, 0x39, 0x05, 0x24, 0x42, 0x4e, 0xdc, 0x4d, 0x52, 0x49, 0x92, 0xb5, 0x4a, 0xd0, - 0xef, 0x08, 0x39, 0x99, 0x51, 0x24, 0x4c, 0xa4, 0xdf, 0x25, 0x99, 0xc8, 0x65, 0xa6, 0xf3, 0x09, - 0xfa, 0xe3, 0x0b, 0x0c, 0x66, 0x84, 0x1f, 0xb3, 0x92, 0x5d, 0x39, 0x9d, 0xf2, 0x38, 0xf4, 0xf0, - 0xdb, 0x0c, 0x15, 0x31, 0x06, 0x2b, 0x3c, 0x9d, 0x28, 0xdb, 0x1a, 0xd4, 0x87, 0x2d, 0x4f, 0x9f, - 0xd9, 0x06, 0x74, 0x79, 0x40, 0x91, 0x8c, 0x7d, 0x8a, 0xa6, 0x28, 0x67, 0x64, 0xd7, 0x06, 0xd6, - 0xb0, 0xee, 0x75, 0x0c, 0x3d, 0x36, 0xd0, 0xd9, 0x85, 0xbb, 0x57, 0x36, 0x56, 0x89, 0x8c, 0x15, - 0xb2, 0xc7, 0xd0, 0xc0, 0x39, 0xc6, 0x64, 0x5b, 0x03, 0x6b, 0xd8, 0xde, 0xee, 0xba, 0x85, 0xcc, - 0x71, 0x46, 0x3d, 0x13, 0x74, 0xee, 0xc0, 0xfa, 0x3b, 0xa4, 0x03, 0x5c, 0xa8, 0x84, 0x07, 0xa8, - 0x72, 0x59, 0xce, 0x3e, 0xdc, 0xbe, 0x8c, 0xf3, 0xa6, 
0xcf, 0xa0, 0x75, 0x56, 0x40, 0xad, 0xb9, - 0xbd, 0xbd, 0xee, 0x2e, 0xbd, 0x29, 0x0a, 0xbc, 0x65, 0x96, 0xf3, 0x14, 0x58, 0xa5, 0x55, 0xb1, - 0x77, 0x1f, 0x9a, 0x45, 0x8a, 0x16, 0xd8, 0xf2, 0xca, 0x6f, 0x67, 0xef, 0x92, 0xa6, 0x72, 0xf6, - 0xe8, 0x8f, 0x92, 0x6b, 0x46, 0x2f, 0xfb, 0x1c, 0x42, 0xb3, 0xa0, 0x99, 0xcf, 0x31, 0x9f, 0x16, - 0xb3, 0xf4, 0x99, 0xb9, 0x95, 0x86, 0x35, 0xdd, 0x90, 0xb9, 0xe5, 0xe5, 0x5d, 0xd1, 0xef, 0x35, - 0xdc, 0xdf, 0x8b, 0xe2, 0x70, 0x47, 0x88, 0xa3, 0xaf, 0x3c, 0x0d, 0xd5, 0x7e, 0xfc, 0x3f, 0x5b, - 0xfd, 0xb2, 0xe0, 0xc1, 0xb5, 0xe5, 0xf9, 0x8a, 0x87, 0xb0, 0xaa, 0x74, 0x2c, 0xf7, 0xf6, 0x45, - 0x65, 0xc1, 0x7f, 0xd4, 0xba, 0x26, 0x30, 0x8e, 0x29, 0x5d, 0x78, 0x79, 0x97, 0xfe, 0x01, 0xb4, - 0x2b, 0x98, 0xad, 0x41, 0xfd, 0x0c, 0x17, 0xb9, 0xb2, 0xec, 0xc8, 0x36, 0xa1, 0x31, 0xe7, 0x62, - 0x56, 0xec, 0xbf, 0x56, 0x99, 0xa7, 0x0b, 0x3d, 0x13, 0x7e, 0x55, 0x7b, 0x69, 0x39, 0x5f, 0xa0, - 0xa1, 0xd9, 0xdf, 0xb6, 0x2c, 0x7d, 0xae, 0x55, 0x7c, 0xde, 0x80, 0x86, 0xd6, 0x63, 0xd7, 0xf5, - 0x90, 0xde, 0xd2, 0xe4, 0x7c, 0x86, 0x8e, 0x3a, 0x3f, 0x2c, 0xb0, 0x8f, 0xf9, 0x89, 0xc0, 0x0f, - 0x9c, 0x30, 0x8d, 0xb8, 0x88, 0xbe, 0xe3, 0x11, 0x12, 0x45, 0xf1, 0x44, 0xb1, 0x87, 0x70, 0x93, - 0x78, 0x3a, 0x41, 0xf2, 0x29, 0x4b, 0xc9, 0xe7, 0xb6, 0x0d, 0xd3, 0x55, 0xec, 0x09, 0xdc, 0x52, - 0x72, 0x96, 0x06, 0xe8, 0xe3, 0x45, 0x92, 0xa2, 0x52, 0x91, 0x8c, 0x73, 0x1d, 0x6b, 0x26, 0x30, - 0x2e, 0x39, 0xbb, 0x07, 0x10, 0xa4, 0xc8, 0x09, 0xfd, 0x30, 0x14, 0x5a, 0x58, 0xcb, 0x6b, 0x19, - 0xf2, 0x36, 0x14, 0xce, 0xcf, 0x1a, 0xac, 0x5f, 0x25, 0xa3, 0x0f, 0xcd, 0x73, 0x99, 0x9e, 0x9d, - 0x0a, 0x79, 0x5e, 0xac, 0x5e, 0x7c, 0xb3, 0x2d, 0xe8, 0xe5, 0xf3, 0x2f, 0xbd, 0xaa, 0x96, 0xd7, - 0x35, 0xb8, 0x7c, 0x8b, 0x5b, 0xd0, 0xcb, 0x77, 0x29, 0x13, 0x8d, 0x80, 0xae, 0xc1, 0x65, 0xe2, - 0x26, 0xf4, 0x14, 0xc9, 0xc4, 0xe7, 0xa7, 0x84, 0xa9, 0x1f, 0xc8, 0x64, 0x61, 0xaf, 0x0c, 0xac, - 0x61, 0xd3, 0xeb, 0x64, 0x78, 0x27, 0xa3, 0xbb, 0x32, 0x59, 0xb0, 0xf7, 0xd0, 0xd5, 0xae, 
0xf8, - 0x2a, 0xd7, 0x69, 0x37, 0xf4, 0xf3, 0x79, 0x54, 0xb9, 0xce, 0xeb, 0x9c, 0xf5, 0x3a, 0xba, 0xb4, - 0xdc, 0x90, 0xc1, 0x4a, 0x80, 0x42, 0xd8, 0xab, 0xe6, 0x02, 0xb3, 0xb3, 0x31, 0xff, 0x44, 0x64, - 0xe6, 0x2f, 0x12, 0x54, 0xf6, 0x8d, 0xc2, 0xfc, 0x8c, 0x1d, 0x67, 0xe8, 0xcd, 0xf0, 0xf3, 0xe6, - 0x3c, 0x22, 0x54, 0xca, 0x8d, 0xe4, 0xc8, 0x9c, 0x46, 0x13, 0x39, 0x9a, 0xd3, 0x48, 0xff, 0x05, - 0x47, 0xa5, 0x90, 0x93, 0x55, 0x0d, 0x9e, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xa9, 0x4e, - 0xcf, 0x53, 0x05, 0x00, 0x00, +func (m *CreateKeyspaceRequest) Reset() { *m = CreateKeyspaceRequest{} } +func (m *CreateKeyspaceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateKeyspaceRequest) ProtoMessage() {} +func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{9} +} +func (m *CreateKeyspaceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateKeyspaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateKeyspaceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateKeyspaceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateKeyspaceRequest.Merge(m, src) +} +func (m *CreateKeyspaceRequest) XXX_Size() int { + return m.Size() +} +func (m *CreateKeyspaceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateKeyspaceRequest.DiscardUnknown(m) } + +var xxx_messageInfo_CreateKeyspaceRequest proto.InternalMessageInfo + +func (m *CreateKeyspaceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateKeyspaceRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +func (m *CreateKeyspaceRequest) GetAllowEmptyVSchema() bool { + if m != nil { + return m.AllowEmptyVSchema + } + return false +} + 
+func (m *CreateKeyspaceRequest) GetShardingColumnName() string { + if m != nil { + return m.ShardingColumnName + } + return "" +} + +func (m *CreateKeyspaceRequest) GetShardingColumnType() topodata.KeyspaceIdType { + if m != nil { + return m.ShardingColumnType + } + return topodata.KeyspaceIdType_UNSET +} + +func (m *CreateKeyspaceRequest) GetServedFroms() []*topodata.Keyspace_ServedFrom { + if m != nil { + return m.ServedFroms + } + return nil +} + +func (m *CreateKeyspaceRequest) GetType() topodata.KeyspaceType { + if m != nil { + return m.Type + } + return topodata.KeyspaceType_NORMAL +} + +func (m *CreateKeyspaceRequest) GetBaseKeyspace() string { + if m != nil { + return m.BaseKeyspace + } + return "" +} + +func (m *CreateKeyspaceRequest) GetSnapshotTime() *vttime.Time { + if m != nil { + return m.SnapshotTime + } + return nil +} + +type CreateKeyspaceResponse struct { + // Keyspace is the newly-created keyspace. + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateKeyspaceResponse) Reset() { *m = CreateKeyspaceResponse{} } +func (m *CreateKeyspaceResponse) String() string { return proto.CompactTextString(m) } +func (*CreateKeyspaceResponse) ProtoMessage() {} +func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{10} +} +func (m *CreateKeyspaceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateKeyspaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateKeyspaceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateKeyspaceResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_CreateKeyspaceResponse.Merge(m, src) +} +func (m *CreateKeyspaceResponse) XXX_Size() int { + return m.Size() +} +func (m *CreateKeyspaceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateKeyspaceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateKeyspaceResponse proto.InternalMessageInfo + +func (m *CreateKeyspaceResponse) GetKeyspace() *Keyspace { + if m != nil { + return m.Keyspace + } + return nil +} + +type CreateShardRequest struct { + // Keyspace is the name of the keyspace to create the shard in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // ShardName is the name of the shard to create. E.g. "-" or "-80". + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + // Force treats an attempt to create a shard that already exists as a + // non-error. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` + // IncludeParent creates the parent keyspace as an empty BASE keyspace, if it + // doesn't already exist. 
+ IncludeParent bool `protobuf:"varint,4,opt,name=include_parent,json=includeParent,proto3" json:"include_parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateShardRequest) Reset() { *m = CreateShardRequest{} } +func (m *CreateShardRequest) String() string { return proto.CompactTextString(m) } +func (*CreateShardRequest) ProtoMessage() {} +func (*CreateShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{11} +} +func (m *CreateShardRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateShardRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateShardRequest.Merge(m, src) +} +func (m *CreateShardRequest) XXX_Size() int { + return m.Size() +} +func (m *CreateShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateShardRequest proto.InternalMessageInfo + +func (m *CreateShardRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *CreateShardRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +func (m *CreateShardRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +func (m *CreateShardRequest) GetIncludeParent() bool { + if m != nil { + return m.IncludeParent + } + return false +} + +type CreateShardResponse struct { + // Keyspace is the created keyspace. It is set only if IncludeParent was + // specified in the request and the parent keyspace needed to be created. 
+ Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the newly-created shard object. + Shard *Shard `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // ShardAlreadyExists is set if Force was specified in the request and the + // shard already existed. + ShardAlreadyExists bool `protobuf:"varint,3,opt,name=shard_already_exists,json=shardAlreadyExists,proto3" json:"shard_already_exists,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateShardResponse) Reset() { *m = CreateShardResponse{} } +func (m *CreateShardResponse) String() string { return proto.CompactTextString(m) } +func (*CreateShardResponse) ProtoMessage() {} +func (*CreateShardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{12} +} +func (m *CreateShardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateShardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateShardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateShardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateShardResponse.Merge(m, src) +} +func (m *CreateShardResponse) XXX_Size() int { + return m.Size() +} +func (m *CreateShardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateShardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateShardResponse proto.InternalMessageInfo + +func (m *CreateShardResponse) GetKeyspace() *Keyspace { + if m != nil { + return m.Keyspace + } + return nil +} + +func (m *CreateShardResponse) GetShard() *Shard { + if m != nil { + return m.Shard + } + return nil +} + +func (m *CreateShardResponse) GetShardAlreadyExists() bool { + if m != nil { + return 
m.ShardAlreadyExists + } + return false +} + +type DeleteKeyspaceRequest struct { + // Keyspace is the name of the keyspace to delete. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Recursive causes all shards in the keyspace to be recursively deleted + // before deleting the keyspace. It is an error to call DeleteKeyspace on a + // non-empty keyspace without also specifying Recursive. + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteKeyspaceRequest) Reset() { *m = DeleteKeyspaceRequest{} } +func (m *DeleteKeyspaceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteKeyspaceRequest) ProtoMessage() {} +func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{13} +} +func (m *DeleteKeyspaceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteKeyspaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteKeyspaceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteKeyspaceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteKeyspaceRequest.Merge(m, src) +} +func (m *DeleteKeyspaceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteKeyspaceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteKeyspaceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteKeyspaceRequest proto.InternalMessageInfo + +func (m *DeleteKeyspaceRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *DeleteKeyspaceRequest) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + 
+type DeleteKeyspaceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteKeyspaceResponse) Reset() { *m = DeleteKeyspaceResponse{} } +func (m *DeleteKeyspaceResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteKeyspaceResponse) ProtoMessage() {} +func (*DeleteKeyspaceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{14} +} +func (m *DeleteKeyspaceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteKeyspaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteKeyspaceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteKeyspaceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteKeyspaceResponse.Merge(m, src) +} +func (m *DeleteKeyspaceResponse) XXX_Size() int { + return m.Size() +} +func (m *DeleteKeyspaceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteKeyspaceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteKeyspaceResponse proto.InternalMessageInfo + +type DeleteShardsRequest struct { + // Shards is the list of shards to delete. The nested topodatapb.Shard field + // is not required for DeleteShard, but the Keyspace and Shard fields are. + Shards []*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` + // Recursive also deletes all tablets belonging to the shard(s). It is an + // error to call DeleteShard on a non-empty shard without also specificying + // Recursive. + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` + // EvenIfServing allows a shard to be deleted even if it is serving, which is + // normally an error. Use with caution. 
+ EvenIfServing bool `protobuf:"varint,4,opt,name=even_if_serving,json=evenIfServing,proto3" json:"even_if_serving,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteShardsRequest) Reset() { *m = DeleteShardsRequest{} } +func (m *DeleteShardsRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteShardsRequest) ProtoMessage() {} +func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{15} +} +func (m *DeleteShardsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteShardsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteShardsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteShardsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteShardsRequest.Merge(m, src) +} +func (m *DeleteShardsRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteShardsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteShardsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteShardsRequest proto.InternalMessageInfo + +func (m *DeleteShardsRequest) GetShards() []*Shard { + if m != nil { + return m.Shards + } + return nil +} + +func (m *DeleteShardsRequest) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + +func (m *DeleteShardsRequest) GetEvenIfServing() bool { + if m != nil { + return m.EvenIfServing + } + return false +} + +type DeleteShardsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteShardsResponse) Reset() { *m = DeleteShardsResponse{} } +func (m *DeleteShardsResponse) String() string { return proto.CompactTextString(m) } 
+func (*DeleteShardsResponse) ProtoMessage() {} +func (*DeleteShardsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{16} +} +func (m *DeleteShardsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteShardsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteShardsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteShardsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteShardsResponse.Merge(m, src) +} +func (m *DeleteShardsResponse) XXX_Size() int { + return m.Size() +} +func (m *DeleteShardsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteShardsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteShardsResponse proto.InternalMessageInfo + +type DeleteTabletsRequest struct { + // TabletAliases is the list of tablets to delete. + TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` + // AllowPrimary allows for the master/primary tablet of a shard to be deleted. + // Use with caution. 
+ AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteTabletsRequest) Reset() { *m = DeleteTabletsRequest{} } +func (m *DeleteTabletsRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTabletsRequest) ProtoMessage() {} +func (*DeleteTabletsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{17} +} +func (m *DeleteTabletsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteTabletsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteTabletsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteTabletsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteTabletsRequest.Merge(m, src) +} +func (m *DeleteTabletsRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteTabletsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteTabletsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteTabletsRequest proto.InternalMessageInfo + +func (m *DeleteTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { + if m != nil { + return m.TabletAliases + } + return nil +} + +func (m *DeleteTabletsRequest) GetAllowPrimary() bool { + if m != nil { + return m.AllowPrimary + } + return false +} + +type DeleteTabletsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteTabletsResponse) Reset() { *m = DeleteTabletsResponse{} } +func (m *DeleteTabletsResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteTabletsResponse) ProtoMessage() {} +func 
(*DeleteTabletsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{18} +} +func (m *DeleteTabletsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteTabletsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteTabletsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteTabletsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteTabletsResponse.Merge(m, src) +} +func (m *DeleteTabletsResponse) XXX_Size() int { + return m.Size() +} +func (m *DeleteTabletsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteTabletsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteTabletsResponse proto.InternalMessageInfo + +type EmergencyReparentShardRequest struct { + // Keyspace is the name of the keyspace to perform the Emergency Reparent in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to perform the Emergency Reparent in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Optional alias of a tablet that should become the new shard primary. If not + // not specified, the vtctld will select the most up-to-date canditate to + // promote. + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + // List of replica aliases to ignore during the Emergency Reparent. The vtctld + // will not attempt to stop replication on these tablets, nor attempt to + // demote any that may think they are the shard primary. 
+ IgnoreReplicas []*topodata.TabletAlias `protobuf:"bytes,4,rep,name=ignore_replicas,json=ignoreReplicas,proto3" json:"ignore_replicas,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in reparenting. + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EmergencyReparentShardRequest) Reset() { *m = EmergencyReparentShardRequest{} } +func (m *EmergencyReparentShardRequest) String() string { return proto.CompactTextString(m) } +func (*EmergencyReparentShardRequest) ProtoMessage() {} +func (*EmergencyReparentShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{19} +} +func (m *EmergencyReparentShardRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmergencyReparentShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EmergencyReparentShardRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EmergencyReparentShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmergencyReparentShardRequest.Merge(m, src) +} +func (m *EmergencyReparentShardRequest) XXX_Size() int { + return m.Size() +} +func (m *EmergencyReparentShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EmergencyReparentShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EmergencyReparentShardRequest proto.InternalMessageInfo + +func (m *EmergencyReparentShardRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *EmergencyReparentShardRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} 
+ +func (m *EmergencyReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { + if m != nil { + return m.NewPrimary + } + return nil +} + +func (m *EmergencyReparentShardRequest) GetIgnoreReplicas() []*topodata.TabletAlias { + if m != nil { + return m.IgnoreReplicas + } + return nil +} + +func (m *EmergencyReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { + if m != nil { + return m.WaitReplicasTimeout + } + return nil +} + +type EmergencyReparentShardResponse struct { + // Keyspace is the name of the keyspace the Emergency Reparent took place in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the Emergency Reparent took place in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date. 
+ PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EmergencyReparentShardResponse) Reset() { *m = EmergencyReparentShardResponse{} } +func (m *EmergencyReparentShardResponse) String() string { return proto.CompactTextString(m) } +func (*EmergencyReparentShardResponse) ProtoMessage() {} +func (*EmergencyReparentShardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{20} +} +func (m *EmergencyReparentShardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmergencyReparentShardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EmergencyReparentShardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EmergencyReparentShardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmergencyReparentShardResponse.Merge(m, src) +} +func (m *EmergencyReparentShardResponse) XXX_Size() int { + return m.Size() +} +func (m *EmergencyReparentShardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EmergencyReparentShardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EmergencyReparentShardResponse proto.InternalMessageInfo + +func (m *EmergencyReparentShardResponse) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *EmergencyReparentShardResponse) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *EmergencyReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if m != nil { + return m.PromotedPrimary + } + return nil +} 
+ +func (m *EmergencyReparentShardResponse) GetEvents() []*logutil.Event { + if m != nil { + return m.Events + } + return nil +} + +type FindAllShardsInKeyspaceRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FindAllShardsInKeyspaceRequest) Reset() { *m = FindAllShardsInKeyspaceRequest{} } +func (m *FindAllShardsInKeyspaceRequest) String() string { return proto.CompactTextString(m) } +func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} +func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{21} +} +func (m *FindAllShardsInKeyspaceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FindAllShardsInKeyspaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FindAllShardsInKeyspaceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FindAllShardsInKeyspaceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FindAllShardsInKeyspaceRequest.Merge(m, src) +} +func (m *FindAllShardsInKeyspaceRequest) XXX_Size() int { + return m.Size() +} +func (m *FindAllShardsInKeyspaceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FindAllShardsInKeyspaceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FindAllShardsInKeyspaceRequest proto.InternalMessageInfo + +func (m *FindAllShardsInKeyspaceRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +type FindAllShardsInKeyspaceResponse struct { + Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FindAllShardsInKeyspaceResponse) Reset() { *m = FindAllShardsInKeyspaceResponse{} } +func (m *FindAllShardsInKeyspaceResponse) String() string { return proto.CompactTextString(m) } +func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} +func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{22} +} +func (m *FindAllShardsInKeyspaceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FindAllShardsInKeyspaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FindAllShardsInKeyspaceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FindAllShardsInKeyspaceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FindAllShardsInKeyspaceResponse.Merge(m, src) +} +func (m *FindAllShardsInKeyspaceResponse) XXX_Size() int { + return m.Size() +} +func (m *FindAllShardsInKeyspaceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FindAllShardsInKeyspaceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FindAllShardsInKeyspaceResponse proto.InternalMessageInfo + +func (m *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { + if m != nil { + return m.Shards + } + return nil +} + +type GetBackupsRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBackupsRequest) Reset() { *m = GetBackupsRequest{} } +func (m *GetBackupsRequest) String() string { return proto.CompactTextString(m) } +func 
(*GetBackupsRequest) ProtoMessage() {} +func (*GetBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{23} +} +func (m *GetBackupsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetBackupsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBackupsRequest.Merge(m, src) +} +func (m *GetBackupsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBackupsRequest proto.InternalMessageInfo + +func (m *GetBackupsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *GetBackupsRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +type GetBackupsResponse struct { + Backups []*mysqlctl.BackupInfo `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBackupsResponse) Reset() { *m = GetBackupsResponse{} } +func (m *GetBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*GetBackupsResponse) ProtoMessage() {} +func (*GetBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{24} +} +func (m *GetBackupsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetBackupsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBackupsResponse.Merge(m, src) +} +func (m *GetBackupsResponse) XXX_Size() int { + return m.Size() +} +func (m *GetBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBackupsResponse proto.InternalMessageInfo + +func (m *GetBackupsResponse) GetBackups() []*mysqlctl.BackupInfo { + if m != nil { + return m.Backups + } + return nil +} + +type GetCellInfoNamesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCellInfoNamesRequest) Reset() { *m = GetCellInfoNamesRequest{} } +func (m *GetCellInfoNamesRequest) String() string { return proto.CompactTextString(m) } +func (*GetCellInfoNamesRequest) ProtoMessage() {} +func (*GetCellInfoNamesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{25} +} +func (m *GetCellInfoNamesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetCellInfoNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetCellInfoNamesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetCellInfoNamesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCellInfoNamesRequest.Merge(m, src) +} +func (m *GetCellInfoNamesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetCellInfoNamesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCellInfoNamesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCellInfoNamesRequest proto.InternalMessageInfo + +type GetCellInfoNamesResponse struct { + Names []string `protobuf:"bytes,1,rep,name=names,proto3" 
json:"names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCellInfoNamesResponse) Reset() { *m = GetCellInfoNamesResponse{} } +func (m *GetCellInfoNamesResponse) String() string { return proto.CompactTextString(m) } +func (*GetCellInfoNamesResponse) ProtoMessage() {} +func (*GetCellInfoNamesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{26} +} +func (m *GetCellInfoNamesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetCellInfoNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetCellInfoNamesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetCellInfoNamesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCellInfoNamesResponse.Merge(m, src) +} +func (m *GetCellInfoNamesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetCellInfoNamesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCellInfoNamesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCellInfoNamesResponse proto.InternalMessageInfo + +func (m *GetCellInfoNamesResponse) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +type GetCellInfoRequest struct { + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCellInfoRequest) Reset() { *m = GetCellInfoRequest{} } +func (m *GetCellInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetCellInfoRequest) ProtoMessage() {} +func (*GetCellInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{27} +} +func (m *GetCellInfoRequest) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetCellInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetCellInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetCellInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCellInfoRequest.Merge(m, src) +} +func (m *GetCellInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *GetCellInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCellInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCellInfoRequest proto.InternalMessageInfo + +func (m *GetCellInfoRequest) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +type GetCellInfoResponse struct { + CellInfo *topodata.CellInfo `protobuf:"bytes,1,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCellInfoResponse) Reset() { *m = GetCellInfoResponse{} } +func (m *GetCellInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetCellInfoResponse) ProtoMessage() {} +func (*GetCellInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{28} +} +func (m *GetCellInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetCellInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetCellInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetCellInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCellInfoResponse.Merge(m, src) +} +func (m *GetCellInfoResponse) XXX_Size() 
int { + return m.Size() +} +func (m *GetCellInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCellInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCellInfoResponse proto.InternalMessageInfo + +func (m *GetCellInfoResponse) GetCellInfo() *topodata.CellInfo { + if m != nil { + return m.CellInfo + } + return nil +} + +type GetCellsAliasesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCellsAliasesRequest) Reset() { *m = GetCellsAliasesRequest{} } +func (m *GetCellsAliasesRequest) String() string { return proto.CompactTextString(m) } +func (*GetCellsAliasesRequest) ProtoMessage() {} +func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{29} +} +func (m *GetCellsAliasesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetCellsAliasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetCellsAliasesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetCellsAliasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCellsAliasesRequest.Merge(m, src) +} +func (m *GetCellsAliasesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetCellsAliasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCellsAliasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCellsAliasesRequest proto.InternalMessageInfo + +type GetCellsAliasesResponse struct { + Aliases map[string]*topodata.CellsAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*GetCellsAliasesResponse) Reset() { *m = GetCellsAliasesResponse{} } +func (m *GetCellsAliasesResponse) String() string { return proto.CompactTextString(m) } +func (*GetCellsAliasesResponse) ProtoMessage() {} +func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{30} +} +func (m *GetCellsAliasesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetCellsAliasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetCellsAliasesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetCellsAliasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCellsAliasesResponse.Merge(m, src) +} +func (m *GetCellsAliasesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetCellsAliasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCellsAliasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCellsAliasesResponse proto.InternalMessageInfo + +func (m *GetCellsAliasesResponse) GetAliases() map[string]*topodata.CellsAlias { + if m != nil { + return m.Aliases + } + return nil +} + +type GetKeyspacesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyspacesRequest) Reset() { *m = GetKeyspacesRequest{} } +func (m *GetKeyspacesRequest) String() string { return proto.CompactTextString(m) } +func (*GetKeyspacesRequest) ProtoMessage() {} +func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{31} +} +func (m *GetKeyspacesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetKeyspacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_GetKeyspacesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetKeyspacesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyspacesRequest.Merge(m, src) +} +func (m *GetKeyspacesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetKeyspacesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyspacesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyspacesRequest proto.InternalMessageInfo + +type GetKeyspacesResponse struct { + Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyspacesResponse) Reset() { *m = GetKeyspacesResponse{} } +func (m *GetKeyspacesResponse) String() string { return proto.CompactTextString(m) } +func (*GetKeyspacesResponse) ProtoMessage() {} +func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{32} +} +func (m *GetKeyspacesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetKeyspacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetKeyspacesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetKeyspacesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyspacesResponse.Merge(m, src) +} +func (m *GetKeyspacesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetKeyspacesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyspacesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyspacesResponse proto.InternalMessageInfo + +func (m *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { + if m 
!= nil { + return m.Keyspaces + } + return nil +} + +type GetKeyspaceRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyspaceRequest) Reset() { *m = GetKeyspaceRequest{} } +func (m *GetKeyspaceRequest) String() string { return proto.CompactTextString(m) } +func (*GetKeyspaceRequest) ProtoMessage() {} +func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{33} +} +func (m *GetKeyspaceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetKeyspaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetKeyspaceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetKeyspaceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyspaceRequest.Merge(m, src) +} +func (m *GetKeyspaceRequest) XXX_Size() int { + return m.Size() +} +func (m *GetKeyspaceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyspaceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyspaceRequest proto.InternalMessageInfo + +func (m *GetKeyspaceRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +type GetKeyspaceResponse struct { + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyspaceResponse) Reset() { *m = GetKeyspaceResponse{} } +func (m *GetKeyspaceResponse) String() string { return proto.CompactTextString(m) } +func (*GetKeyspaceResponse) ProtoMessage() {} +func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { + 
return fileDescriptor_f41247b323a1ab2e, []int{34} +} +func (m *GetKeyspaceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetKeyspaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetKeyspaceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetKeyspaceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyspaceResponse.Merge(m, src) +} +func (m *GetKeyspaceResponse) XXX_Size() int { + return m.Size() +} +func (m *GetKeyspaceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyspaceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyspaceResponse proto.InternalMessageInfo + +func (m *GetKeyspaceResponse) GetKeyspace() *Keyspace { + if m != nil { + return m.Keyspace + } + return nil +} + +type GetSchemaRequest struct { + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // Tables is a list of tables for which we should gather information. Each is + // either an exact match, or a regular expression of the form /regexp/. + Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` + // ExcludeTables is a list of tables to exclude from the result. Each is + // either an exact match, or a regular expression of the form /regexp/. + ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + // IncludeViews specifies whether to include views in the result. + IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + // TableNamesOnly specifies whether to limit the results to just table names, + // rather than full schema information for each table. 
+ TableNamesOnly bool `protobuf:"varint,5,opt,name=table_names_only,json=tableNamesOnly,proto3" json:"table_names_only,omitempty"` + // TableSizesOnly specifies whether to limit the results to just table sizes, + // rather than full schema information for each table. It is ignored if + // TableNamesOnly is set to true. + TableSizesOnly bool `protobuf:"varint,6,opt,name=table_sizes_only,json=tableSizesOnly,proto3" json:"table_sizes_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemaRequest) Reset() { *m = GetSchemaRequest{} } +func (m *GetSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*GetSchemaRequest) ProtoMessage() {} +func (*GetSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{35} +} +func (m *GetSchemaRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemaRequest.Merge(m, src) +} +func (m *GetSchemaRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemaRequest proto.InternalMessageInfo + +func (m *GetSchemaRequest) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +func (m *GetSchemaRequest) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *GetSchemaRequest) GetExcludeTables() []string { + if m != nil { + return m.ExcludeTables + } + return nil +} + +func (m 
*GetSchemaRequest) GetIncludeViews() bool { + if m != nil { + return m.IncludeViews + } + return false +} + +func (m *GetSchemaRequest) GetTableNamesOnly() bool { + if m != nil { + return m.TableNamesOnly + } + return false +} + +func (m *GetSchemaRequest) GetTableSizesOnly() bool { + if m != nil { + return m.TableSizesOnly + } + return false +} + +type GetSchemaResponse struct { + Schema *tabletmanagerdata.SchemaDefinition `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemaResponse) Reset() { *m = GetSchemaResponse{} } +func (m *GetSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*GetSchemaResponse) ProtoMessage() {} +func (*GetSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{36} +} +func (m *GetSchemaResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSchemaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemaResponse.Merge(m, src) +} +func (m *GetSchemaResponse) XXX_Size() int { + return m.Size() +} +func (m *GetSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemaResponse proto.InternalMessageInfo + +func (m *GetSchemaResponse) GetSchema() *tabletmanagerdata.SchemaDefinition { + if m != nil { + return m.Schema + } + return nil +} + +type GetShardRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ShardName string 
`protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetShardRequest) Reset() { *m = GetShardRequest{} } +func (m *GetShardRequest) String() string { return proto.CompactTextString(m) } +func (*GetShardRequest) ProtoMessage() {} +func (*GetShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{37} +} +func (m *GetShardRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetShardRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetShardRequest.Merge(m, src) +} +func (m *GetShardRequest) XXX_Size() int { + return m.Size() +} +func (m *GetShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetShardRequest proto.InternalMessageInfo + +func (m *GetShardRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *GetShardRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type GetShardResponse struct { + Shard *Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetShardResponse) Reset() { *m = GetShardResponse{} } +func (m *GetShardResponse) String() string { return proto.CompactTextString(m) } +func (*GetShardResponse) ProtoMessage() {} +func (*GetShardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{38} +} 
+func (m *GetShardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetShardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetShardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetShardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetShardResponse.Merge(m, src) +} +func (m *GetShardResponse) XXX_Size() int { + return m.Size() +} +func (m *GetShardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetShardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetShardResponse proto.InternalMessageInfo + +func (m *GetShardResponse) GetShard() *Shard { + if m != nil { + return m.Shard + } + return nil +} + +type GetSrvKeyspacesRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. 
+ Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSrvKeyspacesRequest) Reset() { *m = GetSrvKeyspacesRequest{} } +func (m *GetSrvKeyspacesRequest) String() string { return proto.CompactTextString(m) } +func (*GetSrvKeyspacesRequest) ProtoMessage() {} +func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{39} +} +func (m *GetSrvKeyspacesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSrvKeyspacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSrvKeyspacesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSrvKeyspacesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSrvKeyspacesRequest.Merge(m, src) +} +func (m *GetSrvKeyspacesRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSrvKeyspacesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSrvKeyspacesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSrvKeyspacesRequest proto.InternalMessageInfo + +func (m *GetSrvKeyspacesRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *GetSrvKeyspacesRequest) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +type GetSrvKeyspacesResponse struct { + // SrvKeyspaces is a mapping of cell name to SrvKeyspace. 
+ SrvKeyspaces map[string]*topodata.SrvKeyspace `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSrvKeyspacesResponse) Reset() { *m = GetSrvKeyspacesResponse{} } +func (m *GetSrvKeyspacesResponse) String() string { return proto.CompactTextString(m) } +func (*GetSrvKeyspacesResponse) ProtoMessage() {} +func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{40} +} +func (m *GetSrvKeyspacesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSrvKeyspacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSrvKeyspacesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSrvKeyspacesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSrvKeyspacesResponse.Merge(m, src) +} +func (m *GetSrvKeyspacesResponse) XXX_Size() int { + return m.Size() +} +func (m *GetSrvKeyspacesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSrvKeyspacesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSrvKeyspacesResponse proto.InternalMessageInfo + +func (m *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeyspace { + if m != nil { + return m.SrvKeyspaces + } + return nil +} + +type GetSrvVSchemaRequest struct { + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSrvVSchemaRequest) Reset() { *m = GetSrvVSchemaRequest{} } +func (m *GetSrvVSchemaRequest) 
String() string { return proto.CompactTextString(m) } +func (*GetSrvVSchemaRequest) ProtoMessage() {} +func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{41} +} +func (m *GetSrvVSchemaRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSrvVSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSrvVSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSrvVSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSrvVSchemaRequest.Merge(m, src) +} +func (m *GetSrvVSchemaRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSrvVSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSrvVSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSrvVSchemaRequest proto.InternalMessageInfo + +func (m *GetSrvVSchemaRequest) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +type GetSrvVSchemaResponse struct { + SrvVSchema *vschema.SrvVSchema `protobuf:"bytes,1,opt,name=srv_v_schema,json=srvVSchema,proto3" json:"srv_v_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSrvVSchemaResponse) Reset() { *m = GetSrvVSchemaResponse{} } +func (m *GetSrvVSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*GetSrvVSchemaResponse) ProtoMessage() {} +func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{42} +} +func (m *GetSrvVSchemaResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSrvVSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSrvVSchemaResponse.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSrvVSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSrvVSchemaResponse.Merge(m, src) +} +func (m *GetSrvVSchemaResponse) XXX_Size() int { + return m.Size() +} +func (m *GetSrvVSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSrvVSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSrvVSchemaResponse proto.InternalMessageInfo + +func (m *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema { + if m != nil { + return m.SrvVSchema + } + return nil +} + +type GetTabletRequest struct { + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTabletRequest) Reset() { *m = GetTabletRequest{} } +func (m *GetTabletRequest) String() string { return proto.CompactTextString(m) } +func (*GetTabletRequest) ProtoMessage() {} +func (*GetTabletRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{43} +} +func (m *GetTabletRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTabletRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTabletRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTabletRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTabletRequest.Merge(m, src) +} +func (m *GetTabletRequest) XXX_Size() int { + return m.Size() +} +func (m *GetTabletRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTabletRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTabletRequest proto.InternalMessageInfo + 
+func (m *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +type GetTabletResponse struct { + Tablet *topodata.Tablet `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTabletResponse) Reset() { *m = GetTabletResponse{} } +func (m *GetTabletResponse) String() string { return proto.CompactTextString(m) } +func (*GetTabletResponse) ProtoMessage() {} +func (*GetTabletResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{44} +} +func (m *GetTabletResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTabletResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTabletResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTabletResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTabletResponse.Merge(m, src) +} +func (m *GetTabletResponse) XXX_Size() int { + return m.Size() +} +func (m *GetTabletResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTabletResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTabletResponse proto.InternalMessageInfo + +func (m *GetTabletResponse) GetTablet() *topodata.Tablet { + if m != nil { + return m.Tablet + } + return nil +} + +type GetTabletsRequest struct { + // Keyspace is the name of the keyspace to return tablets for. Omit to return + // all tablets. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to return tablets for. This field is ignored + // if Keyspace is not set. 
+ Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Cells is an optional set of cells to return tablets for. + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + // Strict specifies how the server should treat failures from individual + // cells. + // + // When false (the default), GetTablets will return data from any cells that + // return successfully, but will fail the request if all cells fail. When + // true, any individual cell can fail the full request. + Strict bool `protobuf:"varint,4,opt,name=strict,proto3" json:"strict,omitempty"` + // TabletAliases is an optional list of tablet aliases to fetch Tablet objects + // for. If specified, Keyspace, Shard, and Cells are ignored, and tablets are + // looked up by their respective aliases' Cells directly. + TabletAliases []*topodata.TabletAlias `protobuf:"bytes,5,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTabletsRequest) Reset() { *m = GetTabletsRequest{} } +func (m *GetTabletsRequest) String() string { return proto.CompactTextString(m) } +func (*GetTabletsRequest) ProtoMessage() {} +func (*GetTabletsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{45} +} +func (m *GetTabletsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTabletsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTabletsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTabletsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTabletsRequest.Merge(m, src) +} +func (m *GetTabletsRequest) XXX_Size() int { + return m.Size() +} +func (m 
*GetTabletsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTabletsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTabletsRequest proto.InternalMessageInfo + +func (m *GetTabletsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *GetTabletsRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *GetTabletsRequest) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +func (m *GetTabletsRequest) GetStrict() bool { + if m != nil { + return m.Strict + } + return false +} + +func (m *GetTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { + if m != nil { + return m.TabletAliases + } + return nil +} + +type GetTabletsResponse struct { + Tablets []*topodata.Tablet `protobuf:"bytes,1,rep,name=tablets,proto3" json:"tablets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTabletsResponse) Reset() { *m = GetTabletsResponse{} } +func (m *GetTabletsResponse) String() string { return proto.CompactTextString(m) } +func (*GetTabletsResponse) ProtoMessage() {} +func (*GetTabletsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{46} +} +func (m *GetTabletsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTabletsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTabletsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTabletsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTabletsResponse.Merge(m, src) +} +func (m *GetTabletsResponse) XXX_Size() int { + return m.Size() +} +func (m *GetTabletsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTabletsResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_GetTabletsResponse proto.InternalMessageInfo + +func (m *GetTabletsResponse) GetTablets() []*topodata.Tablet { + if m != nil { + return m.Tablets + } + return nil +} + +type GetVSchemaRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVSchemaRequest) Reset() { *m = GetVSchemaRequest{} } +func (m *GetVSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*GetVSchemaRequest) ProtoMessage() {} +func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{47} +} +func (m *GetVSchemaRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetVSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetVSchemaRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetVSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVSchemaRequest.Merge(m, src) +} +func (m *GetVSchemaRequest) XXX_Size() int { + return m.Size() +} +func (m *GetVSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVSchemaRequest proto.InternalMessageInfo + +func (m *GetVSchemaRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +type GetVSchemaResponse struct { + VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVSchemaResponse) Reset() { *m = GetVSchemaResponse{} } +func (m *GetVSchemaResponse) String() string { return 
proto.CompactTextString(m) } +func (*GetVSchemaResponse) ProtoMessage() {} +func (*GetVSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{48} +} +func (m *GetVSchemaResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetVSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetVSchemaResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetVSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVSchemaResponse.Merge(m, src) +} +func (m *GetVSchemaResponse) XXX_Size() int { + return m.Size() +} +func (m *GetVSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetVSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVSchemaResponse proto.InternalMessageInfo + +func (m *GetVSchemaResponse) GetVSchema() *vschema.Keyspace { + if m != nil { + return m.VSchema + } + return nil +} + +type GetWorkflowsRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowsRequest) Reset() { *m = GetWorkflowsRequest{} } +func (m *GetWorkflowsRequest) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowsRequest) ProtoMessage() {} +func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{49} +} +func (m *GetWorkflowsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetWorkflowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_GetWorkflowsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetWorkflowsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowsRequest.Merge(m, src) +} +func (m *GetWorkflowsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetWorkflowsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowsRequest proto.InternalMessageInfo + +func (m *GetWorkflowsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *GetWorkflowsRequest) GetActiveOnly() bool { + if m != nil { + return m.ActiveOnly + } + return false +} + +type GetWorkflowsResponse struct { + Workflows []*Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowsResponse) Reset() { *m = GetWorkflowsResponse{} } +func (m *GetWorkflowsResponse) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowsResponse) ProtoMessage() {} +func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{50} +} +func (m *GetWorkflowsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetWorkflowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetWorkflowsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetWorkflowsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowsResponse.Merge(m, src) +} +func (m *GetWorkflowsResponse) XXX_Size() int { + return m.Size() +} +func (m 
*GetWorkflowsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowsResponse proto.InternalMessageInfo + +func (m *GetWorkflowsResponse) GetWorkflows() []*Workflow { + if m != nil { + return m.Workflows + } + return nil +} + +type InitShardPrimaryRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + PrimaryElectTabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary_elect_tablet_alias,json=primaryElectTabletAlias,proto3" json:"primary_elect_tablet_alias,omitempty"` + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitShardPrimaryRequest) Reset() { *m = InitShardPrimaryRequest{} } +func (m *InitShardPrimaryRequest) String() string { return proto.CompactTextString(m) } +func (*InitShardPrimaryRequest) ProtoMessage() {} +func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{51} +} +func (m *InitShardPrimaryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InitShardPrimaryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InitShardPrimaryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InitShardPrimaryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitShardPrimaryRequest.Merge(m, src) +} +func (m *InitShardPrimaryRequest) XXX_Size() int { + return 
m.Size() +} +func (m *InitShardPrimaryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitShardPrimaryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitShardPrimaryRequest proto.InternalMessageInfo + +func (m *InitShardPrimaryRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *InitShardPrimaryRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *InitShardPrimaryRequest) GetPrimaryElectTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.PrimaryElectTabletAlias + } + return nil +} + +func (m *InitShardPrimaryRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +func (m *InitShardPrimaryRequest) GetWaitReplicasTimeout() *vttime.Duration { + if m != nil { + return m.WaitReplicasTimeout + } + return nil +} + +type InitShardPrimaryResponse struct { + Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitShardPrimaryResponse) Reset() { *m = InitShardPrimaryResponse{} } +func (m *InitShardPrimaryResponse) String() string { return proto.CompactTextString(m) } +func (*InitShardPrimaryResponse) ProtoMessage() {} +func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{52} +} +func (m *InitShardPrimaryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InitShardPrimaryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InitShardPrimaryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InitShardPrimaryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitShardPrimaryResponse.Merge(m, src) +} 
+func (m *InitShardPrimaryResponse) XXX_Size() int { + return m.Size() +} +func (m *InitShardPrimaryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitShardPrimaryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitShardPrimaryResponse proto.InternalMessageInfo + +func (m *InitShardPrimaryResponse) GetEvents() []*logutil.Event { + if m != nil { + return m.Events + } + return nil +} + +type PlannedReparentShardRequest struct { + // Keyspace is the name of the keyspace to perform the Planned Reparent in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to perform teh Planned Reparent in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // NewPrimary is the alias of the tablet to promote to shard primary. If not + // specified, the vtctld will select the most up-to-date candidate to promote. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias. + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + // AvoidPrimary is the alias of the tablet to demote. In other words, + // specifying an AvoidPrimary alias tells the vtctld to promote any replica + // other than this one. A shard whose current primary is not this one is then + // a no-op. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias. + AvoidPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=avoid_primary,json=avoidPrimary,proto3" json:"avoid_primary,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in replication both before and after the reparent. The timeout is not + // cumulative across both wait periods, meaning that the replicas have + // WaitReplicasTimeout time to catch up before the reparent, and an additional + // WaitReplicasTimeout time to catch up after the reparent. 
+ WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlannedReparentShardRequest) Reset() { *m = PlannedReparentShardRequest{} } +func (m *PlannedReparentShardRequest) String() string { return proto.CompactTextString(m) } +func (*PlannedReparentShardRequest) ProtoMessage() {} +func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{53} +} +func (m *PlannedReparentShardRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlannedReparentShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PlannedReparentShardRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PlannedReparentShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlannedReparentShardRequest.Merge(m, src) +} +func (m *PlannedReparentShardRequest) XXX_Size() int { + return m.Size() +} +func (m *PlannedReparentShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PlannedReparentShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PlannedReparentShardRequest proto.InternalMessageInfo + +func (m *PlannedReparentShardRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *PlannedReparentShardRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *PlannedReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { + if m != nil { + return m.NewPrimary + } + return nil +} + +func (m *PlannedReparentShardRequest) GetAvoidPrimary() *topodata.TabletAlias { + if m != nil { + return m.AvoidPrimary + } + return nil +} 
+ +func (m *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { + if m != nil { + return m.WaitReplicasTimeout + } + return nil +} + +type PlannedReparentShardResponse struct { + // Keyspace is the name of the keyspace the Planned Reparent took place in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the Planned Reparent took place in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date. + PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlannedReparentShardResponse) Reset() { *m = PlannedReparentShardResponse{} } +func (m *PlannedReparentShardResponse) String() string { return proto.CompactTextString(m) } +func (*PlannedReparentShardResponse) ProtoMessage() {} +func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{54} +} +func (m *PlannedReparentShardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlannedReparentShardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PlannedReparentShardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PlannedReparentShardResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PlannedReparentShardResponse.Merge(m, src) +} +func (m *PlannedReparentShardResponse) XXX_Size() int { + return m.Size() +} +func (m *PlannedReparentShardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PlannedReparentShardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PlannedReparentShardResponse proto.InternalMessageInfo + +func (m *PlannedReparentShardResponse) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *PlannedReparentShardResponse) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *PlannedReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if m != nil { + return m.PromotedPrimary + } + return nil +} + +func (m *PlannedReparentShardResponse) GetEvents() []*logutil.Event { + if m != nil { + return m.Events + } + return nil +} + +type RemoveKeyspaceCellRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Cell string `protobuf:"bytes,2,opt,name=cell,proto3" json:"cell,omitempty"` + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace. 
+ Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveKeyspaceCellRequest) Reset() { *m = RemoveKeyspaceCellRequest{} } +func (m *RemoveKeyspaceCellRequest) String() string { return proto.CompactTextString(m) } +func (*RemoveKeyspaceCellRequest) ProtoMessage() {} +func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{55} +} +func (m *RemoveKeyspaceCellRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveKeyspaceCellRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveKeyspaceCellRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveKeyspaceCellRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveKeyspaceCellRequest.Merge(m, src) +} +func (m *RemoveKeyspaceCellRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveKeyspaceCellRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveKeyspaceCellRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveKeyspaceCellRequest proto.InternalMessageInfo + +func (m *RemoveKeyspaceCellRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *RemoveKeyspaceCellRequest) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +func (m *RemoveKeyspaceCellRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +func (m *RemoveKeyspaceCellRequest) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + +type RemoveKeyspaceCellResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *RemoveKeyspaceCellResponse) Reset() { *m = RemoveKeyspaceCellResponse{} } +func (m *RemoveKeyspaceCellResponse) String() string { return proto.CompactTextString(m) } +func (*RemoveKeyspaceCellResponse) ProtoMessage() {} +func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{56} +} +func (m *RemoveKeyspaceCellResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveKeyspaceCellResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveKeyspaceCellResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveKeyspaceCellResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveKeyspaceCellResponse.Merge(m, src) +} +func (m *RemoveKeyspaceCellResponse) XXX_Size() int { + return m.Size() +} +func (m *RemoveKeyspaceCellResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveKeyspaceCellResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveKeyspaceCellResponse proto.InternalMessageInfo + +type RemoveShardCellRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace and shard. 
+ Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveShardCellRequest) Reset() { *m = RemoveShardCellRequest{} } +func (m *RemoveShardCellRequest) String() string { return proto.CompactTextString(m) } +func (*RemoveShardCellRequest) ProtoMessage() {} +func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{57} +} +func (m *RemoveShardCellRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveShardCellRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveShardCellRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveShardCellRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveShardCellRequest.Merge(m, src) +} +func (m *RemoveShardCellRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveShardCellRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveShardCellRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveShardCellRequest proto.InternalMessageInfo + +func (m *RemoveShardCellRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *RemoveShardCellRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +func (m *RemoveShardCellRequest) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +func (m *RemoveShardCellRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +func (m *RemoveShardCellRequest) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + +type RemoveShardCellResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveShardCellResponse) Reset() { *m = RemoveShardCellResponse{} } +func (m *RemoveShardCellResponse) String() string { return proto.CompactTextString(m) } +func (*RemoveShardCellResponse) ProtoMessage() {} +func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{58} +} +func (m *RemoveShardCellResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveShardCellResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveShardCellResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveShardCellResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveShardCellResponse.Merge(m, src) +} +func (m *RemoveShardCellResponse) XXX_Size() int { + return m.Size() +} +func (m *RemoveShardCellResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveShardCellResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveShardCellResponse proto.InternalMessageInfo + +type ReparentTabletRequest struct { + // Tablet is the alias of the tablet that should be reparented under the + // current shard primary. 
+ Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReparentTabletRequest) Reset() { *m = ReparentTabletRequest{} } +func (m *ReparentTabletRequest) String() string { return proto.CompactTextString(m) } +func (*ReparentTabletRequest) ProtoMessage() {} +func (*ReparentTabletRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{59} +} +func (m *ReparentTabletRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReparentTabletRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReparentTabletRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReparentTabletRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReparentTabletRequest.Merge(m, src) +} +func (m *ReparentTabletRequest) XXX_Size() int { + return m.Size() +} +func (m *ReparentTabletRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReparentTabletRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReparentTabletRequest proto.InternalMessageInfo + +func (m *ReparentTabletRequest) GetTablet() *topodata.TabletAlias { + if m != nil { + return m.Tablet + } + return nil +} + +type ReparentTabletResponse struct { + // Keyspace is the name of the keyspace the tablet was reparented in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the tablet was reparented in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Primary is the alias of the tablet that the tablet was reparented under. 
+ Primary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReparentTabletResponse) Reset() { *m = ReparentTabletResponse{} } +func (m *ReparentTabletResponse) String() string { return proto.CompactTextString(m) } +func (*ReparentTabletResponse) ProtoMessage() {} +func (*ReparentTabletResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{60} +} +func (m *ReparentTabletResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReparentTabletResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReparentTabletResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReparentTabletResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReparentTabletResponse.Merge(m, src) +} +func (m *ReparentTabletResponse) XXX_Size() int { + return m.Size() +} +func (m *ReparentTabletResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReparentTabletResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReparentTabletResponse proto.InternalMessageInfo + +func (m *ReparentTabletResponse) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *ReparentTabletResponse) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *ReparentTabletResponse) GetPrimary() *topodata.TabletAlias { + if m != nil { + return m.Primary + } + return nil +} + +type ShardReplicationPositionsRequest struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardReplicationPositionsRequest) Reset() { *m = ShardReplicationPositionsRequest{} } +func (m *ShardReplicationPositionsRequest) String() string { return proto.CompactTextString(m) } +func (*ShardReplicationPositionsRequest) ProtoMessage() {} +func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{61} +} +func (m *ShardReplicationPositionsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShardReplicationPositionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ShardReplicationPositionsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ShardReplicationPositionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardReplicationPositionsRequest.Merge(m, src) +} +func (m *ShardReplicationPositionsRequest) XXX_Size() int { + return m.Size() +} +func (m *ShardReplicationPositionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ShardReplicationPositionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardReplicationPositionsRequest proto.InternalMessageInfo + +func (m *ShardReplicationPositionsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *ShardReplicationPositionsRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +type ShardReplicationPositionsResponse struct { + // ReplicationStatuses is a mapping of tablet alias string to replication + // status for that tablet. 
+ ReplicationStatuses map[string]*replicationdata.Status `protobuf:"bytes,1,rep,name=replication_statuses,json=replicationStatuses,proto3" json:"replication_statuses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // TabletMap is the set of tablets whose replication statuses were queried, + // keyed by tablet alias. + TabletMap map[string]*topodata.Tablet `protobuf:"bytes,2,rep,name=tablet_map,json=tabletMap,proto3" json:"tablet_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardReplicationPositionsResponse) Reset() { *m = ShardReplicationPositionsResponse{} } +func (m *ShardReplicationPositionsResponse) String() string { return proto.CompactTextString(m) } +func (*ShardReplicationPositionsResponse) ProtoMessage() {} +func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{62} +} +func (m *ShardReplicationPositionsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShardReplicationPositionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ShardReplicationPositionsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ShardReplicationPositionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardReplicationPositionsResponse.Merge(m, src) +} +func (m *ShardReplicationPositionsResponse) XXX_Size() int { + return m.Size() +} +func (m *ShardReplicationPositionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ShardReplicationPositionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardReplicationPositionsResponse proto.InternalMessageInfo 
+ +func (m *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status { + if m != nil { + return m.ReplicationStatuses + } + return nil +} + +func (m *ShardReplicationPositionsResponse) GetTabletMap() map[string]*topodata.Tablet { + if m != nil { + return m.TabletMap + } + return nil +} + +type TabletExternallyReparentedRequest struct { + // Tablet is the alias of the tablet that was promoted externally and should + // be updated to the shard primary in the topo. + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TabletExternallyReparentedRequest) Reset() { *m = TabletExternallyReparentedRequest{} } +func (m *TabletExternallyReparentedRequest) String() string { return proto.CompactTextString(m) } +func (*TabletExternallyReparentedRequest) ProtoMessage() {} +func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{63} +} +func (m *TabletExternallyReparentedRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TabletExternallyReparentedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TabletExternallyReparentedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TabletExternallyReparentedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TabletExternallyReparentedRequest.Merge(m, src) +} +func (m *TabletExternallyReparentedRequest) XXX_Size() int { + return m.Size() +} +func (m *TabletExternallyReparentedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TabletExternallyReparentedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TabletExternallyReparentedRequest 
proto.InternalMessageInfo + +func (m *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias { + if m != nil { + return m.Tablet + } + return nil +} + +type TabletExternallyReparentedResponse struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + OldPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=old_primary,json=oldPrimary,proto3" json:"old_primary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TabletExternallyReparentedResponse) Reset() { *m = TabletExternallyReparentedResponse{} } +func (m *TabletExternallyReparentedResponse) String() string { return proto.CompactTextString(m) } +func (*TabletExternallyReparentedResponse) ProtoMessage() {} +func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{64} +} +func (m *TabletExternallyReparentedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TabletExternallyReparentedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TabletExternallyReparentedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TabletExternallyReparentedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TabletExternallyReparentedResponse.Merge(m, src) +} +func (m *TabletExternallyReparentedResponse) XXX_Size() int { + return m.Size() +} +func (m *TabletExternallyReparentedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TabletExternallyReparentedResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_TabletExternallyReparentedResponse proto.InternalMessageInfo + +func (m *TabletExternallyReparentedResponse) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *TabletExternallyReparentedResponse) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *TabletExternallyReparentedResponse) GetNewPrimary() *topodata.TabletAlias { + if m != nil { + return m.NewPrimary + } + return nil +} + +func (m *TabletExternallyReparentedResponse) GetOldPrimary() *topodata.TabletAlias { + if m != nil { + return m.OldPrimary + } + return nil +} + +func init() { + proto.RegisterType((*ExecuteVtctlCommandRequest)(nil), "vtctldata.ExecuteVtctlCommandRequest") + proto.RegisterType((*ExecuteVtctlCommandResponse)(nil), "vtctldata.ExecuteVtctlCommandResponse") + proto.RegisterType((*TableMaterializeSettings)(nil), "vtctldata.TableMaterializeSettings") + proto.RegisterType((*MaterializeSettings)(nil), "vtctldata.MaterializeSettings") + proto.RegisterType((*Keyspace)(nil), "vtctldata.Keyspace") + proto.RegisterType((*Shard)(nil), "vtctldata.Shard") + proto.RegisterType((*Workflow)(nil), "vtctldata.Workflow") + proto.RegisterMapType((map[string]*Workflow_ShardStream)(nil), "vtctldata.Workflow.ShardStreamsEntry") + proto.RegisterType((*Workflow_ReplicationLocation)(nil), "vtctldata.Workflow.ReplicationLocation") + proto.RegisterType((*Workflow_ShardStream)(nil), "vtctldata.Workflow.ShardStream") + proto.RegisterType((*Workflow_Stream)(nil), "vtctldata.Workflow.Stream") + proto.RegisterType((*Workflow_Stream_CopyState)(nil), "vtctldata.Workflow.Stream.CopyState") + proto.RegisterType((*ChangeTabletTypeRequest)(nil), "vtctldata.ChangeTabletTypeRequest") + proto.RegisterType((*ChangeTabletTypeResponse)(nil), "vtctldata.ChangeTabletTypeResponse") + proto.RegisterType((*CreateKeyspaceRequest)(nil), "vtctldata.CreateKeyspaceRequest") + proto.RegisterType((*CreateKeyspaceResponse)(nil), 
"vtctldata.CreateKeyspaceResponse") + proto.RegisterType((*CreateShardRequest)(nil), "vtctldata.CreateShardRequest") + proto.RegisterType((*CreateShardResponse)(nil), "vtctldata.CreateShardResponse") + proto.RegisterType((*DeleteKeyspaceRequest)(nil), "vtctldata.DeleteKeyspaceRequest") + proto.RegisterType((*DeleteKeyspaceResponse)(nil), "vtctldata.DeleteKeyspaceResponse") + proto.RegisterType((*DeleteShardsRequest)(nil), "vtctldata.DeleteShardsRequest") + proto.RegisterType((*DeleteShardsResponse)(nil), "vtctldata.DeleteShardsResponse") + proto.RegisterType((*DeleteTabletsRequest)(nil), "vtctldata.DeleteTabletsRequest") + proto.RegisterType((*DeleteTabletsResponse)(nil), "vtctldata.DeleteTabletsResponse") + proto.RegisterType((*EmergencyReparentShardRequest)(nil), "vtctldata.EmergencyReparentShardRequest") + proto.RegisterType((*EmergencyReparentShardResponse)(nil), "vtctldata.EmergencyReparentShardResponse") + proto.RegisterType((*FindAllShardsInKeyspaceRequest)(nil), "vtctldata.FindAllShardsInKeyspaceRequest") + proto.RegisterType((*FindAllShardsInKeyspaceResponse)(nil), "vtctldata.FindAllShardsInKeyspaceResponse") + proto.RegisterMapType((map[string]*Shard)(nil), "vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry") + proto.RegisterType((*GetBackupsRequest)(nil), "vtctldata.GetBackupsRequest") + proto.RegisterType((*GetBackupsResponse)(nil), "vtctldata.GetBackupsResponse") + proto.RegisterType((*GetCellInfoNamesRequest)(nil), "vtctldata.GetCellInfoNamesRequest") + proto.RegisterType((*GetCellInfoNamesResponse)(nil), "vtctldata.GetCellInfoNamesResponse") + proto.RegisterType((*GetCellInfoRequest)(nil), "vtctldata.GetCellInfoRequest") + proto.RegisterType((*GetCellInfoResponse)(nil), "vtctldata.GetCellInfoResponse") + proto.RegisterType((*GetCellsAliasesRequest)(nil), "vtctldata.GetCellsAliasesRequest") + proto.RegisterType((*GetCellsAliasesResponse)(nil), "vtctldata.GetCellsAliasesResponse") + proto.RegisterMapType((map[string]*topodata.CellsAlias)(nil), 
"vtctldata.GetCellsAliasesResponse.AliasesEntry") + proto.RegisterType((*GetKeyspacesRequest)(nil), "vtctldata.GetKeyspacesRequest") + proto.RegisterType((*GetKeyspacesResponse)(nil), "vtctldata.GetKeyspacesResponse") + proto.RegisterType((*GetKeyspaceRequest)(nil), "vtctldata.GetKeyspaceRequest") + proto.RegisterType((*GetKeyspaceResponse)(nil), "vtctldata.GetKeyspaceResponse") + proto.RegisterType((*GetSchemaRequest)(nil), "vtctldata.GetSchemaRequest") + proto.RegisterType((*GetSchemaResponse)(nil), "vtctldata.GetSchemaResponse") + proto.RegisterType((*GetShardRequest)(nil), "vtctldata.GetShardRequest") + proto.RegisterType((*GetShardResponse)(nil), "vtctldata.GetShardResponse") + proto.RegisterType((*GetSrvKeyspacesRequest)(nil), "vtctldata.GetSrvKeyspacesRequest") + proto.RegisterType((*GetSrvKeyspacesResponse)(nil), "vtctldata.GetSrvKeyspacesResponse") + proto.RegisterMapType((map[string]*topodata.SrvKeyspace)(nil), "vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry") + proto.RegisterType((*GetSrvVSchemaRequest)(nil), "vtctldata.GetSrvVSchemaRequest") + proto.RegisterType((*GetSrvVSchemaResponse)(nil), "vtctldata.GetSrvVSchemaResponse") + proto.RegisterType((*GetTabletRequest)(nil), "vtctldata.GetTabletRequest") + proto.RegisterType((*GetTabletResponse)(nil), "vtctldata.GetTabletResponse") + proto.RegisterType((*GetTabletsRequest)(nil), "vtctldata.GetTabletsRequest") + proto.RegisterType((*GetTabletsResponse)(nil), "vtctldata.GetTabletsResponse") + proto.RegisterType((*GetVSchemaRequest)(nil), "vtctldata.GetVSchemaRequest") + proto.RegisterType((*GetVSchemaResponse)(nil), "vtctldata.GetVSchemaResponse") + proto.RegisterType((*GetWorkflowsRequest)(nil), "vtctldata.GetWorkflowsRequest") + proto.RegisterType((*GetWorkflowsResponse)(nil), "vtctldata.GetWorkflowsResponse") + proto.RegisterType((*InitShardPrimaryRequest)(nil), "vtctldata.InitShardPrimaryRequest") + proto.RegisterType((*InitShardPrimaryResponse)(nil), "vtctldata.InitShardPrimaryResponse") + 
proto.RegisterType((*PlannedReparentShardRequest)(nil), "vtctldata.PlannedReparentShardRequest") + proto.RegisterType((*PlannedReparentShardResponse)(nil), "vtctldata.PlannedReparentShardResponse") + proto.RegisterType((*RemoveKeyspaceCellRequest)(nil), "vtctldata.RemoveKeyspaceCellRequest") + proto.RegisterType((*RemoveKeyspaceCellResponse)(nil), "vtctldata.RemoveKeyspaceCellResponse") + proto.RegisterType((*RemoveShardCellRequest)(nil), "vtctldata.RemoveShardCellRequest") + proto.RegisterType((*RemoveShardCellResponse)(nil), "vtctldata.RemoveShardCellResponse") + proto.RegisterType((*ReparentTabletRequest)(nil), "vtctldata.ReparentTabletRequest") + proto.RegisterType((*ReparentTabletResponse)(nil), "vtctldata.ReparentTabletResponse") + proto.RegisterType((*ShardReplicationPositionsRequest)(nil), "vtctldata.ShardReplicationPositionsRequest") + proto.RegisterType((*ShardReplicationPositionsResponse)(nil), "vtctldata.ShardReplicationPositionsResponse") + proto.RegisterMapType((map[string]*replicationdata.Status)(nil), "vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry") + proto.RegisterMapType((map[string]*topodata.Tablet)(nil), "vtctldata.ShardReplicationPositionsResponse.TabletMapEntry") + proto.RegisterType((*TabletExternallyReparentedRequest)(nil), "vtctldata.TabletExternallyReparentedRequest") + proto.RegisterType((*TabletExternallyReparentedResponse)(nil), "vtctldata.TabletExternallyReparentedResponse") +} + +func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_f41247b323a1ab2e) } + +var fileDescriptor_f41247b323a1ab2e = []byte{ + // 2731 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7, + 0xb5, 0xcb, 0x2f, 0x89, 0x8f, 0x1f, 0x92, 0x56, 0x94, 0xb4, 0x61, 0x6c, 0x59, 0x5e, 0xc7, 0x8e, + 0xea, 0xc4, 0x94, 0xad, 0x24, 0x86, 0xe1, 0x24, 0xad, 0x6d, 0x89, 0x32, 0xe4, 0x38, 0xaa, 0xba, + 0x54, 0x15, 0x34, 0x87, 0x6e, 0x47, 0xe4, 0x88, 0x5e, 0x68, 
0xb9, 0xcb, 0xec, 0x0c, 0x29, 0x31, + 0x3d, 0xf4, 0xd2, 0x1e, 0x02, 0x14, 0xe8, 0xb5, 0x40, 0x50, 0xa0, 0xa7, 0xa2, 0xe8, 0xad, 0x97, + 0x00, 0x2d, 0x8a, 0x1e, 0x8b, 0x1e, 0x7a, 0xe8, 0xb5, 0xb7, 0xc2, 0xfd, 0x19, 0xbd, 0x14, 0xf3, + 0xb5, 0x5c, 0x2e, 0x97, 0xb4, 0x2c, 0x1b, 0x28, 0x7a, 0x12, 0xe7, 0xcd, 0x7b, 0xf3, 0xde, 0xbc, + 0xef, 0x37, 0x2b, 0x98, 0xeb, 0xd3, 0x26, 0x75, 0x5b, 0x88, 0xa2, 0x5a, 0x37, 0xf0, 0xa9, 0xaf, + 0xe7, 0x43, 0x40, 0x75, 0xfe, 0xc8, 0xf1, 0x5c, 0xbf, 0x3d, 0xdc, 0xac, 0x96, 0x5c, 0xbf, 0xdd, + 0xa3, 0x8e, 0x2b, 0x97, 0xe5, 0xce, 0x80, 0x7c, 0xe1, 0x36, 0xa9, 0x5a, 0x2f, 0x05, 0xb8, 0xeb, + 0x3a, 0x4d, 0x44, 0x1d, 0xdf, 0x8b, 0x50, 0xad, 0x50, 0x74, 0xe4, 0x62, 0xda, 0x41, 0x1e, 0x6a, + 0xe3, 0x20, 0xb2, 0x51, 0xa6, 0x7e, 0xd7, 0x8f, 0x1e, 0xdf, 0x27, 0xcd, 0x67, 0xb8, 0xa3, 0x96, + 0xc5, 0x3e, 0xa5, 0x4e, 0x07, 0x8b, 0x95, 0xf9, 0x19, 0x54, 0xeb, 0x67, 0xb8, 0xd9, 0xa3, 0xf8, + 0x90, 0x49, 0xb8, 0xe5, 0x77, 0x3a, 0xc8, 0x6b, 0x59, 0xf8, 0x8b, 0x1e, 0x26, 0x54, 0xd7, 0x21, + 0x83, 0x82, 0x36, 0x31, 0xb4, 0xb5, 0xf4, 0x7a, 0xde, 0xe2, 0xbf, 0xf5, 0xeb, 0x50, 0x46, 0x4d, + 0x26, 0x8b, 0xcd, 0x8e, 0xf1, 0x7b, 0xd4, 0x48, 0xad, 0x69, 0xeb, 0x69, 0xab, 0x24, 0xa0, 0x07, + 0x02, 0x68, 0x6e, 0xc1, 0x9b, 0x89, 0x07, 0x93, 0xae, 0xef, 0x11, 0xac, 0xbf, 0x05, 0x59, 0xdc, + 0xc7, 0x1e, 0x35, 0xb4, 0x35, 0x6d, 0xbd, 0xb0, 0x59, 0xae, 0x29, 0x1d, 0xd4, 0x19, 0xd4, 0x12, + 0x9b, 0xe6, 0x57, 0x1a, 0x18, 0x07, 0xec, 0x9a, 0x9f, 0x22, 0x8a, 0x03, 0x07, 0xb9, 0xce, 0x97, + 0xb8, 0x81, 0x29, 0x75, 0xbc, 0x36, 0xd1, 0xaf, 0x42, 0x91, 0xa2, 0xa0, 0x8d, 0xa9, 0xcd, 0x35, + 0xc1, 0x4f, 0xca, 0x5b, 0x05, 0x01, 0xe3, 0x54, 0xfa, 0x3b, 0xb0, 0x40, 0xfc, 0x5e, 0xd0, 0xc4, + 0x36, 0x3e, 0xeb, 0x06, 0x98, 0x10, 0xc7, 0xf7, 0xb8, 0xb8, 0x79, 0x6b, 0x5e, 0x6c, 0xd4, 0x43, + 0xb8, 0x7e, 0x19, 0xa0, 0x19, 0x60, 0x44, 0xb1, 0xdd, 0x6a, 0xb9, 0x46, 0x9a, 0x63, 0xe5, 0x05, + 0x64, 0xbb, 0xe5, 0x9a, 0xff, 0x4c, 0xc1, 0x62, 0x92, 0x18, 0x55, 0x98, 0x3d, 0xf5, 0x83, 0x93, + 
0x63, 0xd7, 0x3f, 0x95, 0x22, 0x84, 0x6b, 0xfd, 0x6d, 0x98, 0x93, 0xfc, 0x4f, 0xf0, 0x80, 0x74, + 0x51, 0x13, 0x4b, 0xee, 0x65, 0x01, 0xfe, 0x44, 0x42, 0x19, 0xa2, 0xbc, 0x4b, 0x88, 0x28, 0x04, + 0x28, 0x0b, 0x70, 0x88, 0x78, 0x03, 0xe6, 0x08, 0xf5, 0xbb, 0x36, 0x3a, 0xa6, 0x38, 0xb0, 0x9b, + 0x7e, 0x77, 0x60, 0x64, 0xd6, 0xb4, 0xf5, 0x59, 0xab, 0xc4, 0xc0, 0x0f, 0x19, 0x74, 0xcb, 0xef, + 0x0e, 0xf4, 0x27, 0x50, 0xe6, 0x5a, 0xb1, 0x89, 0x94, 0xd3, 0xc8, 0xae, 0xa5, 0xd7, 0x0b, 0x9b, + 0xd7, 0x6a, 0x43, 0xd7, 0x9c, 0xa4, 0x59, 0xab, 0xc4, 0x49, 0xc3, 0x1b, 0xea, 0x90, 0x69, 0x62, + 0xd7, 0x35, 0x72, 0x5c, 0x22, 0xfe, 0x5b, 0x28, 0x9f, 0xf9, 0x9f, 0x4d, 0x07, 0x5d, 0x4c, 0x8c, + 0x19, 0xa5, 0x7c, 0x06, 0x3b, 0x60, 0x20, 0xfd, 0xdb, 0x30, 0x8f, 0xcf, 0x28, 0x0e, 0x3c, 0xe4, + 0xda, 0x4d, 0xb7, 0x47, 0x28, 0x0e, 0x8c, 0x59, 0x8e, 0x36, 0xa7, 0xe0, 0x5b, 0x02, 0x6c, 0xee, + 0xc1, 0x6c, 0x78, 0x43, 0x1d, 0x32, 0x1e, 0xea, 0x28, 0x73, 0xf2, 0xdf, 0x7a, 0x0d, 0x66, 0x47, + 0x14, 0x58, 0xd8, 0xd4, 0x6b, 0xa1, 0x97, 0x2b, 0x4a, 0x2b, 0xc4, 0x31, 0x7f, 0x04, 0xd9, 0xc6, + 0x33, 0x14, 0xb4, 0x98, 0x71, 0x42, 0x42, 0x69, 0x9c, 0x93, 0x38, 0xa3, 0x54, 0x84, 0xd1, 0x75, + 0xc8, 0x12, 0x46, 0xc8, 0xb5, 0x5f, 0xd8, 0x9c, 0x1b, 0x72, 0xe1, 0xe7, 0x59, 0x62, 0xd7, 0xfc, + 0x5d, 0x1e, 0x66, 0x3f, 0x53, 0x46, 0x4e, 0x12, 0xf8, 0xbb, 0x90, 0x13, 0x16, 0x96, 0xe2, 0xbe, + 0x1d, 0x51, 0xbb, 0x22, 0xac, 0x59, 0xc3, 0xb8, 0x7e, 0xea, 0x8b, 0xbf, 0x96, 0x24, 0x63, 0x07, + 0x08, 0xcb, 0x4b, 0x49, 0xce, 0x7f, 0x80, 0x20, 0xd3, 0xef, 0xc0, 0x52, 0x07, 0x9d, 0xd9, 0x7d, + 0x3b, 0x92, 0x3d, 0x6c, 0x17, 0xb5, 0xb9, 0xbb, 0xa4, 0x2d, 0xbd, 0x83, 0xce, 0x0e, 0xa3, 0xf4, + 0xa8, 0xad, 0x3f, 0x81, 0x12, 0xbf, 0x9e, 0x4d, 0x68, 0x80, 0x51, 0x47, 0xb9, 0xcc, 0xf5, 0x24, + 0xd6, 0x5c, 0x1d, 0x0d, 0x81, 0x57, 0xf7, 0x68, 0x30, 0xb0, 0x8a, 0x24, 0x02, 0xaa, 0xfe, 0x18, + 0x16, 0xc6, 0x50, 0xf4, 0x79, 0x48, 0x9f, 0xe0, 0x81, 0x54, 0x14, 0xfb, 0xa9, 0x7f, 0x00, 0xd9, + 0x3e, 0x72, 0x7b, 0x4a, 0x4d, 0x57, 
0x5e, 0xc0, 0xca, 0x12, 0xd8, 0xf7, 0x53, 0xf7, 0xb4, 0xea, + 0x2e, 0x2c, 0x26, 0xdc, 0x7f, 0xaa, 0xc5, 0x97, 0x21, 0xc7, 0x85, 0x24, 0x46, 0x8a, 0x27, 0x34, + 0xb9, 0xaa, 0xfe, 0x51, 0x83, 0x42, 0x84, 0x8b, 0xfe, 0x3e, 0xcc, 0x28, 0x15, 0x68, 0x5c, 0x05, + 0xd5, 0x44, 0xb9, 0x84, 0x48, 0x0a, 0x55, 0xdf, 0x61, 0x31, 0xcc, 0x43, 0xa2, 0xe9, 0x7b, 0x34, + 0xf0, 0x5d, 0xc1, 0xa6, 0xb0, 0x79, 0x39, 0xe6, 0x45, 0x22, 0xf0, 0xe8, 0x96, 0xc0, 0xb2, 0x44, + 0xa0, 0xaa, 0x25, 0xd1, 0xdf, 0x05, 0xdd, 0x21, 0x76, 0x37, 0x70, 0x3a, 0x28, 0x18, 0xd8, 0x04, + 0x07, 0x7d, 0xc7, 0x6b, 0x73, 0x37, 0x98, 0xb5, 0xe6, 0x1d, 0xb2, 0x2f, 0x36, 0x1a, 0x02, 0x5e, + 0xfd, 0x75, 0x06, 0x72, 0x52, 0xec, 0x32, 0xa4, 0x9c, 0x16, 0xbf, 0x74, 0xda, 0x4a, 0x39, 0x2d, + 0xbd, 0xa2, 0x9c, 0x59, 0x78, 0xb8, 0x58, 0xe8, 0xb7, 0x98, 0x67, 0x31, 0x86, 0xd2, 0xb3, 0x96, + 0x86, 0xd2, 0x09, 0xb9, 0x1e, 0xba, 0x0e, 0x22, 0x96, 0x44, 0xd2, 0x3f, 0x86, 0x92, 0x28, 0x58, + 0xb6, 0x74, 0xe8, 0x0c, 0xa7, 0x32, 0x6a, 0x91, 0x32, 0xf6, 0x88, 0xff, 0x6c, 0xf0, 0x7d, 0xab, + 0x78, 0x14, 0x59, 0x31, 0x73, 0x74, 0x7d, 0xe2, 0x30, 0xd3, 0x18, 0x59, 0x61, 0x0e, 0xb5, 0xd6, + 0xaf, 0x01, 0x4f, 0x5a, 0x76, 0x88, 0x20, 0x12, 0x4c, 0x91, 0x01, 0xf7, 0x15, 0x12, 0xbb, 0x04, + 0x45, 0x14, 0xcb, 0x0c, 0x23, 0x16, 0xfa, 0x0a, 0xcc, 0xb4, 0x8e, 0x6c, 0x1e, 0x76, 0x22, 0xa5, + 0xe4, 0x5a, 0x47, 0x7b, 0x2c, 0xf0, 0x1e, 0xc2, 0x12, 0x0d, 0x90, 0x47, 0x22, 0x25, 0x8a, 0x50, + 0xd4, 0xe9, 0x1a, 0x79, 0x2e, 0x76, 0xb1, 0x26, 0xab, 0x1f, 0x2b, 0x53, 0x56, 0x25, 0x82, 0x7a, + 0xa0, 0x30, 0xf5, 0x0d, 0x28, 0x32, 0x14, 0xbb, 0xd7, 0x6d, 0x21, 0x8a, 0x5b, 0x06, 0x24, 0x50, + 0x16, 0xd8, 0xcf, 0x1f, 0x08, 0x04, 0xdd, 0x80, 0x99, 0x0e, 0x26, 0x04, 0xb5, 0xb1, 0x51, 0xe0, + 0xc2, 0xa8, 0xa5, 0x5e, 0x87, 0x02, 0x4b, 0xd1, 0x36, 0x17, 0x9a, 0x18, 0x45, 0xee, 0x0e, 0x6f, + 0x4d, 0x76, 0xa6, 0x1a, 0xcb, 0xdd, 0x0d, 0x86, 0x6c, 0x41, 0x53, 0xfd, 0x24, 0xd5, 0xfb, 0x90, + 0x0f, 0x37, 0x98, 0x42, 0xa2, 0xf5, 0x4e, 0x2c, 0x98, 0x42, 0x5c, 0x44, 
0xa8, 0xdd, 0x3d, 0x91, + 0xd6, 0xce, 0xb1, 0xe5, 0xfe, 0x89, 0xf9, 0xb5, 0x06, 0x2b, 0x5b, 0xcf, 0x90, 0xd7, 0xc6, 0x07, + 0x61, 0x6e, 0x56, 0xe5, 0xfd, 0x5e, 0x98, 0xc4, 0x11, 0xb3, 0xb9, 0xac, 0xc5, 0x13, 0x1c, 0x42, + 0xe6, 0x76, 0xbe, 0xd0, 0x6f, 0x71, 0xfd, 0xb3, 0xd4, 0xcf, 0xd9, 0x95, 0x37, 0x2b, 0x71, 0x22, + 0xce, 0x27, 0xd7, 0x3a, 0x62, 0x7f, 0xb9, 0xb9, 0x82, 0x81, 0x1d, 0xf4, 0x3c, 0xe9, 0xc7, 0xb9, + 0x56, 0x30, 0xb0, 0x7a, 0x9e, 0xf9, 0x5b, 0x0d, 0x8c, 0x71, 0xe9, 0x64, 0x8f, 0xf0, 0x01, 0x94, + 0x8e, 0xf0, 0xb1, 0x1f, 0x60, 0x5b, 0x3a, 0xac, 0x90, 0x6f, 0x3e, 0xce, 0xca, 0x2a, 0x0a, 0x34, + 0xb1, 0xd2, 0xdf, 0x83, 0xa2, 0xa8, 0x8e, 0x92, 0x2a, 0x35, 0x81, 0xaa, 0xc0, 0xb1, 0x24, 0xd1, + 0x2a, 0x14, 0x4e, 0x11, 0xb1, 0x47, 0xa5, 0xcc, 0x9f, 0x22, 0xb2, 0x2d, 0x04, 0xfd, 0x26, 0x0d, + 0x4b, 0x5b, 0xbc, 0x17, 0x08, 0xcb, 0xcd, 0xb0, 0x47, 0x1a, 0x4b, 0xff, 0x15, 0xc8, 0x1e, 0xfb, + 0x2a, 0xfb, 0xcf, 0x5a, 0x62, 0xa1, 0x6f, 0x40, 0x05, 0xb9, 0xae, 0x7f, 0x6a, 0xe3, 0x4e, 0x97, + 0x0e, 0xec, 0xbe, 0x2d, 0xfa, 0x32, 0xc9, 0x6c, 0x81, 0xef, 0xd5, 0xd9, 0xd6, 0x61, 0x83, 0x6f, + 0xe8, 0xb7, 0xa1, 0xc2, 0x63, 0xd6, 0xf1, 0xda, 0x76, 0xd3, 0x77, 0x7b, 0x1d, 0x4f, 0xb8, 0x7c, + 0x86, 0xb3, 0xd2, 0xd5, 0xde, 0x16, 0xdf, 0xe2, 0xee, 0xff, 0x64, 0x9c, 0x82, 0x1b, 0x29, 0xcb, + 0x8d, 0x64, 0x8c, 0x17, 0xcd, 0xdd, 0x16, 0x57, 0x79, 0xec, 0x2c, 0x6e, 0xb4, 0x07, 0x50, 0x64, + 0xc9, 0x07, 0xb7, 0xec, 0xe3, 0xc0, 0xef, 0x10, 0x23, 0x17, 0x4f, 0x66, 0xea, 0x8c, 0x5a, 0x83, + 0xa3, 0xed, 0x04, 0x7e, 0xc7, 0x2a, 0x90, 0xf0, 0x37, 0xd1, 0x6f, 0x42, 0x86, 0x73, 0x9f, 0xe1, + 0xdc, 0x97, 0xc7, 0x29, 0x39, 0x6f, 0x8e, 0xc3, 0x92, 0xc1, 0x11, 0x22, 0x91, 0x46, 0x49, 0xc4, + 0x75, 0x91, 0x01, 0xc3, 0xde, 0xe0, 0x0e, 0x94, 0x88, 0x87, 0xba, 0xe4, 0x99, 0x4f, 0x79, 0x68, + 0x27, 0x46, 0x75, 0x51, 0xa1, 0xb0, 0x95, 0xb9, 0x0b, 0xcb, 0x71, 0xbb, 0x49, 0xf7, 0xda, 0x88, + 0x55, 0x8a, 0xc2, 0xe6, 0x62, 0x24, 0x32, 0x13, 0xba, 0x8a, 0x5f, 0x68, 0xa0, 0x8b, 0xb3, 0x44, + 0x33, 0x20, 
0x1d, 0x60, 0x5a, 0xc5, 0xb9, 0x0c, 0x20, 0x4a, 0x6a, 0xa4, 0xd3, 0xc8, 0x73, 0xc8, + 0xde, 0x88, 0x9f, 0xa4, 0xa3, 0x7e, 0x72, 0x1d, 0xca, 0x8e, 0xd7, 0x74, 0x7b, 0x2d, 0x6c, 0x77, + 0x51, 0xc0, 0x9a, 0x64, 0xd9, 0xe2, 0x49, 0xe8, 0x3e, 0x07, 0x9a, 0xbf, 0xd1, 0x60, 0x71, 0x44, + 0x9c, 0x0b, 0xde, 0x4b, 0xbf, 0x11, 0xad, 0x13, 0x2c, 0x52, 0x86, 0xd8, 0xd1, 0xae, 0x27, 0x74, + 0x47, 0x1b, 0xb9, 0x01, 0x46, 0xad, 0x81, 0x8d, 0xcf, 0x1c, 0x42, 0x89, 0x14, 0x5e, 0xb8, 0xd0, + 0x43, 0xb1, 0x55, 0xe7, 0x3b, 0xe6, 0xf7, 0x61, 0x69, 0x1b, 0xbb, 0x78, 0x3c, 0x68, 0xa6, 0xe9, + 0xec, 0x12, 0xe4, 0x03, 0xdc, 0xec, 0x05, 0xc4, 0xe9, 0xab, 0x00, 0x1a, 0x02, 0x4c, 0x03, 0x96, + 0xe3, 0x47, 0x8a, 0x7b, 0x9b, 0x3f, 0xd7, 0x60, 0x51, 0x6c, 0x71, 0xa9, 0x89, 0xe2, 0xb5, 0x1e, + 0x56, 0x7d, 0x51, 0xcc, 0xc7, 0xef, 0x27, 0xf7, 0xa7, 0x73, 0x66, 0xad, 0x37, 0x9b, 0x4a, 0x6c, + 0xe7, 0x38, 0x2c, 0xca, 0xd2, 0x2e, 0x0c, 0xbc, 0x7b, 0x2c, 0x2b, 0xb2, 0xb9, 0x0c, 0x95, 0x51, + 0x31, 0xa4, 0x7c, 0x03, 0x05, 0x17, 0x29, 0x27, 0x94, 0xef, 0x23, 0xd9, 0xaa, 0xcb, 0x2c, 0x8c, + 0x95, 0x9c, 0x13, 0xf2, 0x70, 0x29, 0x92, 0x87, 0x31, 0x61, 0x71, 0x23, 0x92, 0x8a, 0x6c, 0x18, + 0xa4, 0xdc, 0x45, 0x0e, 0x94, 0xbd, 0x82, 0xb9, 0xa2, 0xec, 0x10, 0xb2, 0x96, 0x32, 0xfd, 0x32, + 0x05, 0x97, 0xeb, 0x1d, 0x1c, 0xb4, 0xb1, 0xd7, 0x1c, 0x58, 0x58, 0xb8, 0xdb, 0xb9, 0xbd, 0x3b, + 0xb9, 0xc1, 0xb8, 0x0b, 0x05, 0x0f, 0x0f, 0xe5, 0x99, 0xda, 0x65, 0x80, 0x87, 0x95, 0x90, 0xfa, + 0x77, 0x60, 0xce, 0x69, 0x7b, 0x2c, 0xdd, 0xcb, 0x96, 0x95, 0x18, 0x99, 0x69, 0x8a, 0x28, 0x0b, + 0x6c, 0xd9, 0x04, 0x12, 0x7d, 0x1b, 0x96, 0x4e, 0x91, 0x43, 0x43, 0xea, 0x70, 0x3e, 0xcd, 0x86, + 0x6e, 0xcd, 0x93, 0xc4, 0x76, 0x2f, 0x10, 0xad, 0xf2, 0x22, 0x43, 0x57, 0xe4, 0x6a, 0x6e, 0xfd, + 0xb3, 0x06, 0xab, 0x93, 0x34, 0x22, 0x03, 0xec, 0xe5, 0x55, 0xf2, 0x00, 0xe6, 0xbb, 0x81, 0xdf, + 0xf1, 0x29, 0x6e, 0x9d, 0x4f, 0x2f, 0x73, 0x0a, 0x5d, 0x29, 0xe7, 0x06, 0xe4, 0xf8, 0x48, 0xac, + 0x74, 0x12, 0x1f, 0x98, 0xe5, 0xae, 0xf9, 0x11, 
0xac, 0xee, 0x38, 0x5e, 0xeb, 0xa1, 0xeb, 0x0a, + 0xef, 0xdb, 0xf5, 0x5e, 0x22, 0xf4, 0xcc, 0xbf, 0x68, 0x70, 0x65, 0x22, 0xb9, 0xbc, 0xfd, 0x5e, + 0x2c, 0x9c, 0xee, 0x46, 0xc2, 0xe9, 0x05, 0xb4, 0x22, 0xdc, 0xe4, 0xbc, 0xa0, 0x9a, 0xef, 0x4f, + 0x64, 0xef, 0x3d, 0x71, 0x46, 0xb8, 0x31, 0x3a, 0x23, 0x24, 0xa4, 0xa7, 0x70, 0x28, 0x30, 0xeb, + 0xb0, 0xf0, 0x18, 0xd3, 0x47, 0xa8, 0x79, 0xd2, 0xeb, 0x92, 0x0b, 0xbb, 0xb0, 0xb9, 0x0d, 0x7a, + 0xf4, 0x18, 0x79, 0xf3, 0x1a, 0xcc, 0x1c, 0x09, 0x90, 0xbc, 0x7a, 0xa5, 0x16, 0x3e, 0xd5, 0x08, + 0xdc, 0x5d, 0xef, 0xd8, 0xb7, 0x14, 0x92, 0xf9, 0x06, 0xac, 0x3c, 0xc6, 0x74, 0x0b, 0xbb, 0x2e, + 0x83, 0xb3, 0x84, 0xaf, 0x44, 0x32, 0x6f, 0x83, 0x31, 0xbe, 0x25, 0xd9, 0x54, 0x20, 0xcb, 0xaa, + 0x85, 0x7a, 0x75, 0x11, 0x0b, 0x73, 0x9d, 0x8b, 0xa4, 0x28, 0x22, 0xcd, 0x07, 0x1f, 0xcd, 0xb5, + 0xe1, 0x68, 0x6e, 0xee, 0xc0, 0xe2, 0x08, 0x66, 0x58, 0x16, 0xf2, 0x6c, 0xdb, 0x76, 0xbc, 0x63, + 0x5f, 0xd6, 0x85, 0xc8, 0x10, 0x1d, 0xa2, 0xcf, 0x36, 0xe5, 0x2f, 0x96, 0x69, 0xe5, 0x39, 0x44, + 0x26, 0x1b, 0x25, 0xfd, 0x37, 0x5a, 0x78, 0xb3, 0xe1, 0x96, 0x64, 0xb3, 0x0b, 0x33, 0xa3, 0x69, + 0x6c, 0x23, 0x62, 0xaf, 0x09, 0x44, 0x35, 0xb9, 0x16, 0x8e, 0xa1, 0xe8, 0xab, 0xfb, 0x50, 0x8c, + 0x6e, 0x24, 0xb8, 0xc6, 0xcd, 0x51, 0xd7, 0xa8, 0x8c, 0xde, 0x47, 0xb0, 0x89, 0xba, 0xc7, 0x12, + 0x57, 0x8d, 0x72, 0xcb, 0xf0, 0x3e, 0xbb, 0x50, 0x19, 0x05, 0xcb, 0xbb, 0xdc, 0x81, 0xbc, 0x72, + 0x14, 0x75, 0x9b, 0xc4, 0x52, 0x3a, 0xc4, 0x32, 0x6f, 0x73, 0x33, 0xbd, 0x4c, 0xcc, 0xed, 0x8c, + 0xc8, 0x74, 0xf1, 0xee, 0xe4, 0x67, 0x29, 0x98, 0x7f, 0x8c, 0xa9, 0x68, 0x1d, 0x5f, 0xbd, 0xc3, + 0x5f, 0x96, 0x63, 0x62, 0x38, 0x2b, 0x8b, 0x15, 0x6b, 0x4e, 0xf0, 0x99, 0x68, 0x4e, 0xe4, 0x7e, + 0x9a, 0xef, 0x97, 0x24, 0xf4, 0x40, 0xa0, 0x5d, 0x03, 0xd5, 0xad, 0xd8, 0x7d, 0x07, 0x9f, 0x12, + 0x59, 0x2a, 0x8b, 0x12, 0x78, 0xc8, 0x60, 0xfa, 0x3a, 0xcc, 0x8b, 0x47, 0x2a, 0xee, 0xe2, 0xb6, + 0xef, 0xb9, 0x03, 0x9e, 0xac, 0x67, 0xe5, 0x4c, 0xcc, 0xe3, 0xe2, 0x7b, 0x9e, 0x3b, 
0x18, 0x62, + 0x12, 0xe7, 0x4b, 0x85, 0x99, 0x8b, 0x60, 0x36, 0x18, 0x98, 0x61, 0x9a, 0xfb, 0x3c, 0x03, 0x28, + 0x2d, 0x48, 0x65, 0x7e, 0x08, 0x39, 0xd9, 0x6b, 0x0b, 0x05, 0x5c, 0xab, 0x8d, 0x3f, 0x9e, 0x0a, + 0x92, 0x6d, 0x7c, 0xec, 0x78, 0x8e, 0x7c, 0x8a, 0xe1, 0x10, 0xf3, 0x29, 0xcc, 0xb1, 0x13, 0x5f, + 0x4f, 0xcb, 0x67, 0xde, 0x17, 0x56, 0x1a, 0x29, 0x28, 0x61, 0x03, 0xa6, 0x4d, 0x6d, 0xc0, 0xcc, + 0x27, 0x3c, 0x22, 0x1b, 0x41, 0x3f, 0xee, 0xc1, 0x2f, 0x4a, 0x71, 0x2c, 0xa6, 0x95, 0x21, 0xc5, + 0xc2, 0xfc, 0xbb, 0x88, 0xe1, 0xd1, 0xc3, 0xa4, 0x3c, 0x3f, 0x84, 0x12, 0x09, 0xfa, 0x76, 0xdc, + 0xf7, 0xdf, 0x1f, 0x8d, 0xe4, 0x24, 0xd2, 0x5a, 0x14, 0xa8, 0xde, 0x85, 0x22, 0xa0, 0xea, 0x21, + 0x2c, 0x8c, 0xa1, 0x24, 0x04, 0xf6, 0x3b, 0xa3, 0x81, 0x1d, 0x71, 0xd8, 0x08, 0x75, 0x34, 0xb2, + 0x6f, 0xf2, 0x10, 0x6e, 0x04, 0xfd, 0xc3, 0xd1, 0x00, 0x48, 0x4a, 0x90, 0x7b, 0xb0, 0x14, 0xc3, + 0x0d, 0x07, 0x4e, 0x26, 0xec, 0x70, 0x30, 0x0b, 0xe3, 0x4e, 0x3e, 0xa0, 0x47, 0x48, 0x80, 0x84, + 0xbf, 0xcd, 0xa7, 0xdc, 0xa4, 0x72, 0xaa, 0x7c, 0xd5, 0xc0, 0x33, 0x3f, 0xe6, 0x0e, 0xac, 0x4e, + 0x93, 0x92, 0xad, 0x87, 0x8f, 0x36, 0x93, 0x66, 0x60, 0xb9, 0x6f, 0xfe, 0x41, 0x8b, 0xd0, 0x5f, + 0xbc, 0x04, 0x0e, 0xbd, 0x26, 0x1d, 0xf1, 0x1a, 0xfe, 0x82, 0x46, 0x03, 0xa7, 0xa9, 0x46, 0x12, + 0xb9, 0x4a, 0xe8, 0x61, 0xb3, 0xe7, 0xef, 0x61, 0xcd, 0x07, 0x3c, 0x69, 0xc6, 0x7a, 0x53, 0xfd, + 0x26, 0xcc, 0x08, 0xb4, 0x61, 0xe3, 0x1e, 0xbf, 0xb4, 0x42, 0x30, 0x37, 0xf8, 0xa5, 0x63, 0xb6, + 0x9f, 0x96, 0x75, 0x1f, 0x71, 0x96, 0x71, 0x07, 0x78, 0x17, 0x66, 0x63, 0xc6, 0x5f, 0x08, 0x8d, + 0x1f, 0x7a, 0xdd, 0x4c, 0x5f, 0xda, 0xdd, 0xe2, 0x99, 0x5b, 0x3d, 0xe1, 0x9c, 0x4b, 0xd7, 0x57, + 0xa0, 0x80, 0x9a, 0xd4, 0xe9, 0x63, 0x91, 0xc2, 0x44, 0xaf, 0x0e, 0x02, 0xc4, 0xd3, 0x97, 0x28, + 0x45, 0x91, 0x33, 0x87, 0xa5, 0x48, 0x7d, 0x55, 0x48, 0x2a, 0x45, 0x8a, 0xc0, 0x1a, 0x62, 0x99, + 0xff, 0xd1, 0x60, 0x65, 0xd7, 0x73, 0x44, 0xae, 0x91, 0x7d, 0xe4, 0xc5, 0xfd, 0xc1, 0x82, 0xaa, + 0x7a, 0x92, 0xc4, 0x2e, 
0x6e, 0xca, 0x8f, 0x2e, 0xca, 0xbd, 0xa7, 0x36, 0xb3, 0x2b, 0x92, 0xb0, + 0xce, 0xe8, 0x22, 0x1b, 0xc3, 0xf1, 0x37, 0x13, 0x1d, 0x7f, 0x5f, 0x4f, 0x1f, 0xff, 0x08, 0x8c, + 0xf1, 0xcb, 0x87, 0xf9, 0x56, 0x35, 0xd3, 0xda, 0xd4, 0x66, 0xfa, 0xab, 0x14, 0xbc, 0xb9, 0xef, + 0x22, 0xcf, 0xc3, 0xad, 0xff, 0xf1, 0x6c, 0x74, 0x1f, 0x4a, 0xa8, 0xef, 0x3b, 0xc3, 0xe9, 0x21, + 0x33, 0x8d, 0xb2, 0xc8, 0x71, 0x15, 0xed, 0xeb, 0xd1, 0xe7, 0x9f, 0x34, 0xb8, 0x94, 0xac, 0x8b, + 0xff, 0x83, 0xa9, 0xe8, 0xa7, 0xf0, 0x86, 0x85, 0x3b, 0x7e, 0x3f, 0x7c, 0x34, 0x60, 0xed, 0xe1, + 0x79, 0xac, 0xa8, 0xca, 0x47, 0x2a, 0xf2, 0xe9, 0x2b, 0xf9, 0xd1, 0x66, 0xe4, 0xed, 0x20, 0x13, + 0x7f, 0xb5, 0xb8, 0x04, 0xd5, 0x24, 0x01, 0xe4, 0x14, 0xfe, 0xb5, 0x06, 0xcb, 0x62, 0x9b, 0xab, + 0xf4, 0xbc, 0xc2, 0xbd, 0xe0, 0x71, 0x49, 0xc9, 0x9e, 0x4e, 0x92, 0x3d, 0x33, 0x51, 0xf6, 0x6c, + 0x5c, 0xf6, 0x37, 0x60, 0x65, 0x4c, 0x38, 0x29, 0xf8, 0x0e, 0x2c, 0x29, 0x67, 0x18, 0x2d, 0x7f, + 0xb7, 0x62, 0xf5, 0x6a, 0xfa, 0x47, 0x06, 0xf3, 0x27, 0xec, 0xfe, 0xa3, 0xe7, 0x5c, 0xd8, 0xab, + 0x36, 0x60, 0xe6, 0x5c, 0xce, 0xa4, 0xb0, 0xcc, 0x03, 0x58, 0x93, 0x9e, 0x1c, 0x7e, 0x4d, 0x52, + 0x5f, 0x1f, 0x5e, 0x61, 0x84, 0xfc, 0x7d, 0x1a, 0xae, 0x4e, 0x39, 0x56, 0x5e, 0xef, 0x0c, 0x2a, + 0xd1, 0xef, 0x73, 0x84, 0x22, 0xda, 0x1b, 0x8e, 0x4e, 0xf5, 0xb1, 0x46, 0x70, 0xca, 0x59, 0xd1, + 0xaf, 0x81, 0x0d, 0x79, 0x8e, 0xe8, 0xc0, 0x16, 0x83, 0xf1, 0x1d, 0xfd, 0x73, 0x00, 0x99, 0xc1, + 0x3b, 0xa8, 0x2b, 0x3f, 0x54, 0x7d, 0xf8, 0x52, 0xfc, 0x84, 0x32, 0x3f, 0x45, 0x5d, 0xc1, 0x25, + 0x4f, 0xd5, 0xba, 0x6a, 0x83, 0x31, 0x49, 0x98, 0x84, 0x5e, 0xef, 0xd6, 0x68, 0xaf, 0xb7, 0x52, + 0x8b, 0xff, 0xbf, 0x83, 0x38, 0x20, 0xfa, 0xed, 0x6f, 0x0f, 0xca, 0xa3, 0xdc, 0xcf, 0xf3, 0x6c, + 0x10, 0x6f, 0x1e, 0x22, 0xdd, 0xa3, 0x05, 0x57, 0x05, 0xb0, 0x2e, 0x3f, 0x4c, 0xbb, 0xe1, 0xd3, + 0x0f, 0x6e, 0x5d, 0xd0, 0xa7, 0xff, 0xaa, 0x81, 0x39, 0xed, 0xd0, 0x0b, 0x3b, 0xf8, 0x45, 0x6b, + 0xc8, 0x5d, 0x28, 0xf8, 0xee, 0x39, 0x2b, 0x08, 0xf8, 0xae, 
0x4a, 0xb2, 0x8f, 0xee, 0xfd, 0xed, + 0xf9, 0xaa, 0xf6, 0x8f, 0xe7, 0xab, 0xda, 0xbf, 0x9e, 0xaf, 0x6a, 0xbf, 0xfa, 0xf7, 0xea, 0xb7, + 0x3e, 0xbf, 0xd1, 0x77, 0x28, 0x26, 0xa4, 0xe6, 0xf8, 0x1b, 0xe2, 0xd7, 0x46, 0xdb, 0xdf, 0xe8, + 0xd3, 0x0d, 0xfe, 0x2f, 0x25, 0x1b, 0xa1, 0x0f, 0x1d, 0xe5, 0x38, 0xe0, 0xbd, 0xff, 0x06, 0x00, + 0x00, 0xff, 0xff, 0xda, 0xbd, 0xf5, 0x12, 0x0f, 0x23, 0x00, 0x00, +} + +func (m *ExecuteVtctlCommandRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteVtctlCommandRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteVtctlCommandRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ActionTimeout != 0 { + i = encodeVarintVtctldata(dAtA, i, uint64(m.ActionTimeout)) + i-- + dAtA[i] = 0x10 + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExecuteVtctlCommandResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteVtctlCommandResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteVtctlCommandResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + 
copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Event != nil { + { + size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TableMaterializeSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableMaterializeSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableMaterializeSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.CreateDdl) > 0 { + i -= len(m.CreateDdl) + copy(dAtA[i:], m.CreateDdl) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.CreateDdl))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceExpression) > 0 { + i -= len(m.SourceExpression) + copy(dAtA[i:], m.SourceExpression) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.SourceExpression))) + i-- + dAtA[i] = 0x12 + } + if len(m.TargetTable) > 0 { + i -= len(m.TargetTable) + copy(dAtA[i:], m.TargetTable) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.TargetTable))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MaterializeSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaterializeSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MaterializeSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized 
!= nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ExternalCluster) > 0 { + i -= len(m.ExternalCluster) + copy(dAtA[i:], m.ExternalCluster) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.ExternalCluster))) + i-- + dAtA[i] = 0x42 + } + if len(m.TabletTypes) > 0 { + i -= len(m.TabletTypes) + copy(dAtA[i:], m.TabletTypes) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.TabletTypes))) + i-- + dAtA[i] = 0x3a + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x32 + } + if len(m.TableSettings) > 0 { + for iNdEx := len(m.TableSettings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TableSettings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Keyspace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Keyspace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Keyspace) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Keyspace != nil { + { + size, err := m.Keyspace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Shard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Shard != nil { + { + size, err := m.Shard.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Workflow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ShardStreams) > 0 { + for k := range m.ShardStreams { + v := m.ShardStreams[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtctldata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtctldata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.MaxVReplicationLag != 0 { + i = encodeVarintVtctldata(dAtA, i, uint64(m.MaxVReplicationLag)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Source != nil { + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Workflow_ReplicationLocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow_ReplicationLocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow_ReplicationLocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
+ i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Workflow_ShardStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow_ShardStream) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow_ShardStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.IsPrimaryServing { + i-- + if m.IsPrimaryServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.TabletControls) > 0 { + for iNdEx := len(m.TabletControls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TabletControls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Streams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Workflow_Stream) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow_Stream) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow_Stream) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.CopyStates) > 0 { + for iNdEx := len(m.CopyStates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CopyStates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x5a + } + if m.TimeUpdated != nil { + { + size, err := m.TimeUpdated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.TransactionTimestamp != nil { + { + size, err := m.TransactionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x42 + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x3a + } + if len(m.StopPosition) > 0 { + i -= len(m.StopPosition) + copy(dAtA[i:], m.StopPosition) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.StopPosition))) + i-- + dAtA[i] = 0x32 + } + if len(m.Position) > 0 { + i -= 
len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x2a + } + if m.BinlogSource != nil { + { + size, err := m.BinlogSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Tablet != nil { + { + size, err := m.Tablet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintVtctldata(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Workflow_Stream_CopyState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow_Stream_CopyState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow_Stream_CopyState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.LastPk) > 0 { + i -= len(m.LastPk) + copy(dAtA[i:], m.LastPk) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.LastPk))) + i-- + dAtA[i] = 0x12 + } + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Table))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChangeTabletTypeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangeTabletTypeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChangeTabletTypeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.DbType != 0 { + i = encodeVarintVtctldata(dAtA, i, uint64(m.DbType)) + i-- + dAtA[i] = 0x10 + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChangeTabletTypeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangeTabletTypeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChangeTabletTypeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WasDryRun { + i-- + if m.WasDryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AfterTablet != nil { + { + size, err := m.AfterTablet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BeforeTablet != nil { + { + size, err := m.BeforeTablet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateKeyspaceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateKeyspaceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateKeyspaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SnapshotTime != nil { + { + size, err := m.SnapshotTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.BaseKeyspace) > 0 { + i -= len(m.BaseKeyspace) + copy(dAtA[i:], m.BaseKeyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.BaseKeyspace))) + i-- + dAtA[i] = 0x42 + } + if m.Type != 0 { + i = encodeVarintVtctldata(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x38 + } + if len(m.ServedFroms) > 0 { + for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServedFroms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ShardingColumnType != 0 { + i = encodeVarintVtctldata(dAtA, i, uint64(m.ShardingColumnType)) + i-- + dAtA[i] = 0x28 + } + if len(m.ShardingColumnName) > 0 { + i -= len(m.ShardingColumnName) + copy(dAtA[i:], m.ShardingColumnName) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.ShardingColumnName))) + i-- + dAtA[i] = 0x22 + } + if m.AllowEmptyVSchema { + i-- + if m.AllowEmptyVSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if 
m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateKeyspaceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateKeyspaceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateKeyspaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Keyspace != nil { + { + size, err := m.Keyspace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateShardRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateShardRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateShardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.IncludeParent { + i-- + if m.IncludeParent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.ShardName) > 0 { + i 
-= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateShardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateShardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateShardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ShardAlreadyExists { + i-- + if m.ShardAlreadyExists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Shard != nil { + { + size, err := m.Shard.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Keyspace != nil { + { + size, err := m.Keyspace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteKeyspaceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteKeyspaceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteKeyspaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteKeyspaceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteKeyspaceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteKeyspaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *DeleteShardsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteShardsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteShardsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.EvenIfServing { + i-- + if m.EvenIfServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx 
>= 0; iNdEx-- { + { + size, err := m.Shards[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeleteShardsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteShardsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteShardsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *DeleteTabletsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteTabletsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteTabletsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AllowPrimary { + i-- + if m.AllowPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.TabletAliases) > 0 { + for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TabletAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeleteTabletsResponse) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteTabletsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteTabletsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *EmergencyReparentShardRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmergencyReparentShardRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmergencyReparentShardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WaitReplicasTimeout != nil { + { + size, err := m.WaitReplicasTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.IgnoreReplicas) > 0 { + for iNdEx := len(m.IgnoreReplicas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IgnoreReplicas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.NewPrimary != nil { + { + size, err := m.NewPrimary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 
0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EmergencyReparentShardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmergencyReparentShardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmergencyReparentShardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.PromotedPrimary != nil { + { + size, err := m.PromotedPrimary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FindAllShardsInKeyspaceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FindAllShardsInKeyspaceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FindAllShardsInKeyspaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FindAllShardsInKeyspaceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FindAllShardsInKeyspaceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FindAllShardsInKeyspaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Shards) > 0 { + for k := range m.Shards { + v := m.Shards[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtctldata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtctldata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetBackupsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetBackupsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetBackupsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetBackupsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetBackupsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetBackupsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Backups) > 0 { + for iNdEx := len(m.Backups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Backups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetCellInfoNamesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*GetCellInfoNamesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetCellInfoNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *GetCellInfoNamesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCellInfoNamesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetCellInfoNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetCellInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCellInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetCellInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintVtctldata(dAtA, i, 
uint64(len(m.Cell))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetCellInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCellInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetCellInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CellInfo != nil { + { + size, err := m.CellInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetCellsAliasesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCellsAliasesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetCellsAliasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *GetCellsAliasesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCellsAliasesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetCellsAliasesResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Aliases) > 0 { + for k := range m.Aliases { + v := m.Aliases[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtctldata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtctldata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetKeyspacesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspacesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetKeyspacesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *GetKeyspacesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspacesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetKeyspacesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspaces) > 0 { + for iNdEx := 
len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Keyspaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetKeyspaceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspaceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetKeyspaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetKeyspaceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspaceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetKeyspaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Keyspace != nil { + { + size, err := m.Keyspace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSchemaRequest) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TableSizesOnly { + i-- + if m.TableSizesOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.TableNamesOnly { + i-- + if m.TableNamesOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSchemaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetShardRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetShardRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetShardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetShardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetShardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetShardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Shard != nil { + { + size, err := m.Shard.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSrvKeyspacesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvKeyspacesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSrvKeyspacesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSrvKeyspacesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvKeyspacesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSrvKeyspacesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SrvKeyspaces) > 0 { + 
for k := range m.SrvKeyspaces { + v := m.SrvKeyspaces[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtctldata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtctldata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSrvVSchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvVSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSrvVSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSrvVSchemaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvVSchemaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSrvVSchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SrvVSchema != nil { + { + size, err := m.SrvVSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTabletRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTabletRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTabletRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTabletResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTabletResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTabletResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Tablet != nil { + { + size, err := m.Tablet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTabletsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTabletsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTabletsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.TabletAliases) > 0 { + for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TabletAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Strict { + i-- + if m.Strict { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTabletsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTabletsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTabletsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.Tablets) > 0 { + for iNdEx := len(m.Tablets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tablets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetVSchemaRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetVSchemaRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetVSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetVSchemaResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetVSchemaResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetVSchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.VSchema != nil { + { + size, err := m.VSchema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*GetWorkflowsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetWorkflowsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetWorkflowsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ActiveOnly { + i-- + if m.ActiveOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetWorkflowsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetWorkflowsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetWorkflowsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Workflows) > 0 { + for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Workflows[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *InitShardPrimaryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitShardPrimaryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InitShardPrimaryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WaitReplicasTimeout != nil { + { + size, err := m.WaitReplicasTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.PrimaryElectTabletAlias != nil { + { + size, err := m.PrimaryElectTabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InitShardPrimaryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitShardPrimaryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InitShardPrimaryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PlannedReparentShardRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlannedReparentShardRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlannedReparentShardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WaitReplicasTimeout != nil { + { + size, err := m.WaitReplicasTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.AvoidPrimary != nil { + { + size, err := m.AvoidPrimary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.NewPrimary != nil { + { + size, err := m.NewPrimary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *PlannedReparentShardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlannedReparentShardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlannedReparentShardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.PromotedPrimary != nil { + { + size, err := m.PromotedPrimary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveKeyspaceCellRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveKeyspaceCellRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveKeyspaceCellRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveKeyspaceCellResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveKeyspaceCellResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveKeyspaceCellResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RemoveShardCellRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveShardCellRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveShardCellRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Recursive { + i-- + if m.Recursive { 
+ dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x1a + } + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveShardCellResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveShardCellResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveShardCellResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ReparentTabletRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReparentTabletRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReparentTabletRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Tablet != nil { + { + size, err := 
m.Tablet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReparentTabletResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReparentTabletResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReparentTabletResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Primary != nil { + { + size, err := m.Primary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationPositionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationPositionsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShardReplicationPositionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationPositionsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationPositionsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShardReplicationPositionsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.TabletMap) > 0 { + for k := range m.TabletMap { + v := m.TabletMap[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtctldata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtctldata(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ReplicationStatuses) > 0 { + for k := range m.ReplicationStatuses { + v := m.ReplicationStatuses[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtctldata(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtctldata(dAtA, i, uint64(baseI-i)) + 
i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TabletExternallyReparentedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TabletExternallyReparentedRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TabletExternallyReparentedRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Tablet != nil { + { + size, err := m.Tablet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TabletExternallyReparentedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TabletExternallyReparentedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TabletExternallyReparentedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OldPrimary != nil { + { + size, err := m.OldPrimary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.NewPrimary != nil { + { + size, err := m.NewPrimary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtctldata(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintVtctldata(dAtA []byte, offset int, v uint64) int { + offset -= sovVtctldata(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExecuteVtctlCommandRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.ActionTimeout != 0 { + n += 1 + sovVtctldata(uint64(m.ActionTimeout)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteVtctlCommandResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TableMaterializeSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetTable) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.SourceExpression) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.CreateDdl) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MaterializeSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = 
len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if len(m.TableSettings) > 0 { + for _, e := range m.TableSettings { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.TabletTypes) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.ExternalCluster) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Keyspace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Keyspace != nil { + l = m.Keyspace.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Shard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Shard != nil { + l = m.Shard.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Workflow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Source != nil { + l = m.Source.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.MaxVReplicationLag != 0 { + n += 1 + sovVtctldata(uint64(m.MaxVReplicationLag)) + } + if len(m.ShardStreams) > 0 { + for k, v := range m.ShardStreams { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtctldata(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtctldata(uint64(len(k))) + l + n += mapEntrySize + 1 + 
sovVtctldata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Workflow_ReplicationLocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Workflow_ShardStream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if len(m.TabletControls) > 0 { + for _, e := range m.TabletControls { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.IsPrimaryServing { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Workflow_Stream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovVtctldata(uint64(m.Id)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Tablet != nil { + l = m.Tablet.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.BinlogSource != nil { + l = m.BinlogSource.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.StopPosition) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.TransactionTimestamp != nil { + l = m.TransactionTimestamp.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.TimeUpdated != nil { + l = m.TimeUpdated.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + 
sovVtctldata(uint64(l)) + } + if len(m.CopyStates) > 0 { + for _, e := range m.CopyStates { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Workflow_Stream_CopyState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Table) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.LastPk) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChangeTabletTypeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.DbType != 0 { + n += 1 + sovVtctldata(uint64(m.DbType)) + } + if m.DryRun { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChangeTabletTypeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeTablet != nil { + l = m.BeforeTablet.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.AfterTablet != nil { + l = m.AfterTablet.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.WasDryRun { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateKeyspaceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Force { + n += 2 + } + if m.AllowEmptyVSchema { + n += 2 + } + l = len(m.ShardingColumnName) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.ShardingColumnType != 0 { + n += 1 + sovVtctldata(uint64(m.ShardingColumnType)) + } + if len(m.ServedFroms) > 0 { + for _, e := range m.ServedFroms { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.Type != 0 { + n += 1 + sovVtctldata(uint64(m.Type)) + } + l = 
len(m.BaseKeyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.SnapshotTime != nil { + l = m.SnapshotTime.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateKeyspaceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateShardRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Force { + n += 2 + } + if m.IncludeParent { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateShardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Shard != nil { + l = m.Shard.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.ShardAlreadyExists { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteKeyspaceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Recursive { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteKeyspaceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteShardsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shards) > 0 { + for _, e := range m.Shards { + l = e.Size() + n 
+= 1 + l + sovVtctldata(uint64(l)) + } + } + if m.Recursive { + n += 2 + } + if m.EvenIfServing { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteShardsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteTabletsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TabletAliases) > 0 { + for _, e := range m.TabletAliases { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.AllowPrimary { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteTabletsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EmergencyReparentShardRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if len(m.IgnoreReplicas) > 0 { + for _, e := range m.IgnoreReplicas { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EmergencyReparentShardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.PromotedPrimary != nil { + l = m.PromotedPrimary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if len(m.Events) > 0 { 
+ for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FindAllShardsInKeyspaceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FindAllShardsInKeyspaceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shards) > 0 { + for k, v := range m.Shards { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtctldata(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtctldata(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtctldata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetBackupsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetBackupsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Backups) > 0 { + for _, e := range m.Backups { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetCellInfoNamesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetCellInfoNamesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil 
{ + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetCellInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetCellInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CellInfo != nil { + l = m.CellInfo.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetCellsAliasesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetCellsAliasesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Aliases) > 0 { + for k, v := range m.Aliases { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtctldata(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtctldata(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtctldata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetKeyspacesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetKeyspacesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keyspaces) > 0 { + for _, e := range m.Keyspaces { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetKeyspaceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func 
(m *GetKeyspaceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if m.TableNamesOnly { + n += 2 + } + if m.TableSizesOnly { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSchemaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetShardRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetShardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSrvKeyspacesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range 
m.Cells { + l = len(s) + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSrvKeyspacesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SrvKeyspaces) > 0 { + for k, v := range m.SrvKeyspaces { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtctldata(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtctldata(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtctldata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSrvVSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSrvVSchemaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SrvVSchema != nil { + l = m.SrvVSchema.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetTabletRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetTabletResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetTabletsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if len(m.Cells) > 0 { + 
for _, s := range m.Cells { + l = len(s) + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.Strict { + n += 2 + } + if len(m.TabletAliases) > 0 { + for _, e := range m.TabletAliases { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetTabletsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tablets) > 0 { + for _, e := range m.Tablets { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetVSchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetVSchemaResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VSchema != nil { + l = m.VSchema.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetWorkflowsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.ActiveOnly { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetWorkflowsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Workflows) > 0 { + for _, e := range m.Workflows { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InitShardPrimaryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + 
sovVtctldata(uint64(l)) + } + if m.PrimaryElectTabletAlias != nil { + l = m.PrimaryElectTabletAlias.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Force { + n += 2 + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InitShardPrimaryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PlannedReparentShardRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.AvoidPrimary != nil { + l = m.AvoidPrimary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PlannedReparentShardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.PromotedPrimary != nil { + l = m.PromotedPrimary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RemoveKeyspaceCellRequest) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Force { + n += 2 + } + if m.Recursive { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RemoveKeyspaceCellResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RemoveShardCellRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Force { + n += 2 + } + if m.Recursive { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RemoveShardCellResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReparentTabletRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReparentTabletResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.Primary != nil { + l = m.Primary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShardReplicationPositionsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + 
if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ShardReplicationPositionsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ReplicationStatuses) > 0 { + for k, v := range m.ReplicationStatuses { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtctldata(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtctldata(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtctldata(uint64(mapEntrySize)) + } + } + if len(m.TabletMap) > 0 { + for k, v := range m.TabletMap { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtctldata(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtctldata(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtctldata(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TabletExternallyReparentedRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TabletExternallyReparentedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.OldPrimary != nil { + l = m.OldPrimary.Size() + n += 1 + l + sovVtctldata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVtctldata(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVtctldata(x uint64) (n int) { + return sovVtctldata(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) +} +func (m *ExecuteVtctlCommandRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteVtctlCommandRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtctlCommandRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActionTimeout", wireType) + } + m.ActionTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActionTimeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteVtctlCommandResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteVtctlCommandResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtctlCommandResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableMaterializeSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableMaterializeSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableMaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetTable = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateDdl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CreateDdl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaterializeSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaterializeSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSettings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableSettings = append(m.TableSettings, &TableMaterializeSettings{}) + if err := 
m.TableSettings[len(m.TableSettings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletTypes = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Shard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Shard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &topodata.Shard{} + } + if err := m.Shard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Source == nil { + m.Source = &Workflow_ReplicationLocation{} + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Workflow_ReplicationLocation{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxVReplicationLag", wireType) + } + m.MaxVReplicationLag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxVReplicationLag |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardStreams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShardStreams == nil { + m.ShardStreams = 
make(map[string]*Workflow_ShardStream) + } + var mapkey string + var mapvalue *Workflow_ShardStream + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtctldata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtctldata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtctldata + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Workflow_ShardStream{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > postIndex { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ShardStreams[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_ReplicationLocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationLocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationLocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_ShardStream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, &Workflow_Stream{}) + if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletControls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletControls = append(m.TabletControls, &topodata.Shard_TabletControl{}) + if err := m.TabletControls[len(m.TabletControls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsPrimaryServing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsPrimaryServing = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Stream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Stream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinlogSource == nil { + m.BinlogSource = &binlogdata.BinlogSource{} + } + if err := m.BinlogSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx 
= postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransactionTimestamp == nil { + m.TransactionTimestamp = &vttime.Time{} + } + if err := m.TransactionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeUpdated == nil { + m.TimeUpdated = &vttime.Time{} + } + if err := m.TimeUpdated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CopyStates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CopyStates = append(m.CopyStates, &Workflow_Stream_CopyState{}) + if err := m.CopyStates[len(m.CopyStates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream_CopyState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CopyState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CopyState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastPk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastPk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTabletTypeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTabletTypeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTabletTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err 
:= m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbType", wireType) + } + m.DbType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTabletTypeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTabletTypeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTabletTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeTablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeTablet == nil { + m.BeforeTablet = &topodata.Tablet{} + } + if err := m.BeforeTablet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterTablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterTablet == nil { + m.AfterTablet = &topodata.Tablet{} + } + if err := m.AfterTablet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WasDryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WasDryRun = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateKeyspaceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyVSchema", wireType) + } + var 
v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEmptyVSchema = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardingColumnName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnType", wireType) + } + m.ShardingColumnType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShardingColumnType |= topodata.KeyspaceIdType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.ServedFroms = append(m.ServedFroms, &topodata.Keyspace_ServedFrom{}) + if err := m.ServedFroms[len(m.ServedFroms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= topodata.KeyspaceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaseKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BaseKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SnapshotTime == nil { + m.SnapshotTime = &vttime.Time{} + } + if err := m.SnapshotTime.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateKeyspaceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} + } + if err := m.Keyspace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err 
:= skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateShardRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeParent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeParent = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateShardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} + } + if err := m.Keyspace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex 
< 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &Shard{} + } + if err := m.Shard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardAlreadyExists", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ShardAlreadyExists = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteKeyspaceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteKeyspaceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteShardsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteShardsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteShardsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, &Shard{}) + if err := m.Shards[len(m.Shards)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d 
for field EvenIfServing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EvenIfServing = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteShardsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteShardsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteShardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTabletsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTabletsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) + if err := m.TabletAliases[len(m.TabletAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrimary = bool(v != 0) + default: + iNdEx = 
preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTabletsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTabletsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmergencyReparentShardRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmergencyReparentShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmergencyReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} + } + if err := m.NewPrimary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreReplicas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IgnoreReplicas = append(m.IgnoreReplicas, &topodata.TabletAlias{}) + if err := m.IgnoreReplicas[len(m.IgnoreReplicas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} + } + if err := m.WaitReplicasTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmergencyReparentShardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmergencyReparentShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmergencyReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PromotedPrimary == nil { + m.PromotedPrimary = &topodata.TabletAlias{} + } + if err := m.PromotedPrimary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindAllShardsInKeyspaceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindAllShardsInKeyspaceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shards == nil { + m.Shards = make(map[string]*Shard) + } + var mapkey string + var mapvalue *Shard + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtctldata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtctldata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtctldata + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Shard{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Shards[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBackupsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBackupsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBackupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBackupsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Backups = append(m.Backups, &mysqlctl.BackupInfo{}) + if err := 
m.Backups[len(m.Backups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} + } + if err := m.CellInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Aliases == nil { + m.Aliases = make(map[string]*topodata.CellsAlias) + } + var mapkey string + var mapvalue *topodata.CellsAlias + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtctldata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtctldata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtctldata + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.CellsAlias{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Aliases[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspacesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspacesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspaceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspaceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} + } + if err := m.Keyspace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeViews = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TableNamesOnly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizesOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TableSizesOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = 
&tabletmanagerdata.SchemaDefinition{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetShardRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetShardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &Shard{} + } + if err := m.Shard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSrvKeyspacesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvKeyspacesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSrvKeyspacesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvKeyspacesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SrvKeyspaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SrvKeyspaces == nil { + 
m.SrvKeyspaces = make(map[string]*topodata.SrvKeyspace) + } + var mapkey string + var mapvalue *topodata.SrvKeyspace + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtctldata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtctldata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtctldata + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.SrvKeyspace{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) 
> postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SrvKeyspaces[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSrvVSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSrvVSchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvVSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SrvVSchema == nil { + m.SrvVSchema = &vschema.SrvVSchema{} + } + if err := m.SrvVSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + 
} + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.Tablet{} + } + if err := m.Tablet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Strict = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletAliases = 
append(m.TabletAliases, &topodata.TabletAlias{}) + if err := m.TabletAliases[len(m.TabletAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tablets = append(m.Tablets, &topodata.Tablet{}) + if err := 
m.Tablets[len(m.Tablets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetVSchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex 
+ skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetVSchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetVSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 
0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetWorkflowsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetWorkflowsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflows = append(m.Workflows, &Workflow{}) + if err := 
m.Workflows[len(m.Workflows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitShardPrimaryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitShardPrimaryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitShardPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryElectTabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrimaryElectTabletAlias == nil { + m.PrimaryElectTabletAlias = &topodata.TabletAlias{} + } + if err := m.PrimaryElectTabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} + } + if err := m.WaitReplicasTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitShardPrimaryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitShardPrimaryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitShardPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlannedReparentShardRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlannedReparentShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlannedReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} + } + if err := m.NewPrimary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvoidPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AvoidPrimary == nil { + m.AvoidPrimary = &topodata.TabletAlias{} + } + if err := m.AvoidPrimary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} + } + if err := m.WaitReplicasTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlannedReparentShardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlannedReparentShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlannedReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PromotedPrimary == nil { + m.PromotedPrimary = &topodata.TabletAlias{} + } + if err := m.PromotedPrimary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveKeyspaceCellRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveKeyspaceCellRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveKeyspaceCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + default: + iNdEx = preIndex + 
skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveKeyspaceCellResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveKeyspaceCellResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveKeyspaceCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveShardCellRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveShardCellRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveShardCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveShardCellResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveShardCellResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveShardCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReparentTabletRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReparentTabletRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReparentTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReparentTabletResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReparentTabletResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReparentTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Primary == nil { + m.Primary = &topodata.TabletAlias{} + } + if err := m.Primary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShardReplicationPositionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardReplicationPositionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardReplicationPositionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata 
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShardReplicationPositionsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardReplicationPositionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardReplicationPositionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.ReplicationStatuses == nil { + m.ReplicationStatuses = make(map[string]*replicationdata.Status) + } + var mapkey string + var mapvalue *replicationdata.Status + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtctldata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtctldata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtctldata + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &replicationdata.Status{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ReplicationStatuses[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletMap == nil { + m.TabletMap = make(map[string]*topodata.Tablet) + } + var mapkey string + var mapvalue *topodata.Tablet + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtctldata + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtctldata + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtctldata + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.Tablet{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.TabletMap[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TabletExternallyReparentedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TabletExternallyReparentedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TabletExternallyReparentedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TabletExternallyReparentedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TabletExternallyReparentedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TabletExternallyReparentedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} + } + if err := m.NewPrimary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OldPrimary == nil { + m.OldPrimary = &topodata.TabletAlias{} + } + if err := m.OldPrimary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtctldata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtctldata + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVtctldata(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtctldata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtctldata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtctldata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVtctldata + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVtctldata + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVtctldata + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVtctldata = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVtctldata = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVtctldata = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index 3f7e5a349cb..053b6ee12fa 100644 --- 
a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vtctlservice.proto package vtctlservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - vtctldata "vitess.io/vitess/go/vt/proto/vtctldata" ) @@ -30,22 +29,55 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor_27055cdbb1148d2b) } var fileDescriptor_27055cdbb1148d2b = []byte{ - // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e, - 0xc9, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, - 0x41, 0x16, 0x93, 0xe2, 0x07, 0xf3, 0x52, 0x12, 0x4b, 0x12, 0x21, 0xd2, 0x46, 0x85, 0x5c, 0xac, - 0x61, 0x20, 0x21, 0xa1, 0x0c, 0x2e, 0x61, 0xd7, 0x8a, 0xd4, 0xe4, 0xd2, 0x92, 0x54, 0x30, 0xdf, - 0x39, 0x3f, 0x37, 0x37, 0x31, 0x2f, 0x45, 0x48, 0x55, 0x0f, 0xa1, 0x03, 0x8b, 0x7c, 0x50, 0x6a, - 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x1a, 0x21, 0x65, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x4a, - 0x0c, 0x06, 0x8c, 0x46, 0xf3, 0x99, 0xb8, 0xd8, 0xc0, 0x92, 0x29, 0x42, 0x45, 0x5c, 0xe2, 0x6e, - 0x99, 0x79, 0x29, 0x8e, 0x39, 0x39, 0xc1, 0x19, 0x89, 0x45, 0x29, 0xc5, 0x9e, 0x79, 0xde, 0xa9, - 0x95, 0xc5, 0x05, 0x89, 0xc9, 0xa9, 0x42, 0x9a, 0x48, 0x26, 0xe2, 0x50, 0x03, 0xb3, 0x5c, 0x8b, - 0x18, 0xa5, 0x30, 0x07, 0x08, 0xf9, 0x71, 0x71, 0xbb, 0xa7, 0x96, 0xc0, 0xed, 0x91, 0x45, 0xd2, - 0x8c, 0x24, 0x0e, 0x33, 0x5b, 0x0e, 0x97, 0x34, 0xdc, 0xbc, 0x40, 0x2e, 0x1e, 0x24, 0x89, 0x62, - 0x21, 0x1c, 0x3a, 0x8a, 0x61, 0x26, 0xca, 0xe3, 0x94, 0x87, 0x19, 0xe9, 0xa4, 0x1d, 0xa5, 0x59, - 0x96, 0x59, 0x92, 0x5a, 0x5c, 0xac, 0x97, 0x99, 0xaf, 0x0f, 0x61, 0xe9, 0xa7, 0xe7, 0xeb, 
0x97, - 0x95, 0xe8, 0x83, 0x23, 0x4d, 0x1f, 0x39, 0x4a, 0x93, 0xd8, 0xc0, 0x62, 0xc6, 0x80, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xd5, 0x49, 0x16, 0xd1, 0xfd, 0x01, 0x00, 0x00, + // 753 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x96, 0xdf, 0x4e, 0x14, 0x3f, + 0x14, 0xc7, 0xd9, 0x8b, 0x1f, 0xf9, 0x59, 0x51, 0x48, 0x25, 0x1a, 0x17, 0x76, 0x58, 0x50, 0x54, + 0xfc, 0xc3, 0x1a, 0xbc, 0xf4, 0x0a, 0xd6, 0x15, 0x09, 0x09, 0x41, 0x20, 0x90, 0x90, 0x70, 0x51, + 0x66, 0x0e, 0xec, 0x84, 0x99, 0x76, 0x98, 0x96, 0x95, 0x8d, 0x2f, 0xe2, 0x1b, 0xe9, 0xa5, 0x8f, + 0x60, 0xf0, 0x45, 0xcc, 0x4e, 0xb7, 0xa5, 0xd3, 0x69, 0xd9, 0xbd, 0x82, 0xed, 0xe7, 0x7b, 0xbe, + 0xa7, 0xff, 0xce, 0xe9, 0x20, 0xdc, 0x13, 0xa1, 0x48, 0x38, 0xe4, 0xbd, 0x38, 0x84, 0xd5, 0x2c, + 0x67, 0x82, 0xe1, 0x29, 0x73, 0xac, 0x3e, 0x5d, 0xfc, 0x8a, 0x88, 0x20, 0x12, 0xaf, 0x5d, 0xa2, + 0xff, 0x0e, 0x07, 0x43, 0xb8, 0x8b, 0x1e, 0x75, 0xae, 0x21, 0xbc, 0x12, 0x50, 0xfc, 0x6e, 0xb3, + 0x34, 0x25, 0x34, 0xc2, 0xcb, 0xab, 0xb7, 0x11, 0x0e, 0xbe, 0x07, 0x97, 0x57, 0xc0, 0x45, 0xfd, + 0xc5, 0x28, 0x19, 0xcf, 0x18, 0xe5, 0xb0, 0x34, 0xf1, 0xbe, 0xb6, 0xf6, 0x73, 0x16, 0x4d, 0x16, + 0x30, 0xc2, 0x27, 0x68, 0xa6, 0xdd, 0x25, 0xf4, 0x1c, 0x0e, 0xc8, 0x69, 0x02, 0xe2, 0xa0, 0x9f, + 0x01, 0x5e, 0x32, 0xac, 0x6c, 0xa8, 0xd2, 0x3d, 0xbb, 0x53, 0xa3, 0x72, 0xe1, 0x23, 0xf4, 0xb0, + 0x9d, 0x03, 0x11, 0xb0, 0x0d, 0x7d, 0x9e, 0x91, 0x10, 0x70, 0xd3, 0x0c, 0x2c, 0x21, 0x65, 0xbd, + 0x78, 0x87, 0x42, 0x1b, 0xef, 0xa0, 0xfb, 0x92, 0xed, 0x77, 0x49, 0x1e, 0xe1, 0x46, 0x25, 0xa6, + 0x18, 0x57, 0x96, 0x81, 0x0f, 0x9b, 0x13, 0xfd, 0x04, 0x09, 0x78, 0x26, 0x5a, 0x46, 0xae, 0x89, + 0xda, 0x0a, 0x6d, 0xfc, 0x15, 0x4d, 0x49, 0x56, 0x64, 0xe4, 0x38, 0xa8, 0x04, 0x49, 0xa0, 0x4c, + 0x17, 0xbc, 0x5c, 0x5b, 0x1e, 0xa0, 0x07, 0x92, 0xc8, 0x2d, 0xe7, 0xb8, 0x1a, 0x33, 0x24, 0xca, + 0xb4, 0xe9, 0x17, 0x68, 0x57, 0x86, 0x1e, 0x77, 0x52, 0xc8, 0xcf, 0x81, 0x86, 0xfd, 0x3d, 0xc8, + 0x48, 0x0e, 
0x54, 0xc8, 0xcd, 0x7d, 0x65, 0x5e, 0x2d, 0xa7, 0x44, 0xe5, 0x59, 0x19, 0x43, 0xa9, + 0x13, 0xe6, 0xe8, 0xc9, 0xe7, 0x98, 0x46, 0xeb, 0x49, 0x22, 0x57, 0xb8, 0x45, 0xf5, 0xde, 0x9b, + 0x3e, 0x1e, 0x8d, 0x4a, 0xf9, 0x7a, 0x1c, 0xa9, 0xce, 0xb9, 0x8d, 0xd0, 0x26, 0x88, 0x0d, 0x12, + 0x5e, 0x5c, 0x65, 0x1c, 0xcf, 0x1b, 0xb1, 0xb7, 0xc3, 0xca, 0xb9, 0xe1, 0xa1, 0xda, 0xec, 0x04, + 0xcd, 0x6c, 0x82, 0x68, 0x43, 0x92, 0x6c, 0xd1, 0x33, 0xb6, 0x43, 0x52, 0xe0, 0xa5, 0xda, 0xb1, + 0xa1, 0xab, 0x76, 0xaa, 0x1a, 0xf3, 0x8a, 0x1b, 0x14, 0x37, 0xdc, 0x51, 0xae, 0x2b, 0x5e, 0xc2, + 0xda, 0xef, 0x18, 0x4d, 0x0f, 0x01, 0x5f, 0x4f, 0x62, 0xc2, 0x81, 0xe3, 0xc5, 0x6a, 0x90, 0x62, + 0xca, 0x77, 0xe9, 0x2e, 0x89, 0x35, 0x57, 0x7d, 0x7e, 0xd6, 0x5c, 0xed, 0x33, 0x0b, 0x7c, 0xd8, + 0xac, 0x1a, 0x03, 0x94, 0xab, 0xc6, 0x04, 0xae, 0xaa, 0x29, 0x73, 0x6d, 0xf9, 0x05, 0xdd, 0xdb, + 0x04, 0xb1, 0x1f, 0x76, 0x21, 0x25, 0x78, 0xae, 0xac, 0x97, 0xa3, 0xca, 0x6c, 0xde, 0x0d, 0xb5, + 0x53, 0x07, 0xfd, 0x3f, 0x18, 0x2e, 0x6a, 0xa3, 0x6e, 0x69, 0xcd, 0x6a, 0x98, 0x73, 0x32, 0xeb, + 0x3c, 0xf6, 0xf3, 0xde, 0xed, 0x32, 0xad, 0xf3, 0x30, 0x99, 0xe7, 0x3c, 0xca, 0x12, 0xb3, 0x45, + 0x48, 0x78, 0x38, 0x5c, 0xf0, 0x42, 0x25, 0xec, 0xb0, 0xbc, 0xe8, 0xa6, 0x5f, 0x60, 0x6d, 0xa1, + 0x6c, 0x1d, 0xf6, 0x16, 0xca, 0x51, 0xcf, 0x16, 0x2a, 0x68, 0xd5, 0xa1, 0xea, 0x5f, 0x4e, 0xb5, + 0xaf, 0x0e, 0xab, 0x9d, 0x4b, 0x9a, 0xa9, 0x95, 0x5a, 0x66, 0xd6, 0x32, 0x1b, 0x1e, 0x6a, 0xdd, + 0xbc, 0x23, 0x96, 0x5f, 0x9c, 0x25, 0xec, 0x5b, 0xe5, 0xe6, 0x69, 0xe0, 0xb9, 0x79, 0x06, 0x37, + 0xfb, 0xc4, 0x16, 0x8d, 0xe5, 0xf9, 0xef, 0xe6, 0x71, 0x4a, 0xf2, 0x7e, 0xa9, 0x4f, 0xd8, 0xd0, + 0xd5, 0x27, 0xaa, 0x1a, 0x6d, 0x1f, 0xa3, 0xd9, 0xdd, 0x84, 0x50, 0x0a, 0x51, 0xb9, 0x6d, 0x9b, + 0x5f, 0x04, 0x2e, 0x81, 0x4a, 0xf3, 0x72, 0xa4, 0x4e, 0xa7, 0x0a, 0x11, 0xde, 0x83, 0x94, 0xf5, + 0xf4, 0x43, 0x37, 0x68, 0x07, 0xf8, 0xb9, 0x61, 0x50, 0xc5, 0x2a, 0xcd, 0xf2, 0x08, 0x95, 0x59, + 0x17, 0x92, 0x17, 0xd9, 0x8b, 0x0c, 0x8b, 0x95, 
0x58, 0xcd, 0x5c, 0x75, 0x51, 0x91, 0x98, 0xcf, + 0xbc, 0x5a, 0xdb, 0xf0, 0x1a, 0x37, 0x4b, 0x71, 0x26, 0x72, 0x3d, 0xf3, 0xb6, 0x42, 0x1b, 0x5f, + 0xa3, 0xa7, 0xc3, 0xcd, 0xca, 0x92, 0x38, 0x24, 0x22, 0x66, 0x74, 0x97, 0xf1, 0x78, 0xf0, 0x97, + 0xe3, 0x37, 0x86, 0x83, 0x57, 0xa5, 0xd2, 0xbd, 0x1d, 0x4f, 0xac, 0x33, 0x7f, 0x47, 0x75, 0x39, + 0x9b, 0xce, 0xb5, 0x80, 0x9c, 0x92, 0x24, 0xd1, 0x2f, 0x2e, 0x44, 0xd8, 0x74, 0xf3, 0xcb, 0x54, + 0xee, 0x77, 0x63, 0xaa, 0x55, 0xf2, 0x8d, 0x8f, 0xbf, 0x6e, 0x82, 0xda, 0xef, 0x9b, 0xa0, 0xf6, + 0xe7, 0x26, 0xa8, 0xfd, 0xf8, 0x1b, 0x4c, 0x1c, 0xaf, 0xf4, 0x62, 0x01, 0x9c, 0xaf, 0xc6, 0xac, + 0x25, 0xff, 0x6b, 0x9d, 0xb3, 0x56, 0x4f, 0xb4, 0x8a, 0x8f, 0xdd, 0x96, 0xf9, 0x29, 0x7c, 0x3a, + 0x59, 0x8c, 0x7d, 0xf8, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe0, 0x50, 0xd7, 0x8b, 0x35, 0x0b, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -159,12 +191,107 @@ var _Vtctl_serviceDesc = grpc.ServiceDesc{ // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type VtctldClient interface { - // FindAllShardsInKeyspace returns a map of shard names to shard references for a given keyspace. + // ChangeTabletType changes the db type for the specified tablet, if possible. + // This is used primarily to arrange replicas, and it will not convert a + // primary. For that, use InitShardPrimary. + // + // NOTE: This command automatically updates the serving graph. + ChangeTabletType(ctx context.Context, in *vtctldata.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldata.ChangeTabletTypeResponse, error) + // CreateKeyspace creates the specified keyspace in the topology. For a + // SNAPSHOT keyspace, the request must specify the name of a base keyspace, + // as well as a snapshot time. 
+ CreateKeyspace(ctx context.Context, in *vtctldata.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.CreateKeyspaceResponse, error) + // CreateShard creates the specified shard in the topology. + CreateShard(ctx context.Context, in *vtctldata.CreateShardRequest, opts ...grpc.CallOption) (*vtctldata.CreateShardResponse, error) + // DeleteKeyspace deletes the specified keyspace from the topology. In + // recursive mode, it also recursively deletes all shards in the keyspace. + // Otherwise, the keyspace must be empty (have no shards), or DeleteKeyspace + // returns an error. + DeleteKeyspace(ctx context.Context, in *vtctldata.DeleteKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.DeleteKeyspaceResponse, error) + // DeleteShards deletes the specified shards from the topology. In recursive + // mode, it also deletes all tablets belonging to the shard. Otherwise, the + // shard must be empty (have no tablets) or DeleteShards returns an error for + // that shard. + DeleteShards(ctx context.Context, in *vtctldata.DeleteShardsRequest, opts ...grpc.CallOption) (*vtctldata.DeleteShardsResponse, error) + // DeleteTablets deletes one or more tablets from the topology. + DeleteTablets(ctx context.Context, in *vtctldata.DeleteTabletsRequest, opts ...grpc.CallOption) (*vtctldata.DeleteTabletsResponse, error) + // EmergencyReparentShard reparents the shard to the new primary. It assumes + // the old primary is dead or otherwise not responding. + EmergencyReparentShard(ctx context.Context, in *vtctldata.EmergencyReparentShardRequest, opts ...grpc.CallOption) (*vtctldata.EmergencyReparentShardResponse, error) + // FindAllShardsInKeyspace returns a map of shard names to shard references + // for a given keyspace. FindAllShardsInKeyspace(ctx context.Context, in *vtctldata.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.FindAllShardsInKeyspaceResponse, error) + // GetBackups returns all the backups for a shard. 
+ GetBackups(ctx context.Context, in *vtctldata.GetBackupsRequest, opts ...grpc.CallOption) (*vtctldata.GetBackupsResponse, error) + // GetCellInfoNames returns all the cells for which we have a CellInfo object, + // meaning we have a topology service registered. + GetCellInfoNames(ctx context.Context, in *vtctldata.GetCellInfoNamesRequest, opts ...grpc.CallOption) (*vtctldata.GetCellInfoNamesResponse, error) + // GetCellInfo returns the information for a cell. + GetCellInfo(ctx context.Context, in *vtctldata.GetCellInfoRequest, opts ...grpc.CallOption) (*vtctldata.GetCellInfoResponse, error) + // GetCellsAliases returns a mapping of cell alias to cells identified by that + // alias. + GetCellsAliases(ctx context.Context, in *vtctldata.GetCellsAliasesRequest, opts ...grpc.CallOption) (*vtctldata.GetCellsAliasesResponse, error) // GetKeyspace reads the given keyspace from the topo and returns it. GetKeyspace(ctx context.Context, in *vtctldata.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspaceResponse, error) // GetKeyspaces returns the keyspace struct of all keyspaces in the topo. GetKeyspaces(ctx context.Context, in *vtctldata.GetKeyspacesRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspacesResponse, error) + // GetSchema returns the schema for a tablet, or just the schema for the + // specified tables in that tablet. + GetSchema(ctx context.Context, in *vtctldata.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaResponse, error) + // GetShard returns information about a shard in the topology. + GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) + // GetSrvKeyspaces returns the SrvKeyspaces for a keyspace in one or more + // cells. + GetSrvKeyspaces(ctx context.Context, in *vtctldata.GetSrvKeyspacesRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvKeyspacesResponse, error) + // GetSrvVSchema returns a the SrvVSchema for a cell. 
+ GetSrvVSchema(ctx context.Context, in *vtctldata.GetSrvVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvVSchemaResponse, error) + // GetTablet returns information about a tablet. + GetTablet(ctx context.Context, in *vtctldata.GetTabletRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletResponse, error) + // GetTablets returns tablets, optionally filtered by keyspace and shard. + GetTablets(ctx context.Context, in *vtctldata.GetTabletsRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletsResponse, error) + // GetVSchema returns the vschema for a keyspace. + GetVSchema(ctx context.Context, in *vtctldata.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetVSchemaResponse, error) + // GetWorkflows returns a list of workflows for the given keyspace. + GetWorkflows(ctx context.Context, in *vtctldata.GetWorkflowsRequest, opts ...grpc.CallOption) (*vtctldata.GetWorkflowsResponse, error) + // InitShardPrimary sets the initial primary for a shard. Will make all other + // tablets in the shard replicas of the provided primary. + // + // WARNING: This could cause data loss on an already replicating shard. + // PlannedReparentShard or EmergencyReparentShard should be used in those + // cases instead. + InitShardPrimary(ctx context.Context, in *vtctldata.InitShardPrimaryRequest, opts ...grpc.CallOption) (*vtctldata.InitShardPrimaryResponse, error) + // PlannedReparentShard reparents the shard to the new primary, or away from + // an old primary. Both the old and new primaries need to be reachable and + // running. + // + // **NOTE**: The vtctld will not consider any replicas outside the cell the + // current shard primary is in for promotion unless NewPrimary is explicitly + // provided in the request. 
+ PlannedReparentShard(ctx context.Context, in *vtctldata.PlannedReparentShardRequest, opts ...grpc.CallOption) (*vtctldata.PlannedReparentShardResponse, error) + // RemoveKeyspaceCell removes the specified cell from the Cells list for all + // shards in the specified keyspace, as well as from the SrvKeyspace for that + // keyspace in that cell. + RemoveKeyspaceCell(ctx context.Context, in *vtctldata.RemoveKeyspaceCellRequest, opts ...grpc.CallOption) (*vtctldata.RemoveKeyspaceCellResponse, error) + // RemoveShardCell removes the specified cell from the specified shard's Cells + // list. + RemoveShardCell(ctx context.Context, in *vtctldata.RemoveShardCellRequest, opts ...grpc.CallOption) (*vtctldata.RemoveShardCellResponse, error) + // ReparentTablet reparents a tablet to the current primary in the shard. This + // only works if the current replica position matches the last known reparent + // action. + ReparentTablet(ctx context.Context, in *vtctldata.ReparentTabletRequest, opts ...grpc.CallOption) (*vtctldata.ReparentTabletResponse, error) + // ShardReplicationPositions returns the replication position of each tablet + // in a shard. This RPC makes a best-effort to return partial results. For + // example, if one tablet in the shard graph is unreachable, then + // ShardReplicationPositions will return non-error, and include valid results + // for the reachable tablets. + ShardReplicationPositions(ctx context.Context, in *vtctldata.ShardReplicationPositionsRequest, opts ...grpc.CallOption) (*vtctldata.ShardReplicationPositionsResponse, error) + // TabletExternallyReparented changes metadata in the topology server to + // acknowledge a shard primary change performed by an external tool (e.g. + // orchestrator). + // + // See the Reparenting guide for more information: + // https://vitess.io/docs/user-guides/configuration-advanced/reparenting/#external-reparenting. 
+ TabletExternallyReparented(ctx context.Context, in *vtctldata.TabletExternallyReparentedRequest, opts ...grpc.CallOption) (*vtctldata.TabletExternallyReparentedResponse, error) } type vtctldClient struct { @@ -175,6 +302,69 @@ func NewVtctldClient(cc *grpc.ClientConn) VtctldClient { return &vtctldClient{cc} } +func (c *vtctldClient) ChangeTabletType(ctx context.Context, in *vtctldata.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldata.ChangeTabletTypeResponse, error) { + out := new(vtctldata.ChangeTabletTypeResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ChangeTabletType", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) CreateKeyspace(ctx context.Context, in *vtctldata.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.CreateKeyspaceResponse, error) { + out := new(vtctldata.CreateKeyspaceResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CreateKeyspace", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) CreateShard(ctx context.Context, in *vtctldata.CreateShardRequest, opts ...grpc.CallOption) (*vtctldata.CreateShardResponse, error) { + out := new(vtctldata.CreateShardResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CreateShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) DeleteKeyspace(ctx context.Context, in *vtctldata.DeleteKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.DeleteKeyspaceResponse, error) { + out := new(vtctldata.DeleteKeyspaceResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/DeleteKeyspace", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) DeleteShards(ctx context.Context, in *vtctldata.DeleteShardsRequest, opts ...grpc.CallOption) (*vtctldata.DeleteShardsResponse, error) { + out := new(vtctldata.DeleteShardsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/DeleteShards", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) DeleteTablets(ctx context.Context, in *vtctldata.DeleteTabletsRequest, opts ...grpc.CallOption) (*vtctldata.DeleteTabletsResponse, error) { + out := new(vtctldata.DeleteTabletsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/DeleteTablets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) EmergencyReparentShard(ctx context.Context, in *vtctldata.EmergencyReparentShardRequest, opts ...grpc.CallOption) (*vtctldata.EmergencyReparentShardResponse, error) { + out := new(vtctldata.EmergencyReparentShardResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/EmergencyReparentShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldata.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.FindAllShardsInKeyspaceResponse, error) { out := new(vtctldata.FindAllShardsInKeyspaceResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/FindAllShardsInKeyspace", in, out, opts...) @@ -184,6 +374,42 @@ func (c *vtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldat return out, nil } +func (c *vtctldClient) GetBackups(ctx context.Context, in *vtctldata.GetBackupsRequest, opts ...grpc.CallOption) (*vtctldata.GetBackupsResponse, error) { + out := new(vtctldata.GetBackupsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetBackups", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetCellInfoNames(ctx context.Context, in *vtctldata.GetCellInfoNamesRequest, opts ...grpc.CallOption) (*vtctldata.GetCellInfoNamesResponse, error) { + out := new(vtctldata.GetCellInfoNamesResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetCellInfoNames", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetCellInfo(ctx context.Context, in *vtctldata.GetCellInfoRequest, opts ...grpc.CallOption) (*vtctldata.GetCellInfoResponse, error) { + out := new(vtctldata.GetCellInfoResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetCellInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetCellsAliases(ctx context.Context, in *vtctldata.GetCellsAliasesRequest, opts ...grpc.CallOption) (*vtctldata.GetCellsAliasesResponse, error) { + out := new(vtctldata.GetCellsAliasesResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetCellsAliases", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) GetKeyspace(ctx context.Context, in *vtctldata.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspaceResponse, error) { out := new(vtctldata.GetKeyspaceResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetKeyspace", in, out, opts...) @@ -202,103 +428,983 @@ func (c *vtctldClient) GetKeyspaces(ctx context.Context, in *vtctldata.GetKeyspa return out, nil } +func (c *vtctldClient) GetSchema(ctx context.Context, in *vtctldata.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaResponse, error) { + out := new(vtctldata.GetSchemaResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetSchema", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) { + out := new(vtctldata.GetShardResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetSrvKeyspaces(ctx context.Context, in *vtctldata.GetSrvKeyspacesRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvKeyspacesResponse, error) { + out := new(vtctldata.GetSrvKeyspacesResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetSrvKeyspaces", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetSrvVSchema(ctx context.Context, in *vtctldata.GetSrvVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvVSchemaResponse, error) { + out := new(vtctldata.GetSrvVSchemaResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetSrvVSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetTablet(ctx context.Context, in *vtctldata.GetTabletRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletResponse, error) { + out := new(vtctldata.GetTabletResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetTablet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetTablets(ctx context.Context, in *vtctldata.GetTabletsRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletsResponse, error) { + out := new(vtctldata.GetTabletsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetTablets", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetVSchema(ctx context.Context, in *vtctldata.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetVSchemaResponse, error) { + out := new(vtctldata.GetVSchemaResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetVSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) GetWorkflows(ctx context.Context, in *vtctldata.GetWorkflowsRequest, opts ...grpc.CallOption) (*vtctldata.GetWorkflowsResponse, error) { + out := new(vtctldata.GetWorkflowsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetWorkflows", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) InitShardPrimary(ctx context.Context, in *vtctldata.InitShardPrimaryRequest, opts ...grpc.CallOption) (*vtctldata.InitShardPrimaryResponse, error) { + out := new(vtctldata.InitShardPrimaryResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/InitShardPrimary", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) PlannedReparentShard(ctx context.Context, in *vtctldata.PlannedReparentShardRequest, opts ...grpc.CallOption) (*vtctldata.PlannedReparentShardResponse, error) { + out := new(vtctldata.PlannedReparentShardResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/PlannedReparentShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) RemoveKeyspaceCell(ctx context.Context, in *vtctldata.RemoveKeyspaceCellRequest, opts ...grpc.CallOption) (*vtctldata.RemoveKeyspaceCellResponse, error) { + out := new(vtctldata.RemoveKeyspaceCellResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/RemoveKeyspaceCell", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) RemoveShardCell(ctx context.Context, in *vtctldata.RemoveShardCellRequest, opts ...grpc.CallOption) (*vtctldata.RemoveShardCellResponse, error) { + out := new(vtctldata.RemoveShardCellResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/RemoveShardCell", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) ReparentTablet(ctx context.Context, in *vtctldata.ReparentTabletRequest, opts ...grpc.CallOption) (*vtctldata.ReparentTabletResponse, error) { + out := new(vtctldata.ReparentTabletResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ReparentTablet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) ShardReplicationPositions(ctx context.Context, in *vtctldata.ShardReplicationPositionsRequest, opts ...grpc.CallOption) (*vtctldata.ShardReplicationPositionsResponse, error) { + out := new(vtctldata.ShardReplicationPositionsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ShardReplicationPositions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) TabletExternallyReparented(ctx context.Context, in *vtctldata.TabletExternallyReparentedRequest, opts ...grpc.CallOption) (*vtctldata.TabletExternallyReparentedResponse, error) { + out := new(vtctldata.TabletExternallyReparentedResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/TabletExternallyReparented", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // VtctldServer is the server API for Vtctld service. type VtctldServer interface { - // FindAllShardsInKeyspace returns a map of shard names to shard references for a given keyspace. + // ChangeTabletType changes the db type for the specified tablet, if possible. + // This is used primarily to arrange replicas, and it will not convert a + // primary. 
For that, use InitShardPrimary. + // + // NOTE: This command automatically updates the serving graph. + ChangeTabletType(context.Context, *vtctldata.ChangeTabletTypeRequest) (*vtctldata.ChangeTabletTypeResponse, error) + // CreateKeyspace creates the specified keyspace in the topology. For a + // SNAPSHOT keyspace, the request must specify the name of a base keyspace, + // as well as a snapshot time. + CreateKeyspace(context.Context, *vtctldata.CreateKeyspaceRequest) (*vtctldata.CreateKeyspaceResponse, error) + // CreateShard creates the specified shard in the topology. + CreateShard(context.Context, *vtctldata.CreateShardRequest) (*vtctldata.CreateShardResponse, error) + // DeleteKeyspace deletes the specified keyspace from the topology. In + // recursive mode, it also recursively deletes all shards in the keyspace. + // Otherwise, the keyspace must be empty (have no shards), or DeleteKeyspace + // returns an error. + DeleteKeyspace(context.Context, *vtctldata.DeleteKeyspaceRequest) (*vtctldata.DeleteKeyspaceResponse, error) + // DeleteShards deletes the specified shards from the topology. In recursive + // mode, it also deletes all tablets belonging to the shard. Otherwise, the + // shard must be empty (have no tablets) or DeleteShards returns an error for + // that shard. + DeleteShards(context.Context, *vtctldata.DeleteShardsRequest) (*vtctldata.DeleteShardsResponse, error) + // DeleteTablets deletes one or more tablets from the topology. + DeleteTablets(context.Context, *vtctldata.DeleteTabletsRequest) (*vtctldata.DeleteTabletsResponse, error) + // EmergencyReparentShard reparents the shard to the new primary. It assumes + // the old primary is dead or otherwise not responding. + EmergencyReparentShard(context.Context, *vtctldata.EmergencyReparentShardRequest) (*vtctldata.EmergencyReparentShardResponse, error) + // FindAllShardsInKeyspace returns a map of shard names to shard references + // for a given keyspace. 
FindAllShardsInKeyspace(context.Context, *vtctldata.FindAllShardsInKeyspaceRequest) (*vtctldata.FindAllShardsInKeyspaceResponse, error) + // GetBackups returns all the backups for a shard. + GetBackups(context.Context, *vtctldata.GetBackupsRequest) (*vtctldata.GetBackupsResponse, error) + // GetCellInfoNames returns all the cells for which we have a CellInfo object, + // meaning we have a topology service registered. + GetCellInfoNames(context.Context, *vtctldata.GetCellInfoNamesRequest) (*vtctldata.GetCellInfoNamesResponse, error) + // GetCellInfo returns the information for a cell. + GetCellInfo(context.Context, *vtctldata.GetCellInfoRequest) (*vtctldata.GetCellInfoResponse, error) + // GetCellsAliases returns a mapping of cell alias to cells identified by that + // alias. + GetCellsAliases(context.Context, *vtctldata.GetCellsAliasesRequest) (*vtctldata.GetCellsAliasesResponse, error) // GetKeyspace reads the given keyspace from the topo and returns it. GetKeyspace(context.Context, *vtctldata.GetKeyspaceRequest) (*vtctldata.GetKeyspaceResponse, error) // GetKeyspaces returns the keyspace struct of all keyspaces in the topo. GetKeyspaces(context.Context, *vtctldata.GetKeyspacesRequest) (*vtctldata.GetKeyspacesResponse, error) + // GetSchema returns the schema for a tablet, or just the schema for the + // specified tables in that tablet. + GetSchema(context.Context, *vtctldata.GetSchemaRequest) (*vtctldata.GetSchemaResponse, error) + // GetShard returns information about a shard in the topology. + GetShard(context.Context, *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) + // GetSrvKeyspaces returns the SrvKeyspaces for a keyspace in one or more + // cells. + GetSrvKeyspaces(context.Context, *vtctldata.GetSrvKeyspacesRequest) (*vtctldata.GetSrvKeyspacesResponse, error) + // GetSrvVSchema returns the SrvVSchema for a cell. 
+ GetSrvVSchema(context.Context, *vtctldata.GetSrvVSchemaRequest) (*vtctldata.GetSrvVSchemaResponse, error) + // GetTablet returns information about a tablet. + GetTablet(context.Context, *vtctldata.GetTabletRequest) (*vtctldata.GetTabletResponse, error) + // GetTablets returns tablets, optionally filtered by keyspace and shard. + GetTablets(context.Context, *vtctldata.GetTabletsRequest) (*vtctldata.GetTabletsResponse, error) + // GetVSchema returns the vschema for a keyspace. + GetVSchema(context.Context, *vtctldata.GetVSchemaRequest) (*vtctldata.GetVSchemaResponse, error) + // GetWorkflows returns a list of workflows for the given keyspace. + GetWorkflows(context.Context, *vtctldata.GetWorkflowsRequest) (*vtctldata.GetWorkflowsResponse, error) + // InitShardPrimary sets the initial primary for a shard. Will make all other + // tablets in the shard replicas of the provided primary. + // + // WARNING: This could cause data loss on an already replicating shard. + // PlannedReparentShard or EmergencyReparentShard should be used in those + // cases instead. + InitShardPrimary(context.Context, *vtctldata.InitShardPrimaryRequest) (*vtctldata.InitShardPrimaryResponse, error) + // PlannedReparentShard reparents the shard to the new primary, or away from + // an old primary. Both the old and new primaries need to be reachable and + // running. + // + // **NOTE**: The vtctld will not consider any replicas outside the cell the + // current shard primary is in for promotion unless NewPrimary is explicitly + // provided in the request. + PlannedReparentShard(context.Context, *vtctldata.PlannedReparentShardRequest) (*vtctldata.PlannedReparentShardResponse, error) + // RemoveKeyspaceCell removes the specified cell from the Cells list for all + // shards in the specified keyspace, as well as from the SrvKeyspace for that + // keyspace in that cell. 
+ RemoveKeyspaceCell(context.Context, *vtctldata.RemoveKeyspaceCellRequest) (*vtctldata.RemoveKeyspaceCellResponse, error) + // RemoveShardCell removes the specified cell from the specified shard's Cells + // list. + RemoveShardCell(context.Context, *vtctldata.RemoveShardCellRequest) (*vtctldata.RemoveShardCellResponse, error) + // ReparentTablet reparents a tablet to the current primary in the shard. This + // only works if the current replica position matches the last known reparent + // action. + ReparentTablet(context.Context, *vtctldata.ReparentTabletRequest) (*vtctldata.ReparentTabletResponse, error) + // ShardReplicationPositions returns the replication position of each tablet + // in a shard. This RPC makes a best-effort to return partial results. For + // example, if one tablet in the shard graph is unreachable, then + // ShardReplicationPositions will return non-error, and include valid results + // for the reachable tablets. + ShardReplicationPositions(context.Context, *vtctldata.ShardReplicationPositionsRequest) (*vtctldata.ShardReplicationPositionsResponse, error) + // TabletExternallyReparented changes metadata in the topology server to + // acknowledge a shard primary change performed by an external tool (e.g. + // orchestrator). + // + // See the Reparenting guide for more information: + // https://vitess.io/docs/user-guides/configuration-advanced/reparenting/#external-reparenting. + TabletExternallyReparented(context.Context, *vtctldata.TabletExternallyReparentedRequest) (*vtctldata.TabletExternallyReparentedResponse, error) } // UnimplementedVtctldServer can be embedded to have forward compatible implementations. 
type UnimplementedVtctldServer struct { } +func (*UnimplementedVtctldServer) ChangeTabletType(ctx context.Context, req *vtctldata.ChangeTabletTypeRequest) (*vtctldata.ChangeTabletTypeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChangeTabletType not implemented") +} +func (*UnimplementedVtctldServer) CreateKeyspace(ctx context.Context, req *vtctldata.CreateKeyspaceRequest) (*vtctldata.CreateKeyspaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateKeyspace not implemented") +} +func (*UnimplementedVtctldServer) CreateShard(ctx context.Context, req *vtctldata.CreateShardRequest) (*vtctldata.CreateShardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateShard not implemented") +} +func (*UnimplementedVtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldata.DeleteKeyspaceRequest) (*vtctldata.DeleteKeyspaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteKeyspace not implemented") +} +func (*UnimplementedVtctldServer) DeleteShards(ctx context.Context, req *vtctldata.DeleteShardsRequest) (*vtctldata.DeleteShardsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteShards not implemented") +} +func (*UnimplementedVtctldServer) DeleteTablets(ctx context.Context, req *vtctldata.DeleteTabletsRequest) (*vtctldata.DeleteTabletsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteTablets not implemented") +} +func (*UnimplementedVtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldata.EmergencyReparentShardRequest) (*vtctldata.EmergencyReparentShardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EmergencyReparentShard not implemented") +} func (*UnimplementedVtctldServer) FindAllShardsInKeyspace(ctx context.Context, req *vtctldata.FindAllShardsInKeyspaceRequest) (*vtctldata.FindAllShardsInKeyspaceResponse, error) { return nil, 
status.Errorf(codes.Unimplemented, "method FindAllShardsInKeyspace not implemented") } +func (*UnimplementedVtctldServer) GetBackups(ctx context.Context, req *vtctldata.GetBackupsRequest) (*vtctldata.GetBackupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBackups not implemented") +} +func (*UnimplementedVtctldServer) GetCellInfoNames(ctx context.Context, req *vtctldata.GetCellInfoNamesRequest) (*vtctldata.GetCellInfoNamesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCellInfoNames not implemented") +} +func (*UnimplementedVtctldServer) GetCellInfo(ctx context.Context, req *vtctldata.GetCellInfoRequest) (*vtctldata.GetCellInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCellInfo not implemented") +} +func (*UnimplementedVtctldServer) GetCellsAliases(ctx context.Context, req *vtctldata.GetCellsAliasesRequest) (*vtctldata.GetCellsAliasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCellsAliases not implemented") +} func (*UnimplementedVtctldServer) GetKeyspace(ctx context.Context, req *vtctldata.GetKeyspaceRequest) (*vtctldata.GetKeyspaceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetKeyspace not implemented") } func (*UnimplementedVtctldServer) GetKeyspaces(ctx context.Context, req *vtctldata.GetKeyspacesRequest) (*vtctldata.GetKeyspacesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetKeyspaces not implemented") } +func (*UnimplementedVtctldServer) GetSchema(ctx context.Context, req *vtctldata.GetSchemaRequest) (*vtctldata.GetSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedVtctldServer) GetShard(ctx context.Context, req *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetShard not implemented") +} +func 
(*UnimplementedVtctldServer) GetSrvKeyspaces(ctx context.Context, req *vtctldata.GetSrvKeyspacesRequest) (*vtctldata.GetSrvKeyspacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSrvKeyspaces not implemented") +} +func (*UnimplementedVtctldServer) GetSrvVSchema(ctx context.Context, req *vtctldata.GetSrvVSchemaRequest) (*vtctldata.GetSrvVSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSrvVSchema not implemented") +} +func (*UnimplementedVtctldServer) GetTablet(ctx context.Context, req *vtctldata.GetTabletRequest) (*vtctldata.GetTabletResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTablet not implemented") +} +func (*UnimplementedVtctldServer) GetTablets(ctx context.Context, req *vtctldata.GetTabletsRequest) (*vtctldata.GetTabletsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTablets not implemented") +} +func (*UnimplementedVtctldServer) GetVSchema(ctx context.Context, req *vtctldata.GetVSchemaRequest) (*vtctldata.GetVSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVSchema not implemented") +} +func (*UnimplementedVtctldServer) GetWorkflows(ctx context.Context, req *vtctldata.GetWorkflowsRequest) (*vtctldata.GetWorkflowsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetWorkflows not implemented") +} +func (*UnimplementedVtctldServer) InitShardPrimary(ctx context.Context, req *vtctldata.InitShardPrimaryRequest) (*vtctldata.InitShardPrimaryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitShardPrimary not implemented") +} +func (*UnimplementedVtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldata.PlannedReparentShardRequest) (*vtctldata.PlannedReparentShardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlannedReparentShard not implemented") +} +func (*UnimplementedVtctldServer) RemoveKeyspaceCell(ctx 
context.Context, req *vtctldata.RemoveKeyspaceCellRequest) (*vtctldata.RemoveKeyspaceCellResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveKeyspaceCell not implemented") +} +func (*UnimplementedVtctldServer) RemoveShardCell(ctx context.Context, req *vtctldata.RemoveShardCellRequest) (*vtctldata.RemoveShardCellResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveShardCell not implemented") +} +func (*UnimplementedVtctldServer) ReparentTablet(ctx context.Context, req *vtctldata.ReparentTabletRequest) (*vtctldata.ReparentTabletResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReparentTablet not implemented") +} +func (*UnimplementedVtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctldata.ShardReplicationPositionsRequest) (*vtctldata.ShardReplicationPositionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ShardReplicationPositions not implemented") +} +func (*UnimplementedVtctldServer) TabletExternallyReparented(ctx context.Context, req *vtctldata.TabletExternallyReparentedRequest) (*vtctldata.TabletExternallyReparentedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TabletExternallyReparented not implemented") +} func RegisterVtctldServer(s *grpc.Server, srv VtctldServer) { s.RegisterService(&_Vtctld_serviceDesc, srv) } -func _Vtctld_FindAllShardsInKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.FindAllShardsInKeyspaceRequest) +func _Vtctld_ChangeTabletType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ChangeTabletTypeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).FindAllShardsInKeyspace(ctx, in) + return 
srv.(VtctldServer).ChangeTabletType(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/FindAllShardsInKeyspace", + FullMethod: "/vtctlservice.Vtctld/ChangeTabletType", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VtctldServer).FindAllShardsInKeyspace(ctx, req.(*vtctldata.FindAllShardsInKeyspaceRequest)) + return srv.(VtctldServer).ChangeTabletType(ctx, req.(*vtctldata.ChangeTabletTypeRequest)) } return interceptor(ctx, in, info, handler) } -func _Vtctld_GetKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.GetKeyspaceRequest) +func _Vtctld_CreateKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CreateKeyspaceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).GetKeyspace(ctx, in) + return srv.(VtctldServer).CreateKeyspace(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/GetKeyspace", + FullMethod: "/vtctlservice.Vtctld/CreateKeyspace", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VtctldServer).GetKeyspace(ctx, req.(*vtctldata.GetKeyspaceRequest)) + return srv.(VtctldServer).CreateKeyspace(ctx, req.(*vtctldata.CreateKeyspaceRequest)) } return interceptor(ctx, in, info, handler) } -func _Vtctld_GetKeyspaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.GetKeyspacesRequest) +func _Vtctld_CreateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CreateShardRequest) if err := 
dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).GetKeyspaces(ctx, in) + return srv.(VtctldServer).CreateShard(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/GetKeyspaces", + FullMethod: "/vtctlservice.Vtctld/CreateShard", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VtctldServer).GetKeyspaces(ctx, req.(*vtctldata.GetKeyspacesRequest)) + return srv.(VtctldServer).CreateShard(ctx, req.(*vtctldata.CreateShardRequest)) } return interceptor(ctx, in, info, handler) } -var _Vtctld_serviceDesc = grpc.ServiceDesc{ - ServiceName: "vtctlservice.Vtctld", - HandlerType: (*VtctldServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "FindAllShardsInKeyspace", - Handler: _Vtctld_FindAllShardsInKeyspace_Handler, - }, - { - MethodName: "GetKeyspace", - Handler: _Vtctld_GetKeyspace_Handler, - }, - { - MethodName: "GetKeyspaces", - Handler: _Vtctld_GetKeyspaces_Handler, +func _Vtctld_DeleteKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.DeleteKeyspaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).DeleteKeyspace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/DeleteKeyspace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).DeleteKeyspace(ctx, req.(*vtctldata.DeleteKeyspaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_DeleteShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.DeleteShardsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(VtctldServer).DeleteShards(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/DeleteShards", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).DeleteShards(ctx, req.(*vtctldata.DeleteShardsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_DeleteTablets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.DeleteTabletsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).DeleteTablets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/DeleteTablets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).DeleteTablets(ctx, req.(*vtctldata.DeleteTabletsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_EmergencyReparentShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.EmergencyReparentShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).EmergencyReparentShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/EmergencyReparentShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).EmergencyReparentShard(ctx, req.(*vtctldata.EmergencyReparentShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_FindAllShardsInKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.FindAllShardsInKeyspaceRequest) + if 
err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).FindAllShardsInKeyspace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/FindAllShardsInKeyspace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).FindAllShardsInKeyspace(ctx, req.(*vtctldata.FindAllShardsInKeyspaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetBackups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetBackups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetBackups(ctx, req.(*vtctldata.GetBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetCellInfoNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetCellInfoNamesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetCellInfoNames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetCellInfoNames", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetCellInfoNames(ctx, req.(*vtctldata.GetCellInfoNamesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetCellInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) 
{ + in := new(vtctldata.GetCellInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetCellInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetCellInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetCellInfo(ctx, req.(*vtctldata.GetCellInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetCellsAliases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetCellsAliasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetCellsAliases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetCellsAliases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetCellsAliases(ctx, req.(*vtctldata.GetCellsAliasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetKeyspaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetKeyspace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetKeyspace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetKeyspace(ctx, req.(*vtctldata.GetKeyspaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetKeyspaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(vtctldata.GetKeyspacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetKeyspaces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetKeyspaces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetKeyspaces(ctx, req.(*vtctldata.GetKeyspacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetSchema(ctx, req.(*vtctldata.GetSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetShard(ctx, req.(*vtctldata.GetShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetSrvKeyspaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(vtctldata.GetSrvKeyspacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetSrvKeyspaces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetSrvKeyspaces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetSrvKeyspaces(ctx, req.(*vtctldata.GetSrvKeyspacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetSrvVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetSrvVSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetSrvVSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetSrvVSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetSrvVSchema(ctx, req.(*vtctldata.GetSrvVSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetTablet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetTabletRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetTablet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetTablet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetTablet(ctx, req.(*vtctldata.GetTabletRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetTablets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(vtctldata.GetTabletsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetTablets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetTablets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetTablets(ctx, req.(*vtctldata.GetTabletsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetVSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetVSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetVSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetVSchema(ctx, req.(*vtctldata.GetVSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_GetWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetWorkflows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetWorkflows(ctx, req.(*vtctldata.GetWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_InitShardPrimary_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(vtctldata.InitShardPrimaryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).InitShardPrimary(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/InitShardPrimary", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).InitShardPrimary(ctx, req.(*vtctldata.InitShardPrimaryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_PlannedReparentShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.PlannedReparentShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).PlannedReparentShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/PlannedReparentShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).PlannedReparentShard(ctx, req.(*vtctldata.PlannedReparentShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_RemoveKeyspaceCell_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RemoveKeyspaceCellRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).RemoveKeyspaceCell(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/RemoveKeyspaceCell", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).RemoveKeyspaceCell(ctx, req.(*vtctldata.RemoveKeyspaceCellRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_RemoveShardCell_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RemoveShardCellRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).RemoveShardCell(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/RemoveShardCell", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).RemoveShardCell(ctx, req.(*vtctldata.RemoveShardCellRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_ReparentTablet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ReparentTabletRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ReparentTablet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ReparentTablet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).ReparentTablet(ctx, req.(*vtctldata.ReparentTabletRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_ShardReplicationPositions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ShardReplicationPositionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ShardReplicationPositions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ShardReplicationPositions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).ShardReplicationPositions(ctx, req.(*vtctldata.ShardReplicationPositionsRequest)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _Vtctld_TabletExternallyReparented_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.TabletExternallyReparentedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).TabletExternallyReparented(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/TabletExternallyReparented", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).TabletExternallyReparented(ctx, req.(*vtctldata.TabletExternallyReparentedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Vtctld_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vtctlservice.Vtctld", + HandlerType: (*VtctldServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ChangeTabletType", + Handler: _Vtctld_ChangeTabletType_Handler, + }, + { + MethodName: "CreateKeyspace", + Handler: _Vtctld_CreateKeyspace_Handler, + }, + { + MethodName: "CreateShard", + Handler: _Vtctld_CreateShard_Handler, + }, + { + MethodName: "DeleteKeyspace", + Handler: _Vtctld_DeleteKeyspace_Handler, + }, + { + MethodName: "DeleteShards", + Handler: _Vtctld_DeleteShards_Handler, + }, + { + MethodName: "DeleteTablets", + Handler: _Vtctld_DeleteTablets_Handler, + }, + { + MethodName: "EmergencyReparentShard", + Handler: _Vtctld_EmergencyReparentShard_Handler, + }, + { + MethodName: "FindAllShardsInKeyspace", + Handler: _Vtctld_FindAllShardsInKeyspace_Handler, + }, + { + MethodName: "GetBackups", + Handler: _Vtctld_GetBackups_Handler, + }, + { + MethodName: "GetCellInfoNames", + Handler: _Vtctld_GetCellInfoNames_Handler, + }, + { + MethodName: "GetCellInfo", + Handler: _Vtctld_GetCellInfo_Handler, + }, + { + MethodName: "GetCellsAliases", + Handler: _Vtctld_GetCellsAliases_Handler, + }, + { + MethodName: 
"GetKeyspace", + Handler: _Vtctld_GetKeyspace_Handler, + }, + { + MethodName: "GetKeyspaces", + Handler: _Vtctld_GetKeyspaces_Handler, + }, + { + MethodName: "GetSchema", + Handler: _Vtctld_GetSchema_Handler, + }, + { + MethodName: "GetShard", + Handler: _Vtctld_GetShard_Handler, + }, + { + MethodName: "GetSrvKeyspaces", + Handler: _Vtctld_GetSrvKeyspaces_Handler, + }, + { + MethodName: "GetSrvVSchema", + Handler: _Vtctld_GetSrvVSchema_Handler, + }, + { + MethodName: "GetTablet", + Handler: _Vtctld_GetTablet_Handler, + }, + { + MethodName: "GetTablets", + Handler: _Vtctld_GetTablets_Handler, + }, + { + MethodName: "GetVSchema", + Handler: _Vtctld_GetVSchema_Handler, + }, + { + MethodName: "GetWorkflows", + Handler: _Vtctld_GetWorkflows_Handler, + }, + { + MethodName: "InitShardPrimary", + Handler: _Vtctld_InitShardPrimary_Handler, + }, + { + MethodName: "PlannedReparentShard", + Handler: _Vtctld_PlannedReparentShard_Handler, + }, + { + MethodName: "RemoveKeyspaceCell", + Handler: _Vtctld_RemoveKeyspaceCell_Handler, + }, + { + MethodName: "RemoveShardCell", + Handler: _Vtctld_RemoveShardCell_Handler, + }, + { + MethodName: "ReparentTablet", + Handler: _Vtctld_ReparentTablet_Handler, + }, + { + MethodName: "ShardReplicationPositions", + Handler: _Vtctld_ShardReplicationPositions_Handler, + }, + { + MethodName: "TabletExternallyReparented", + Handler: _Vtctld_TabletExternallyReparented_Handler, }, }, Streams: []grpc.StreamDesc{}, diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index 199d82cd0f6..d265e098d5b 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -1,14 +1,16 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: vtgate.proto package vtgate import ( + encoding_binary "encoding/binary" fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" query "vitess.io/vitess/go/vt/proto/query" topodata "vitess.io/vitess/go/vt/proto/topodata" @@ -157,7 +159,9 @@ type Session struct { // DDL strategy DDLStrategy string `protobuf:"bytes,21,opt,name=DDLStrategy,proto3" json:"DDLStrategy,omitempty"` // Session UUID - SessionUUID string `protobuf:"bytes,22,opt,name=SessionUUID,proto3" json:"SessionUUID,omitempty"` + SessionUUID string `protobuf:"bytes,22,opt,name=SessionUUID,proto3" json:"SessionUUID,omitempty"` + // enable_system_settings defines if we can use reserved connections. + EnableSystemSettings bool `protobuf:"varint,23,opt,name=enable_system_settings,json=enableSystemSettings,proto3" json:"enable_system_settings,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -169,18 +173,26 @@ func (*Session) ProtoMessage() {} func (*Session) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{0} } - func (m *Session) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Session.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Session) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Session.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Session.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Session) XXX_Merge(src proto.Message) { xxx_messageInfo_Session.Merge(m, src) } func (m *Session) XXX_Size() int { - return xxx_messageInfo_Session.Size(m) + return m.Size() } func (m *Session) XXX_DiscardUnknown() { xxx_messageInfo_Session.DiscardUnknown(m) @@ -335,6 +347,13 @@ func (m *Session) 
GetSessionUUID() string { return "" } +func (m *Session) GetEnableSystemSettings() bool { + if m != nil { + return m.EnableSystemSettings + } + return false +} + type Session_ShardSession struct { Target *query.Target `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` TransactionId int64 `protobuf:"varint,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` @@ -352,18 +371,26 @@ func (*Session_ShardSession) ProtoMessage() {} func (*Session_ShardSession) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{0, 0} } - func (m *Session_ShardSession) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Session_ShardSession.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Session_ShardSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Session_ShardSession.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Session_ShardSession.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Session_ShardSession) XXX_Merge(src proto.Message) { xxx_messageInfo_Session_ShardSession.Merge(m, src) } func (m *Session_ShardSession) XXX_Size() int { - return xxx_messageInfo_Session_ShardSession.Size(m) + return m.Size() } func (m *Session_ShardSession) XXX_DiscardUnknown() { xxx_messageInfo_Session_ShardSession.DiscardUnknown(m) @@ -416,18 +443,26 @@ func (*ReadAfterWrite) ProtoMessage() {} func (*ReadAfterWrite) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{1} } - func (m *ReadAfterWrite) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadAfterWrite.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ReadAfterWrite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadAfterWrite.Marshal(b, m, deterministic) + if deterministic { + return 
xxx_messageInfo_ReadAfterWrite.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ReadAfterWrite) XXX_Merge(src proto.Message) { xxx_messageInfo_ReadAfterWrite.Merge(m, src) } func (m *ReadAfterWrite) XXX_Size() int { - return xxx_messageInfo_ReadAfterWrite.Size(m) + return m.Size() } func (m *ReadAfterWrite) XXX_DiscardUnknown() { xxx_messageInfo_ReadAfterWrite.DiscardUnknown(m) @@ -481,18 +516,26 @@ func (*ExecuteRequest) ProtoMessage() {} func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{2} } - func (m *ExecuteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteRequest.Merge(m, src) } func (m *ExecuteRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteRequest.Size(m) + return m.Size() } func (m *ExecuteRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteRequest.DiscardUnknown(m) @@ -563,18 +606,26 @@ func (*ExecuteResponse) ProtoMessage() {} func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{3} } - func (m *ExecuteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteResponse.Marshal(b, m, deterministic) + if deterministic { + return 
xxx_messageInfo_ExecuteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteResponse.Merge(m, src) } func (m *ExecuteResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteResponse.Size(m) + return m.Size() } func (m *ExecuteResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteResponse.DiscardUnknown(m) @@ -629,18 +680,26 @@ func (*ExecuteBatchRequest) ProtoMessage() {} func (*ExecuteBatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{4} } - func (m *ExecuteBatchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteBatchRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteBatchRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteBatchRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteBatchRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteBatchRequest.Merge(m, src) } func (m *ExecuteBatchRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteBatchRequest.Size(m) + return m.Size() } func (m *ExecuteBatchRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteBatchRequest.DiscardUnknown(m) @@ -718,18 +777,26 @@ func (*ExecuteBatchResponse) ProtoMessage() {} func (*ExecuteBatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{5} } - func (m *ExecuteBatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteBatchResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
return xxx_messageInfo_ExecuteBatchResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteBatchResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteBatchResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteBatchResponse.Merge(m, src) } func (m *ExecuteBatchResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteBatchResponse.Size(m) + return m.Size() } func (m *ExecuteBatchResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteBatchResponse.DiscardUnknown(m) @@ -783,18 +850,26 @@ func (*StreamExecuteRequest) ProtoMessage() {} func (*StreamExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{6} } - func (m *StreamExecuteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamExecuteRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamExecuteRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamExecuteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamExecuteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamExecuteRequest.Merge(m, src) } func (m *StreamExecuteRequest) XXX_Size() int { - return xxx_messageInfo_StreamExecuteRequest.Size(m) + return m.Size() } func (m *StreamExecuteRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamExecuteRequest.DiscardUnknown(m) @@ -863,18 +938,26 @@ func (*StreamExecuteResponse) ProtoMessage() {} func (*StreamExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{7} } - func (m *StreamExecuteResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_StreamExecuteResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *StreamExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamExecuteResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_StreamExecuteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *StreamExecuteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamExecuteResponse.Merge(m, src) } func (m *StreamExecuteResponse) XXX_Size() int { - return xxx_messageInfo_StreamExecuteResponse.Size(m) + return m.Size() } func (m *StreamExecuteResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamExecuteResponse.DiscardUnknown(m) @@ -907,18 +990,26 @@ func (*ResolveTransactionRequest) ProtoMessage() {} func (*ResolveTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{8} } - func (m *ResolveTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveTransactionRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ResolveTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveTransactionRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ResolveTransactionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ResolveTransactionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ResolveTransactionRequest.Merge(m, src) } func (m *ResolveTransactionRequest) XXX_Size() int { - return xxx_messageInfo_ResolveTransactionRequest.Size(m) + return m.Size() } func (m *ResolveTransactionRequest) XXX_DiscardUnknown() { xxx_messageInfo_ResolveTransactionRequest.DiscardUnknown(m) @@ -953,18 +1044,26 @@ func 
(*ResolveTransactionResponse) ProtoMessage() {} func (*ResolveTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_aab96496ceaf1ebb, []int{9} } - func (m *ResolveTransactionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveTransactionResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ResolveTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveTransactionResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ResolveTransactionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ResolveTransactionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ResolveTransactionResponse.Merge(m, src) } func (m *ResolveTransactionResponse) XXX_Size() int { - return xxx_messageInfo_ResolveTransactionResponse.Size(m) + return m.Size() } func (m *ResolveTransactionResponse) XXX_DiscardUnknown() { xxx_messageInfo_ResolveTransactionResponse.DiscardUnknown(m) @@ -972,6 +1071,53 @@ func (m *ResolveTransactionResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ResolveTransactionResponse proto.InternalMessageInfo +type VStreamFlags struct { + MinimizeSkew bool `protobuf:"varint,1,opt,name=minimize_skew,json=minimizeSkew,proto3" json:"minimize_skew,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamFlags) Reset() { *m = VStreamFlags{} } +func (m *VStreamFlags) String() string { return proto.CompactTextString(m) } +func (*VStreamFlags) ProtoMessage() {} +func (*VStreamFlags) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{10} +} +func (m *VStreamFlags) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VStreamFlags) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if 
deterministic { + return xxx_messageInfo_VStreamFlags.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VStreamFlags) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamFlags.Merge(m, src) +} +func (m *VStreamFlags) XXX_Size() int { + return m.Size() +} +func (m *VStreamFlags) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamFlags.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamFlags proto.InternalMessageInfo + +func (m *VStreamFlags) GetMinimizeSkew() bool { + if m != nil { + return m.MinimizeSkew + } + return false +} + // VStreamRequest is the payload for VStream. type VStreamRequest struct { CallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` @@ -981,6 +1127,7 @@ type VStreamRequest struct { // position is of the form 'ks1:0@MySQL56/|ks2:-80@MySQL56/'. Vgtid *binlogdata.VGtid `protobuf:"bytes,3,opt,name=vgtid,proto3" json:"vgtid,omitempty"` Filter *binlogdata.Filter `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + Flags *VStreamFlags `protobuf:"bytes,5,opt,name=flags,proto3" json:"flags,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -990,20 +1137,28 @@ func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRequest) ProtoMessage() {} func (*VStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab96496ceaf1ebb, []int{10} + return fileDescriptor_aab96496ceaf1ebb, []int{11} } - func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic) + if 
deterministic { + return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamRequest.Merge(m, src) } func (m *VStreamRequest) XXX_Size() int { - return xxx_messageInfo_VStreamRequest.Size(m) + return m.Size() } func (m *VStreamRequest) XXX_DiscardUnknown() { xxx_messageInfo_VStreamRequest.DiscardUnknown(m) @@ -1039,6 +1194,13 @@ func (m *VStreamRequest) GetFilter() *binlogdata.Filter { return nil } +func (m *VStreamRequest) GetFlags() *VStreamFlags { + if m != nil { + return m.Flags + } + return nil +} + // VStreamResponse is streamed by VStream. type VStreamResponse struct { Events []*binlogdata.VEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` @@ -1051,20 +1213,28 @@ func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } func (*VStreamResponse) ProtoMessage() {} func (*VStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_aab96496ceaf1ebb, []int{11} + return fileDescriptor_aab96496ceaf1ebb, []int{12} } - func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VStreamResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_VStreamResponse.Merge(m, src) } func (m *VStreamResponse) XXX_Size() int { - return xxx_messageInfo_VStreamResponse.Size(m) + return 
m.Size() } func (m *VStreamResponse) XXX_DiscardUnknown() { xxx_messageInfo_VStreamResponse.DiscardUnknown(m) @@ -1095,6 +1265,7 @@ func init() { proto.RegisterType((*StreamExecuteResponse)(nil), "vtgate.StreamExecuteResponse") proto.RegisterType((*ResolveTransactionRequest)(nil), "vtgate.ResolveTransactionRequest") proto.RegisterType((*ResolveTransactionResponse)(nil), "vtgate.ResolveTransactionResponse") + proto.RegisterType((*VStreamFlags)(nil), "vtgate.VStreamFlags") proto.RegisterType((*VStreamRequest)(nil), "vtgate.VStreamRequest") proto.RegisterType((*VStreamResponse)(nil), "vtgate.VStreamResponse") } @@ -1102,90 +1273,4559 @@ func init() { func init() { proto.RegisterFile("vtgate.proto", fileDescriptor_aab96496ceaf1ebb) } var fileDescriptor_aab96496ceaf1ebb = []byte{ - // 1357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x6d, 0x6f, 0x1b, 0xc5, - 0x13, 0xef, 0xf9, 0xd9, 0xe3, 0xa7, 0xcb, 0xe6, 0xe1, 0x7f, 0xcd, 0xbf, 0x80, 0xe5, 0xb6, 0xaa, - 0x5b, 0x50, 0x02, 0x41, 0x40, 0x85, 0x40, 0x90, 0xd8, 0x6e, 0x71, 0x95, 0xd4, 0x61, 0xed, 0x24, - 0x12, 0x02, 0x9d, 0x2e, 0xbe, 0x8d, 0xb3, 0x8a, 0x73, 0xeb, 0xee, 0xae, 0x1d, 0xfc, 0x29, 0x78, - 0x0f, 0x1f, 0x80, 0x37, 0xbc, 0xe7, 0x3b, 0xf0, 0x8e, 0x6f, 0x84, 0x76, 0xf7, 0xce, 0x3e, 0xbb, - 0x81, 0xa6, 0xad, 0xfa, 0xc6, 0xba, 0x9d, 0xdf, 0xec, 0xec, 0xcc, 0xfc, 0x66, 0x76, 0xd6, 0x50, - 0x9c, 0xc8, 0x81, 0x27, 0xc9, 0xd6, 0x88, 0x33, 0xc9, 0x50, 0xc6, 0xac, 0x36, 0xed, 0x53, 0x1a, - 0x0c, 0xd9, 0xc0, 0xf7, 0xa4, 0x67, 0x90, 0xcd, 0xc2, 0x8b, 0x31, 0xe1, 0xd3, 0x70, 0x51, 0x96, - 0x6c, 0xc4, 0xe2, 0xe0, 0x44, 0xf2, 0x51, 0xdf, 0x2c, 0x6a, 0xbf, 0x15, 0x20, 0xdb, 0x25, 0x42, - 0x50, 0x16, 0xa0, 0xfb, 0x50, 0xa6, 0x81, 0x2b, 0xb9, 0x17, 0x08, 0xaf, 0x2f, 0x29, 0x0b, 0x1c, - 0xab, 0x6a, 0xd5, 0x73, 0xb8, 0x44, 0x83, 0xde, 0x5c, 0x88, 0x1a, 0x50, 0x16, 0xe7, 0x1e, 0xf7, - 0x5d, 0x61, 0xf6, 0x09, 0x27, 0x51, 0x4d, 0xd6, 0x0b, 0x3b, 0x77, 0xb6, 0x42, 0xef, 
0x42, 0x7b, - 0x5b, 0x5d, 0xa5, 0x15, 0x2e, 0x70, 0x49, 0xc4, 0x56, 0x02, 0xbd, 0x0f, 0xe0, 0x8d, 0x25, 0xeb, - 0xb3, 0xcb, 0x4b, 0x2a, 0x9d, 0x94, 0x3e, 0x27, 0x26, 0x41, 0x77, 0xa1, 0x24, 0x3d, 0x3e, 0x20, - 0xd2, 0x15, 0x92, 0xd3, 0x60, 0xe0, 0xa4, 0xab, 0x56, 0x3d, 0x8f, 0x8b, 0x46, 0xd8, 0xd5, 0x32, - 0xb4, 0x0d, 0x59, 0x36, 0x92, 0xda, 0x85, 0x4c, 0xd5, 0xaa, 0x17, 0x76, 0xd6, 0xb7, 0x4c, 0xe0, - 0xad, 0x9f, 0x49, 0x7f, 0x2c, 0x49, 0xc7, 0x80, 0x38, 0xd2, 0x42, 0x7b, 0x60, 0xc7, 0xc2, 0x73, - 0x2f, 0x99, 0x4f, 0x9c, 0x6c, 0xd5, 0xaa, 0x97, 0x77, 0xfe, 0x17, 0x39, 0x1f, 0x8b, 0xf4, 0x80, - 0xf9, 0x04, 0x57, 0xe4, 0xa2, 0x00, 0x6d, 0x43, 0xee, 0xca, 0xe3, 0x01, 0x0d, 0x06, 0xc2, 0xc9, - 0xe9, 0xc0, 0x57, 0xc3, 0x53, 0xbf, 0x57, 0xbf, 0x27, 0x06, 0xc3, 0x33, 0x25, 0xf4, 0x0d, 0x14, - 0x47, 0x9c, 0xcc, 0xb3, 0x95, 0xbf, 0x41, 0xb6, 0x0a, 0x23, 0x4e, 0x66, 0xb9, 0xda, 0x85, 0xd2, - 0x88, 0x09, 0x39, 0xb7, 0x00, 0x37, 0xb0, 0x50, 0x54, 0x5b, 0x66, 0x26, 0xee, 0x41, 0x79, 0xe8, - 0x09, 0xe9, 0xd2, 0x40, 0x10, 0x2e, 0x5d, 0xea, 0x3b, 0x85, 0xaa, 0x55, 0x4f, 0xe1, 0xa2, 0x92, - 0xb6, 0xb5, 0xb0, 0xed, 0xa3, 0xf7, 0x00, 0xce, 0xd8, 0x38, 0xf0, 0x5d, 0xce, 0xae, 0x84, 0x53, - 0xd4, 0x1a, 0x79, 0x2d, 0xc1, 0xec, 0x4a, 0x20, 0x17, 0x36, 0xc6, 0x82, 0x70, 0xd7, 0x27, 0x67, - 0x34, 0x20, 0xbe, 0x3b, 0xf1, 0x38, 0xf5, 0x4e, 0x87, 0x44, 0x38, 0x25, 0xed, 0xd0, 0xc3, 0x65, - 0x87, 0x8e, 0x04, 0xe1, 0x4d, 0xa3, 0x7c, 0x1c, 0xe9, 0xb6, 0x02, 0xc9, 0xa7, 0x78, 0x6d, 0x7c, - 0x0d, 0x84, 0x3a, 0x60, 0x8b, 0xa9, 0x90, 0xe4, 0x32, 0x66, 0xba, 0xac, 0x4d, 0xdf, 0x7b, 0x29, - 0x56, 0xad, 0xb7, 0x64, 0xb5, 0x22, 0x16, 0xa5, 0xe8, 0xff, 0x90, 0xe7, 0xec, 0xca, 0xed, 0xb3, - 0x71, 0x20, 0x9d, 0x4a, 0xd5, 0xaa, 0x27, 0x71, 0x8e, 0xb3, 0xab, 0x86, 0x5a, 0xab, 0x12, 0x14, - 0xde, 0x84, 0x8c, 0x18, 0x0d, 0xa4, 0x70, 0xec, 0x6a, 0xb2, 0x9e, 0xc7, 0x31, 0x09, 0xaa, 0x83, - 0x4d, 0x03, 0x97, 0x13, 0x41, 0xf8, 0x84, 0xf8, 0x6e, 0x9f, 0x05, 0x81, 0xb3, 0xa2, 0x0b, 0xb5, - 0x4c, 0x03, 0x1c, 0x8a, 
0x1b, 0x2c, 0x08, 0x14, 0xc3, 0x43, 0xd6, 0xbf, 0x88, 0x08, 0x72, 0x90, - 0x2e, 0xc6, 0x57, 0x30, 0xac, 0x76, 0x44, 0x9d, 0xb7, 0x05, 0xab, 0x9a, 0x1e, 0x6d, 0xe5, 0x9c, - 0x78, 0x5c, 0x9e, 0x12, 0x4f, 0x3a, 0xab, 0xda, 0xe3, 0x15, 0x05, 0xed, 0xb3, 0xfe, 0xc5, 0x77, - 0x11, 0x80, 0xbe, 0x05, 0x9b, 0x13, 0xcf, 0x77, 0xbd, 0x33, 0x49, 0xb8, 0x7b, 0xc5, 0xa9, 0x24, - 0xce, 0x9a, 0x3e, 0x74, 0x23, 0x3a, 0x14, 0x13, 0xcf, 0xdf, 0x55, 0xf0, 0x89, 0x42, 0x71, 0x99, - 0x2f, 0xac, 0x51, 0x15, 0x0a, 0xcd, 0xe6, 0x7e, 0x57, 0x72, 0x4f, 0x92, 0xc1, 0xd4, 0x59, 0xd7, - 0xdd, 0x15, 0x17, 0x29, 0x8d, 0xd0, 0xbd, 0xa3, 0xa3, 0x76, 0xd3, 0xd9, 0x30, 0x1a, 0x31, 0xd1, - 0xe6, 0x9f, 0x16, 0x14, 0xe3, 0x31, 0xa1, 0xfb, 0x90, 0x31, 0xfd, 0xa9, 0x2f, 0x8e, 0xc2, 0x4e, - 0x29, 0x6c, 0x8c, 0x9e, 0x16, 0xe2, 0x10, 0x54, 0xf7, 0x4c, 0xbc, 0x0b, 0xa9, 0xef, 0x24, 0x74, - 0xa0, 0xa5, 0x98, 0xb4, 0xed, 0xa3, 0xc7, 0x50, 0x94, 0x8a, 0x46, 0xe9, 0x7a, 0x43, 0xea, 0x09, - 0x27, 0x19, 0xb6, 0xf8, 0xec, 0x3a, 0xeb, 0x69, 0x74, 0x57, 0x81, 0xb8, 0x20, 0xe7, 0x0b, 0xf4, - 0x01, 0x14, 0x66, 0xb4, 0x51, 0x5f, 0xdf, 0x2e, 0x49, 0x0c, 0x91, 0xa8, 0xed, 0x6f, 0xfe, 0x08, - 0xb7, 0xff, 0xb5, 0x36, 0x91, 0x0d, 0xc9, 0x0b, 0x32, 0xd5, 0x21, 0xe4, 0xb1, 0xfa, 0x44, 0x0f, - 0x21, 0x3d, 0xf1, 0x86, 0x63, 0xa2, 0xfd, 0x9c, 0xf7, 0xfb, 0x1e, 0x0d, 0x66, 0x7b, 0xb1, 0xd1, - 0xf8, 0x32, 0xf1, 0xd8, 0xda, 0xdc, 0x83, 0xb5, 0xeb, 0xca, 0xf3, 0x1a, 0xc3, 0x6b, 0x71, 0xc3, - 0xf9, 0x98, 0x8d, 0x67, 0xa9, 0x5c, 0xd2, 0x4e, 0xd5, 0xfe, 0xb0, 0xa0, 0xbc, 0x48, 0x24, 0xfa, - 0x04, 0xd6, 0x97, 0xa9, 0x77, 0x07, 0x92, 0xfa, 0xa1, 0x59, 0xb4, 0xc8, 0xf3, 0x53, 0x49, 0x7d, - 0xf4, 0x05, 0x38, 0x2f, 0x6d, 0x91, 0xf4, 0x92, 0xb0, 0xb1, 0xd4, 0x07, 0x5b, 0x78, 0x7d, 0x71, - 0x57, 0xcf, 0x80, 0xaa, 0x2c, 0xc3, 0x92, 0x56, 0x53, 0xa1, 0x7f, 0xa1, 0x0f, 0x32, 0x44, 0xe4, - 0xf0, 0x4a, 0x08, 0xf5, 0x14, 0xa2, 0xce, 0x11, 0xb5, 0xdf, 0x13, 0x50, 0x0e, 0xaf, 0x5e, 0x4c, - 0x5e, 0x8c, 0x89, 0x90, 0xe8, 0x23, 0xc8, 0xf7, 0xbd, 0xe1, 
0x90, 0x70, 0x37, 0x74, 0xb1, 0xb0, - 0x53, 0xd9, 0x32, 0x03, 0xa8, 0xa1, 0xe5, 0xed, 0x26, 0xce, 0x19, 0x8d, 0xb6, 0x8f, 0x1e, 0x42, - 0x36, 0xea, 0xa1, 0xc4, 0x4c, 0x37, 0xde, 0x43, 0x38, 0xc2, 0xd1, 0x03, 0x48, 0x6b, 0x16, 0xc2, - 0xb2, 0x58, 0x89, 0x38, 0x51, 0xb7, 0x95, 0xbe, 0x88, 0xb1, 0xc1, 0xd1, 0x67, 0x10, 0xd6, 0x86, - 0x2b, 0xa7, 0x23, 0xa2, 0x8b, 0xa1, 0xbc, 0xb3, 0xb6, 0x5c, 0x45, 0xbd, 0xe9, 0x88, 0x60, 0x90, - 0xb3, 0x6f, 0x55, 0xa4, 0x17, 0x64, 0x2a, 0x46, 0x5e, 0x9f, 0xb8, 0x7a, 0x74, 0xe9, 0x11, 0x93, - 0xc7, 0xa5, 0x48, 0xaa, 0x2b, 0x3f, 0x3e, 0x82, 0xb2, 0x37, 0x19, 0x41, 0xcf, 0x52, 0xb9, 0xb4, - 0x9d, 0xa9, 0xfd, 0x62, 0x41, 0x65, 0x96, 0x29, 0x31, 0x62, 0x81, 0x50, 0x27, 0xa6, 0x09, 0xe7, - 0x8c, 0x2f, 0xa5, 0x09, 0x1f, 0x36, 0x5a, 0x4a, 0x8c, 0x0d, 0xfa, 0x3a, 0x39, 0x7a, 0x04, 0x19, - 0x4e, 0xc4, 0x78, 0x28, 0xc3, 0x24, 0xa1, 0xf8, 0xa0, 0xc2, 0x1a, 0xc1, 0xa1, 0x46, 0xed, 0xef, - 0x04, 0xac, 0x86, 0x1e, 0xed, 0x79, 0xb2, 0x7f, 0xfe, 0xce, 0x09, 0xfc, 0x10, 0xb2, 0xca, 0x1b, - 0x4a, 0x54, 0x41, 0x25, 0xaf, 0xa7, 0x30, 0xd2, 0x78, 0x0b, 0x12, 0x3d, 0xb1, 0xf0, 0xa2, 0x49, - 0x9b, 0x17, 0x8d, 0x27, 0xe2, 0x2f, 0x9a, 0x77, 0xc4, 0x75, 0xed, 0x57, 0x0b, 0xd6, 0x16, 0x73, - 0xfa, 0xce, 0xa8, 0xfe, 0x18, 0xb2, 0x86, 0xc8, 0x28, 0x9b, 0x1b, 0xa1, 0x6f, 0x86, 0xe6, 0x13, - 0x2a, 0xcf, 0x8d, 0xe9, 0x48, 0x4d, 0x35, 0xeb, 0x5a, 0x57, 0x72, 0xe2, 0x5d, 0xbe, 0x55, 0xcb, - 0xce, 0xfa, 0x30, 0xf1, 0x7a, 0x7d, 0x98, 0x7c, 0xe3, 0x3e, 0x4c, 0xbd, 0x82, 0x9b, 0xf4, 0x8d, - 0x9e, 0x82, 0xb1, 0xdc, 0x66, 0xfe, 0x3b, 0xb7, 0xb5, 0x06, 0xac, 0x2f, 0x25, 0x2a, 0xa4, 0x71, - 0xde, 0x5f, 0xd6, 0x2b, 0xfb, 0xeb, 0x27, 0xb8, 0x8d, 0x89, 0x60, 0xc3, 0x09, 0x89, 0x55, 0xde, - 0x9b, 0xa5, 0x1c, 0x41, 0xca, 0x97, 0xe1, 0xd4, 0xcc, 0x63, 0xfd, 0x5d, 0xbb, 0x03, 0x9b, 0xd7, - 0x99, 0x37, 0x8e, 0xd6, 0xfe, 0xb2, 0xa0, 0x7c, 0x6c, 0x62, 0x78, 0xb3, 0x23, 0x97, 0xc8, 0x4b, - 0xdc, 0x90, 0xbc, 0x07, 0x90, 0x9e, 0xe8, 0xe1, 0x14, 0x5d, 0xd2, 0xb1, 0x7f, 0x2a, 0xc7, 0x6a, - 
0x66, 0x60, 0x83, 0xab, 0x4c, 0x9e, 0xd1, 0xa1, 0x24, 0x5c, 0xb3, 0xab, 0x32, 0x19, 0xd3, 0x7c, - 0xa2, 0x11, 0x1c, 0x6a, 0xd4, 0xbe, 0x86, 0xca, 0x2c, 0x96, 0x39, 0x11, 0x64, 0x42, 0xd4, 0x33, - 0xce, 0xd2, 0xc5, 0xbf, 0xb0, 0xfd, 0xb8, 0xa5, 0x20, 0x1c, 0x6a, 0x3c, 0x6a, 0x42, 0x65, 0xe9, - 0x8d, 0x8f, 0x2a, 0x50, 0x38, 0x7a, 0xde, 0x3d, 0x6c, 0x35, 0xda, 0x4f, 0xda, 0xad, 0xa6, 0x7d, - 0x0b, 0x01, 0x64, 0xba, 0xed, 0xe7, 0x4f, 0xf7, 0x5b, 0xb6, 0x85, 0xf2, 0x90, 0x3e, 0x38, 0xda, - 0xef, 0xb5, 0xed, 0x84, 0xfa, 0xec, 0x9d, 0x74, 0x0e, 0x1b, 0x76, 0xf2, 0xd1, 0x57, 0x50, 0x68, - 0xe8, 0x7f, 0x2a, 0x1d, 0xee, 0x13, 0xae, 0x36, 0x3c, 0xef, 0xe0, 0x83, 0xdd, 0x7d, 0xfb, 0x16, - 0xca, 0x42, 0xf2, 0x10, 0xab, 0x9d, 0x39, 0x48, 0x1d, 0x76, 0xba, 0x3d, 0x3b, 0x81, 0xca, 0x00, - 0xbb, 0x47, 0xbd, 0x4e, 0xa3, 0x73, 0x70, 0xd0, 0xee, 0xd9, 0xc9, 0xbd, 0xcf, 0xa1, 0x42, 0xd9, - 0xd6, 0x84, 0x4a, 0x22, 0x84, 0xf9, 0x23, 0xf6, 0xc3, 0xdd, 0x70, 0x45, 0xd9, 0xb6, 0xf9, 0xda, - 0x1e, 0xb0, 0xed, 0x89, 0xdc, 0xd6, 0xe8, 0xb6, 0x29, 0xcd, 0xd3, 0x8c, 0x5e, 0x7d, 0xfa, 0x4f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0xad, 0x39, 0x45, 0x08, 0x0e, 0x00, 0x00, + // 1454 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x6e, 0x1b, 0xb7, + 0x12, 0xce, 0xea, 0x5f, 0xa3, 0xbf, 0x35, 0x2d, 0x3b, 0x1b, 0x9f, 0x1c, 0x1f, 0x41, 0x49, 0x10, + 0xc5, 0xa7, 0xb0, 0x5b, 0xa7, 0x45, 0x83, 0xa2, 0x45, 0x6b, 0xcb, 0x76, 0xaa, 0xc0, 0x8e, 0x5c, + 0x4a, 0xb6, 0x81, 0xa2, 0xc5, 0x62, 0xad, 0xa5, 0x65, 0xc2, 0xd2, 0xae, 0x42, 0x52, 0x52, 0xd5, + 0x97, 0xe8, 0x6d, 0xd1, 0x17, 0xe8, 0x4d, 0xef, 0xfb, 0x0a, 0xbd, 0x6c, 0xde, 0xa0, 0x48, 0xdf, + 0xa1, 0xd7, 0x05, 0xb9, 0x5c, 0x79, 0xa5, 0xb8, 0x8d, 0x93, 0x20, 0x37, 0x82, 0x38, 0xdf, 0x70, + 0x38, 0xfc, 0xbe, 0x19, 0x92, 0x0b, 0xf9, 0x91, 0xe8, 0x3a, 0x82, 0xac, 0x0f, 0x98, 0x2f, 0x7c, + 0x94, 0x0a, 0x46, 0x2b, 0xe6, 0x29, 0xf5, 0x7a, 0x7e, 0xd7, 0x75, 0x84, 0x13, 0x20, 0x2b, 0xb9, + 0x67, 
0x43, 0xc2, 0x26, 0x7a, 0x50, 0x14, 0xfe, 0xc0, 0x8f, 0x82, 0x23, 0xc1, 0x06, 0x9d, 0x60, + 0x50, 0x7d, 0x9e, 0x83, 0x74, 0x8b, 0x70, 0x4e, 0x7d, 0x0f, 0xdd, 0x83, 0x22, 0xf5, 0x6c, 0xc1, + 0x1c, 0x8f, 0x3b, 0x1d, 0x41, 0x7d, 0xcf, 0x32, 0x2a, 0x46, 0x2d, 0x83, 0x0b, 0xd4, 0x6b, 0x5f, + 0x1a, 0x51, 0x1d, 0x8a, 0xfc, 0xdc, 0x61, 0xae, 0xcd, 0x83, 0x79, 0xdc, 0x8a, 0x55, 0xe2, 0xb5, + 0xdc, 0xe6, 0xed, 0x75, 0x9d, 0x9d, 0x8e, 0xb7, 0xde, 0x92, 0x5e, 0x7a, 0x80, 0x0b, 0x3c, 0x32, + 0xe2, 0x68, 0x15, 0xc0, 0x19, 0x0a, 0xbf, 0xe3, 0xf7, 0xfb, 0x54, 0x58, 0x09, 0xb5, 0x4e, 0xc4, + 0x82, 0xee, 0x40, 0x41, 0x38, 0xac, 0x4b, 0x84, 0xcd, 0x05, 0xa3, 0x5e, 0xd7, 0x4a, 0x56, 0x8c, + 0x5a, 0x16, 0xe7, 0x03, 0x63, 0x4b, 0xd9, 0xd0, 0x06, 0xa4, 0xfd, 0x81, 0x50, 0x29, 0xa4, 0x2a, + 0x46, 0x2d, 0xb7, 0xb9, 0xb4, 0x1e, 0x6c, 0x7c, 0xf7, 0x3b, 0xd2, 0x19, 0x0a, 0xd2, 0x0c, 0x40, + 0x1c, 0x7a, 0xa1, 0x6d, 0x30, 0x23, 0xdb, 0xb3, 0xfb, 0xbe, 0x4b, 0xac, 0x74, 0xc5, 0xa8, 0x15, + 0x37, 0x6f, 0x86, 0xc9, 0x47, 0x76, 0x7a, 0xe0, 0xbb, 0x04, 0x97, 0xc4, 0xac, 0x01, 0x6d, 0x40, + 0x66, 0xec, 0x30, 0x8f, 0x7a, 0x5d, 0x6e, 0x65, 0xd4, 0xc6, 0x17, 0xf5, 0xaa, 0x5f, 0xc9, 0xdf, + 0x93, 0x00, 0xc3, 0x53, 0x27, 0xf4, 0x39, 0xe4, 0x07, 0x8c, 0x5c, 0xb2, 0x95, 0xbd, 0x06, 0x5b, + 0xb9, 0x01, 0x23, 0x53, 0xae, 0xb6, 0xa0, 0x30, 0xf0, 0xb9, 0xb8, 0x8c, 0x00, 0xd7, 0x88, 0x90, + 0x97, 0x53, 0xa6, 0x21, 0xee, 0x42, 0xb1, 0xe7, 0x70, 0x61, 0x53, 0x8f, 0x13, 0x26, 0x6c, 0xea, + 0x5a, 0xb9, 0x8a, 0x51, 0x4b, 0xe0, 0xbc, 0xb4, 0x36, 0x94, 0xb1, 0xe1, 0xa2, 0xff, 0x02, 0x9c, + 0xf9, 0x43, 0xcf, 0xb5, 0x99, 0x3f, 0xe6, 0x56, 0x5e, 0x79, 0x64, 0x95, 0x05, 0xfb, 0x63, 0x8e, + 0x6c, 0x58, 0x1e, 0x72, 0xc2, 0x6c, 0x97, 0x9c, 0x51, 0x8f, 0xb8, 0xf6, 0xc8, 0x61, 0xd4, 0x39, + 0xed, 0x11, 0x6e, 0x15, 0x54, 0x42, 0x0f, 0xe6, 0x13, 0x3a, 0xe2, 0x84, 0xed, 0x04, 0xce, 0xc7, + 0xa1, 0xef, 0xae, 0x27, 0xd8, 0x04, 0x97, 0x87, 0x57, 0x40, 0xa8, 0x09, 0x26, 0x9f, 0x70, 0x41, + 0xfa, 0x91, 0xd0, 0x45, 0x15, 0xfa, 0xee, 
0x4b, 0x7b, 0x55, 0x7e, 0x73, 0x51, 0x4b, 0x7c, 0xd6, + 0x8a, 0xfe, 0x03, 0x59, 0xe6, 0x8f, 0xed, 0x8e, 0x3f, 0xf4, 0x84, 0x55, 0xaa, 0x18, 0xb5, 0x38, + 0xce, 0x30, 0x7f, 0x5c, 0x97, 0x63, 0x59, 0x82, 0xdc, 0x19, 0x91, 0x81, 0x4f, 0x3d, 0xc1, 0x2d, + 0xb3, 0x12, 0xaf, 0x65, 0x71, 0xc4, 0x82, 0x6a, 0x60, 0x52, 0xcf, 0x66, 0x84, 0x13, 0x36, 0x22, + 0xae, 0xdd, 0xf1, 0x3d, 0xcf, 0x5a, 0x50, 0x85, 0x5a, 0xa4, 0x1e, 0xd6, 0xe6, 0xba, 0xef, 0x79, + 0x52, 0xe1, 0x9e, 0xdf, 0xb9, 0x08, 0x05, 0xb2, 0x90, 0x2a, 0xc6, 0x57, 0x28, 0x2c, 0x67, 0x84, + 0x9d, 0xb7, 0x0e, 0x8b, 0x4a, 0x1e, 0x15, 0xe5, 0x9c, 0x38, 0x4c, 0x9c, 0x12, 0x47, 0x58, 0x8b, + 0x2a, 0xe3, 0x05, 0x09, 0xed, 0xfb, 0x9d, 0x8b, 0x2f, 0x43, 0x00, 0x7d, 0x01, 0x26, 0x23, 0x8e, + 0x6b, 0x3b, 0x67, 0x82, 0x30, 0x7b, 0xcc, 0xa8, 0x20, 0x56, 0x59, 0x2d, 0xba, 0x1c, 0x2e, 0x8a, + 0x89, 0xe3, 0x6e, 0x49, 0xf8, 0x44, 0xa2, 0xb8, 0xc8, 0x66, 0xc6, 0xa8, 0x02, 0xb9, 0x9d, 0x9d, + 0xfd, 0x96, 0x60, 0x8e, 0x20, 0xdd, 0x89, 0xb5, 0xa4, 0xba, 0x2b, 0x6a, 0x92, 0x1e, 0x3a, 0xbd, + 0xa3, 0xa3, 0xc6, 0x8e, 0xb5, 0x1c, 0x78, 0x44, 0x4c, 0xe8, 0x43, 0x58, 0x26, 0x9e, 0x24, 0xda, + 0xd6, 0xaa, 0x71, 0x22, 0x84, 0xea, 0x8b, 0x9b, 0x8a, 0xa6, 0x72, 0x80, 0x06, 0x52, 0xb5, 0x34, + 0xb6, 0xf2, 0xab, 0x01, 0xf9, 0x28, 0x13, 0xe8, 0x1e, 0xa4, 0x82, 0xae, 0x56, 0xc7, 0x4d, 0x6e, + 0xb3, 0xa0, 0xdb, 0xa9, 0xad, 0x8c, 0x58, 0x83, 0xf2, 0x74, 0x8a, 0xf6, 0x2e, 0x75, 0xad, 0x98, + 0xa2, 0xa7, 0x10, 0xb1, 0x36, 0x5c, 0xf4, 0x08, 0xf2, 0x42, 0xae, 0x2a, 0x6c, 0xa7, 0x47, 0x1d, + 0x6e, 0xc5, 0xf5, 0xc1, 0x30, 0x3d, 0x04, 0xdb, 0x0a, 0xdd, 0x92, 0x20, 0xce, 0x89, 0xcb, 0x01, + 0xfa, 0x1f, 0xe4, 0xa6, 0x62, 0x53, 0x57, 0x9d, 0x49, 0x71, 0x0c, 0xa1, 0xa9, 0xe1, 0xae, 0x7c, + 0x03, 0xb7, 0xfe, 0xb1, 0xa2, 0x91, 0x09, 0xf1, 0x0b, 0x32, 0x51, 0x5b, 0xc8, 0x62, 0xf9, 0x17, + 0x3d, 0x80, 0xe4, 0xc8, 0xe9, 0x0d, 0x89, 0xca, 0xf3, 0xf2, 0x94, 0xd8, 0xa6, 0xde, 0x74, 0x2e, + 0x0e, 0x3c, 0x3e, 0x89, 0x3d, 0x32, 0x56, 0xb6, 0xa1, 0x7c, 0x55, 0x51, 0x5f, 
0x11, 0xb8, 0x1c, + 0x0d, 0x9c, 0x8d, 0xc4, 0x78, 0x92, 0xc8, 0xc4, 0xcd, 0x44, 0xf5, 0x17, 0x03, 0x8a, 0xb3, 0xf2, + 0xa3, 0x0f, 0x60, 0x69, 0xbe, 0x60, 0xec, 0xae, 0xa0, 0xae, 0x0e, 0x8b, 0x66, 0xab, 0xe3, 0xb1, + 0xa0, 0x2e, 0xfa, 0x18, 0xac, 0x97, 0xa6, 0x08, 0xda, 0x27, 0xfe, 0x50, 0xa8, 0x85, 0x0d, 0xbc, + 0x34, 0x3b, 0xab, 0x1d, 0x80, 0xb2, 0x98, 0x75, 0x23, 0xc8, 0xbb, 0xa4, 0x73, 0xa1, 0x16, 0x0a, + 0x84, 0xc8, 0xe0, 0x05, 0x0d, 0xb5, 0x25, 0x22, 0xd7, 0xe1, 0xd5, 0x9f, 0x63, 0x50, 0xd4, 0x07, + 0x36, 0x26, 0xcf, 0x86, 0x84, 0x0b, 0xf4, 0x1e, 0x64, 0x3b, 0x4e, 0xaf, 0x47, 0x98, 0xad, 0x53, + 0xcc, 0x6d, 0x96, 0xd6, 0x83, 0x6b, 0xab, 0xae, 0xec, 0x8d, 0x1d, 0x9c, 0x09, 0x3c, 0x1a, 0x2e, + 0x7a, 0x00, 0xe9, 0xb0, 0xf3, 0x62, 0x53, 0xdf, 0x68, 0xe7, 0xe1, 0x10, 0x47, 0xf7, 0x21, 0xa9, + 0x54, 0xd0, 0x65, 0xb1, 0x10, 0x6a, 0x22, 0xcf, 0x38, 0x75, 0x7c, 0xe3, 0x00, 0x47, 0x1f, 0x81, + 0xae, 0x0d, 0x5b, 0x4c, 0x06, 0x44, 0x15, 0x43, 0x71, 0xb3, 0x3c, 0x5f, 0x45, 0xed, 0xc9, 0x80, + 0x60, 0x10, 0xd3, 0xff, 0xb2, 0x48, 0x2f, 0xc8, 0x84, 0x0f, 0x9c, 0x0e, 0xb1, 0xd5, 0x85, 0xa7, + 0x2e, 0xa6, 0x2c, 0x2e, 0x84, 0x56, 0x55, 0xf9, 0xd1, 0x8b, 0x2b, 0x7d, 0x9d, 0x8b, 0xeb, 0x49, + 0x22, 0x93, 0x34, 0x53, 0xd5, 0x1f, 0x0c, 0x28, 0x4d, 0x99, 0xe2, 0x03, 0xdf, 0xe3, 0x72, 0xc5, + 0x24, 0x61, 0xcc, 0x67, 0x73, 0x34, 0xe1, 0xc3, 0xfa, 0xae, 0x34, 0xe3, 0x00, 0x7d, 0x1d, 0x8e, + 0xd6, 0x20, 0xc5, 0x08, 0x1f, 0xf6, 0x84, 0x26, 0x09, 0x45, 0xaf, 0x37, 0xac, 0x10, 0xac, 0x3d, + 0xaa, 0xcf, 0x63, 0xb0, 0xa8, 0x33, 0xda, 0x76, 0x44, 0xe7, 0xfc, 0x9d, 0x0b, 0xf8, 0x7f, 0x48, + 0xcb, 0x6c, 0x28, 0x91, 0x05, 0x15, 0xbf, 0x5a, 0xc2, 0xd0, 0xe3, 0x2d, 0x44, 0x74, 0xf8, 0xcc, + 0x3b, 0x28, 0x19, 0xbc, 0x83, 0x1c, 0x1e, 0x7d, 0x07, 0xbd, 0x23, 0xad, 0xab, 0x3f, 0x19, 0x50, + 0x9e, 0xe5, 0xf4, 0x9d, 0x49, 0xfd, 0x3e, 0xa4, 0x03, 0x21, 0x43, 0x36, 0x97, 0x75, 0x6e, 0x81, + 0xcc, 0x27, 0x54, 0x9c, 0x07, 0xa1, 0x43, 0x37, 0xd9, 0xac, 0xe5, 0x96, 0x60, 0xc4, 0xe9, 0xbf, + 0x55, 0xcb, 0x4e, 
0xfb, 0x30, 0xf6, 0x7a, 0x7d, 0x18, 0x7f, 0xe3, 0x3e, 0x4c, 0xbc, 0x42, 0x9b, + 0xe4, 0xb5, 0x1e, 0x90, 0x11, 0x6e, 0x53, 0xff, 0xce, 0x6d, 0xb5, 0x0e, 0x4b, 0x73, 0x44, 0x69, + 0x19, 0x2f, 0xfb, 0xcb, 0x78, 0x65, 0x7f, 0x7d, 0x0b, 0xb7, 0x30, 0xe1, 0x7e, 0x6f, 0x44, 0x22, + 0x95, 0xf7, 0x66, 0x94, 0x23, 0x48, 0xb8, 0x42, 0xdf, 0x9a, 0x59, 0xac, 0xfe, 0x57, 0x6f, 0xc3, + 0xca, 0x55, 0xe1, 0x83, 0x44, 0xab, 0x0f, 0x21, 0x7f, 0x1c, 0x6c, 0x61, 0xaf, 0xe7, 0x74, 0xb9, + 0x7c, 0x93, 0xf7, 0xa9, 0x47, 0xfb, 0xf4, 0x7b, 0x62, 0xf3, 0x0b, 0x32, 0xd6, 0x9f, 0x07, 0xf9, + 0xd0, 0xd8, 0xba, 0x20, 0xe3, 0xea, 0x5f, 0x06, 0x14, 0xf5, 0xac, 0x37, 0xcb, 0x73, 0x4e, 0xf1, + 0xd8, 0x35, 0x15, 0xbf, 0x0f, 0xc9, 0x91, 0xba, 0xd1, 0xc2, 0x93, 0x3d, 0xf2, 0x51, 0x74, 0x2c, + 0x2f, 0x1a, 0x1c, 0xe0, 0x92, 0xfe, 0x33, 0xda, 0x13, 0x84, 0xa9, 0x92, 0x90, 0xf4, 0x47, 0x3c, + 0xf7, 0x14, 0x82, 0xb5, 0x07, 0x5a, 0x83, 0xe4, 0x99, 0xdc, 0xba, 0xae, 0x8e, 0x72, 0x28, 0x76, + 0x94, 0x16, 0x1c, 0xb8, 0x54, 0x3f, 0x83, 0xd2, 0x74, 0xdf, 0x97, 0x4a, 0x93, 0x11, 0x91, 0xaf, + 0x4b, 0x43, 0x75, 0xd7, 0xcc, 0x52, 0xc7, 0xbb, 0x12, 0xc2, 0xda, 0x63, 0x6d, 0x07, 0x4a, 0x73, + 0x9f, 0x1e, 0xa8, 0x04, 0xb9, 0xa3, 0xa7, 0xad, 0xc3, 0xdd, 0x7a, 0x63, 0xaf, 0xb1, 0xbb, 0x63, + 0xde, 0x40, 0x00, 0xa9, 0x56, 0xe3, 0xe9, 0xe3, 0xfd, 0x5d, 0xd3, 0x40, 0x59, 0x48, 0x1e, 0x1c, + 0xed, 0xb7, 0x1b, 0x66, 0x4c, 0xfe, 0x6d, 0x9f, 0x34, 0x0f, 0xeb, 0x66, 0x7c, 0xed, 0x53, 0xc8, + 0xd5, 0xd5, 0x07, 0x54, 0x93, 0xb9, 0x84, 0xc9, 0x09, 0x4f, 0x9b, 0xf8, 0x60, 0x6b, 0xdf, 0xbc, + 0x81, 0xd2, 0x10, 0x3f, 0xc4, 0x72, 0x66, 0x06, 0x12, 0x87, 0xcd, 0x56, 0xdb, 0x8c, 0xa1, 0x22, + 0xc0, 0xd6, 0x51, 0xbb, 0x59, 0x6f, 0x1e, 0x1c, 0x34, 0xda, 0x66, 0x7c, 0x7b, 0xef, 0xb7, 0x17, + 0xab, 0xc6, 0xef, 0x2f, 0x56, 0x8d, 0x3f, 0x5e, 0xac, 0x1a, 0x3f, 0xfe, 0xb9, 0x7a, 0x03, 0x4a, + 0xd4, 0x5f, 0x1f, 0x51, 0x41, 0x38, 0x0f, 0xbe, 0x17, 0xbf, 0xbe, 0xa3, 0x47, 0xd4, 0xdf, 0x08, + 0xfe, 0x6d, 0x74, 0xfd, 0x8d, 0x91, 0xd8, 0x50, 0xe8, 
0x46, 0x40, 0xcf, 0x69, 0x4a, 0x8d, 0x1e, + 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xa5, 0xb0, 0xf3, 0xaf, 0x0e, 0x00, 0x00, +} + +func (m *Session) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Session) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Session) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.EnableSystemSettings { + i-- + if m.EnableSystemSettings { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if len(m.SessionUUID) > 0 { + i -= len(m.SessionUUID) + copy(dAtA[i:], m.SessionUUID) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.SessionUUID))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.DDLStrategy) > 0 { + i -= len(m.DDLStrategy) + copy(dAtA[i:], m.DDLStrategy) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.DDLStrategy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.ReadAfterWrite != nil { + { + size, err := m.ReadAfterWrite.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.LastLockHeartbeat != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.LastLockHeartbeat)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.LockSession != nil { + { + size, err := m.LockSession.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.InReservedConn { + i-- + if m.InReservedConn { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + 
dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if len(m.Savepoints) > 0 { + for iNdEx := len(m.Savepoints) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Savepoints[iNdEx]) + copy(dAtA[i:], m.Savepoints[iNdEx]) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.Savepoints[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.RowCount != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.RowCount)) + i-- + dAtA[i] = 0x78 + } + if len(m.SystemVariables) > 0 { + for k := range m.SystemVariables { + v := m.SystemVariables[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVtgate(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtgate(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtgate(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x72 + } + } + if len(m.UserDefinedVariables) > 0 { + for k := range m.UserDefinedVariables { + v := m.UserDefinedVariables[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVtgate(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVtgate(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if m.FoundRows != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.FoundRows)) + i-- + dAtA[i] = 0x60 + } + if m.LastInsertId != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.LastInsertId)) + i-- + dAtA[i] = 0x58 + } + if len(m.PostSessions) > 0 { + for iNdEx := len(m.PostSessions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PostSessions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.PreSessions) > 0 { + for iNdEx := len(m.PreSessions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.PreSessions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Warnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.TransactionMode != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.TransactionMode)) + i-- + dAtA[i] = 0x38 + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.TargetString) > 0 { + i -= len(m.TargetString) + copy(dAtA[i:], m.TargetString) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.TargetString))) + i-- + dAtA[i] = 0x2a + } + if m.Autocommit { + i-- + if m.Autocommit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ShardSessions) > 0 { + for iNdEx := len(m.ShardSessions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ShardSessions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.InTransaction { + i-- + if m.InTransaction { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Session_ShardSession) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Session_ShardSession) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Session_ShardSession) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ReservedId != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.ReservedId)) + i-- + dAtA[i] = 0x20 + } + if m.TabletAlias != nil { + { + size, err := m.TabletAlias.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TransactionId != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.TransactionId)) + i-- + dAtA[i] = 0x10 + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadAfterWrite) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadAfterWrite) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadAfterWrite) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SessionTrackGtids { + i-- + if m.SessionTrackGtids { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.ReadAfterWriteTimeout != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ReadAfterWriteTimeout)))) + i-- + dAtA[i] = 0x11 + } + if len(m.ReadAfterWriteGtid) > 0 { + i -= len(m.ReadAfterWriteGtid) + copy(dAtA[i:], m.ReadAfterWriteGtid) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.ReadAfterWriteGtid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*ExecuteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.KeyspaceShard) > 0 { + i -= len(m.KeyspaceShard) + copy(dAtA[i:], m.KeyspaceShard) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.KeyspaceShard))) + i-- + dAtA[i] = 0x32 + } + if m.TabletType != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x20 + } + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Session != nil { + { + size, err := m.Session.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CallerId != nil { + { + size, err := m.CallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ExecuteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Session != nil { + { + size, err := m.Session.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteBatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteBatchRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteBatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.KeyspaceShard) > 0 { + i -= len(m.KeyspaceShard) + copy(dAtA[i:], m.KeyspaceShard) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.KeyspaceShard))) + i-- + 
dAtA[i] = 0x32 + } + if m.AsTransaction { + i-- + if m.AsTransaction { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.TabletType != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x20 + } + if len(m.Queries) > 0 { + for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Session != nil { + { + size, err := m.Session.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CallerId != nil { + { + size, err := m.CallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteBatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteBatchResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteBatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Session != nil { + { + size, err := m.Session.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } 
+ i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamExecuteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamExecuteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamExecuteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Session != nil { + { + size, err := m.Session.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.KeyspaceShard) > 0 { + i -= len(m.KeyspaceShard) + copy(dAtA[i:], m.KeyspaceShard) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.KeyspaceShard))) + i-- + dAtA[i] = 0x22 + } + if m.TabletType != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x18 + } + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CallerId != nil { + { + size, err := m.CallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamExecuteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamExecuteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamExecuteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResolveTransactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveTransactionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResolveTransactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Dtid) > 0 { + i -= len(m.Dtid) + copy(dAtA[i:], m.Dtid) + i = encodeVarintVtgate(dAtA, i, uint64(len(m.Dtid))) + i-- + dAtA[i] = 0x12 + } + if m.CallerId != nil { + { + size, err := m.CallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*ResolveTransactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveTransactionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResolveTransactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *VStreamFlags) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamFlags) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamFlags) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MinimizeSkew { + i-- + if m.MinimizeSkew { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VStreamRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } + +func (m *VStreamRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Flags != nil { + { + size, err := 
m.Flags.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Vgtid != nil { + { + size, err := m.Vgtid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TabletType != 0 { + i = encodeVarintVtgate(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x10 + } + if m.CallerId != nil { + { + size, err := m.CallerId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVtgate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintVtgate(dAtA []byte, offset int, v uint64) int { + offset -= sovVtgate(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 
0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Session) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InTransaction { + n += 2 + } + if len(m.ShardSessions) > 0 { + for _, e := range m.ShardSessions { + l = e.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + } + if m.Autocommit { + n += 2 + } + l = len(m.TargetString) + if l > 0 { + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.TransactionMode != 0 { + n += 1 + sovVtgate(uint64(m.TransactionMode)) + } + if len(m.Warnings) > 0 { + for _, e := range m.Warnings { + l = e.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + } + if len(m.PreSessions) > 0 { + for _, e := range m.PreSessions { + l = e.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + } + if len(m.PostSessions) > 0 { + for _, e := range m.PostSessions { + l = e.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + } + if m.LastInsertId != 0 { + n += 1 + sovVtgate(uint64(m.LastInsertId)) + } + if m.FoundRows != 0 { + n += 1 + sovVtgate(uint64(m.FoundRows)) + } + if len(m.UserDefinedVariables) > 0 { + for k, v := range m.UserDefinedVariables { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVtgate(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVtgate(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVtgate(uint64(mapEntrySize)) + } + } + if len(m.SystemVariables) > 0 { + for k, v := range m.SystemVariables { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVtgate(uint64(len(k))) + 1 + len(v) + sovVtgate(uint64(len(v))) + n += mapEntrySize + 1 + sovVtgate(uint64(mapEntrySize)) + } + } + if m.RowCount != 0 { + n += 1 + sovVtgate(uint64(m.RowCount)) + } + if len(m.Savepoints) > 0 { + for _, s := range m.Savepoints { + l = len(s) + n += 2 + l + sovVtgate(uint64(l)) + } + } + if m.InReservedConn { + n += 3 + } + if m.LockSession != nil { + l = m.LockSession.Size() + n += 2 + l + sovVtgate(uint64(l)) + 
} + if m.LastLockHeartbeat != 0 { + n += 2 + sovVtgate(uint64(m.LastLockHeartbeat)) + } + if m.ReadAfterWrite != nil { + l = m.ReadAfterWrite.Size() + n += 2 + l + sovVtgate(uint64(l)) + } + l = len(m.DDLStrategy) + if l > 0 { + n += 2 + l + sovVtgate(uint64(l)) + } + l = len(m.SessionUUID) + if l > 0 { + n += 2 + l + sovVtgate(uint64(l)) + } + if m.EnableSystemSettings { + n += 3 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Session_ShardSession) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.TransactionId != 0 { + n += 1 + sovVtgate(uint64(m.TransactionId)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.ReservedId != 0 { + n += 1 + sovVtgate(uint64(m.ReservedId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadAfterWrite) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ReadAfterWriteGtid) + if l > 0 { + n += 1 + l + sovVtgate(uint64(l)) + } + if m.ReadAfterWriteTimeout != 0 { + n += 9 + } + if m.SessionTrackGtids { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CallerId != nil { + l = m.CallerId.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Session != nil { + l = m.Session.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + sovVtgate(uint64(m.TabletType)) + } + l = len(m.KeyspaceShard) + if l > 0 { + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + 
return n +} + +func (m *ExecuteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Session != nil { + l = m.Session.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteBatchRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CallerId != nil { + l = m.CallerId.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Session != nil { + l = m.Session.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + } + if m.TabletType != 0 { + n += 1 + sovVtgate(uint64(m.TabletType)) + } + if m.AsTransaction { + n += 2 + } + l = len(m.KeyspaceShard) + if l > 0 { + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteBatchResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Session != nil { + l = m.Session.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamExecuteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CallerId != nil { + l = m.CallerId.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + 
sovVtgate(uint64(m.TabletType)) + } + l = len(m.KeyspaceShard) + if l > 0 { + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Session != nil { + l = m.Session.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StreamExecuteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveTransactionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CallerId != nil { + l = m.CallerId.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + l = len(m.Dtid) + if l > 0 { + n += 1 + l + sovVtgate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveTransactionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VStreamFlags) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimizeSkew { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VStreamRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CallerId != nil { + l = m.CallerId.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + sovVtgate(uint64(m.TabletType)) + } + if m.Vgtid != nil { + l = m.Vgtid.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.Flags != nil { + l = m.Flags.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*VStreamResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovVtgate(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVtgate(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVtgate(x uint64) (n int) { + return sovVtgate(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Session) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Session: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Session: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InTransaction", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InTransaction = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardSessions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardSessions = append(m.ShardSessions, &Session_ShardSession{}) + if err := m.ShardSessions[len(m.ShardSessions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Autocommit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Autocommit = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetString", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetString = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = 
&query.ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionMode", wireType) + } + m.TransactionMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionMode |= TransactionMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, &query.QueryWarning{}) + if err := m.Warnings[len(m.Warnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreSessions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreSessions = append(m.PreSessions, &Session_ShardSession{}) + if err := m.PreSessions[len(m.PreSessions)-1].Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostSessions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PostSessions = append(m.PostSessions, &Session_ShardSession{}) + if err := m.PostSessions[len(m.PostSessions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastInsertId", wireType) + } + m.LastInsertId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastInsertId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FoundRows", wireType) + } + m.FoundRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FoundRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserDefinedVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserDefinedVariables == nil { + m.UserDefinedVariables = make(map[string]*query.BindVariable) + } + var mapkey string + var mapvalue *query.BindVariable + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtgate + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtgate + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVtgate + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVtgate + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &query.BindVariable{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } 
else { + iNdEx = entryPreIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.UserDefinedVariables[mapkey] = mapvalue + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SystemVariables == nil { + m.SystemVariables = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVtgate + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVtgate + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVtgate + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVtgate + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SystemVariables[mapkey] = mapvalue + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) + } + m.RowCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Savepoints", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Savepoints = append(m.Savepoints, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InReservedConn", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InReservedConn = bool(v != 0) + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LockSession", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LockSession == nil { + m.LockSession = &Session_ShardSession{} + } + if err := m.LockSession.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastLockHeartbeat", wireType) + } + m.LastLockHeartbeat = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastLockHeartbeat |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadAfterWrite", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadAfterWrite == nil { + m.ReadAfterWrite = &ReadAfterWrite{} + } + if err := m.ReadAfterWrite.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DDLStrategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DDLStrategy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionUUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
EnableSystemSettings", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EnableSystemSettings = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Session_ShardSession) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShardSession: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShardSession: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &query.Target{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionId", wireType) + } + m.TransactionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TransactionId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedId", wireType) + } + m.ReservedId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReservedId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadAfterWrite) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadAfterWrite: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadAfterWrite: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadAfterWriteGtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReadAfterWriteGtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadAfterWriteTimeout", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ReadAfterWriteTimeout = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SessionTrackGtids", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionTrackGtids = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} + } + if err := m.CallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Session == nil { + m.Session = &Session{} + } + if err := m.Session.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &query.BoundQuery{} + } + if err := m.Query.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceShard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyspaceShard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &query.ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &vtrpc.RPCError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Session == nil { + m.Session = &Session{} + } + if err := m.Session.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteBatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteBatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteBatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} + } + if err := m.CallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Session == nil { + m.Session = &Session{} + } + if err := m.Session.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &query.BoundQuery{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AsTransaction", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AsTransaction = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceShard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyspaceShard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &query.ExecuteOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteBatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteBatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteBatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &vtrpc.RPCError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Session == nil { + m.Session = &Session{} + } + if err := m.Session.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &query.ResultWithError{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamExecuteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamExecuteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamExecuteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} + } + if err := m.CallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &query.BoundQuery{} + } + if err := m.Query.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceShard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyspaceShard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &query.ExecuteOptions{} + } + if err := 
m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Session == nil { + m.Session = &Session{} + } + if err := m.Session.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamExecuteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamExecuteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamExecuteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveTransactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveTransactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveTransactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} + } + if err := m.CallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthVtgate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveTransactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveTransactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveTransactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamFlags) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamFlags: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamFlags: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinimizeSkew", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinimizeSkew = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} + } + if err := m.CallerId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vgtid", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vgtid == nil { + m.Vgtid = &binlogdata.VGtid{} + } + if err := m.Vgtid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &binlogdata.Filter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flags == nil { + m.Flags = &VStreamFlags{} + } + if err := 
m.Flags.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtgate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtgate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtgate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &binlogdata.VEvent{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipVtgate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtgate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVtgate(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtgate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtgate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtgate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVtgate + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVtgate + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVtgate + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVtgate = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVtgate = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVtgate = 
fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go index 023adc5d727..dc6d4d6d2ee 100644 --- a/go/vt/proto/vtgateservice/vtgateservice.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vtgateservice.proto package vtgateservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - vtgate "vitess.io/vitess/go/vt/proto/vtgate" ) @@ -30,23 +29,24 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("vtgateservice.proto", fileDescriptor_601ae27c95081e0f) } var fileDescriptor_601ae27c95081e0f = []byte{ - // 247 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x3f, 0x4b, 0x03, 0x41, - 0x10, 0xc5, 0x15, 0x21, 0x81, 0x25, 0x69, 0x46, 0x51, 0x88, 0x5a, 0x98, 0xd2, 0xe2, 0x56, 0xb4, - 0x15, 0x8b, 0x03, 0x2b, 0x1b, 0x89, 0x92, 0x42, 0xb0, 0x58, 0x97, 0xe1, 0x5c, 0xd0, 0x9b, 0x73, - 0x67, 0xb2, 0xf8, 0x01, 0xfc, 0xe0, 0xc2, 0xed, 0x9f, 0x70, 0x9e, 0xda, 0xdd, 0xfd, 0xde, 0x9b, - 0xb7, 0xc3, 0x3c, 0xb5, 0x1f, 0xa4, 0x31, 0x82, 0x8c, 0x3e, 0x38, 0x8b, 0x55, 0xe7, 0x49, 0x08, - 0xe6, 0x03, 0xb8, 0x98, 0xc5, 0xdf, 0x28, 0x5e, 0x7e, 0xed, 0xa9, 0xc9, 0xda, 0x09, 0x32, 0xc3, - 0xb5, 0x9a, 0xde, 0x7e, 0xa2, 0xdd, 0x08, 0xc2, 0x61, 0x95, 0x4c, 0x09, 0xac, 0xf0, 0x63, 0x83, - 0x2c, 0x8b, 0xa3, 0x11, 0xe7, 0x8e, 0x5a, 0xc6, 0xe5, 0x0e, 0xdc, 0xa9, 0x59, 0x82, 0xb5, 0x11, - 0xfb, 0x0a, 0xc7, 0x3f, 0xac, 0x3d, 0xcd, 0x39, 0x27, 0xbf, 0x8b, 0x25, 0xec, 0x5e, 0xcd, 0x1f, - 0xc4, 0xa3, 0x79, 0xcf, 0x0b, 0x95, 0x81, 0x01, 0xce, 0x71, 0xa7, 0x7f, 0xa8, 0x39, 0xef, 0x62, - 0x17, 0x9e, 0x15, 0xac, 0x90, 0xe9, 0x2d, 0xe0, 0xa3, 0x37, 0x2d, 0x1b, 0x2b, 0x8e, 
0x5a, 0x38, - 0xcb, 0x83, 0x63, 0x2d, 0x67, 0x2f, 0xff, 0xb3, 0x94, 0x85, 0x6f, 0xd4, 0x74, 0x1d, 0x1f, 0xdf, - 0xde, 0x2e, 0x81, 0xd1, 0xed, 0x0a, 0xdf, 0xae, 0x57, 0xd7, 0xea, 0xc0, 0x51, 0x15, 0xfa, 0x22, - 0x62, 0x33, 0x55, 0xe3, 0x3b, 0xfb, 0x74, 0x9e, 0x90, 0x23, 0x1d, 0xbf, 0x74, 0x43, 0x3a, 0x88, - 0xee, 0x2d, 0x7a, 0x50, 0xec, 0xcb, 0xa4, 0x87, 0x57, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd5, - 0x14, 0x87, 0x30, 0x05, 0x02, 0x00, 0x00, + // 265 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2e, 0x2b, 0x49, 0x4f, + 0x2c, 0x49, 0x2d, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0xe2, 0x45, 0x11, 0x94, 0xe2, 0x81, 0x70, 0x21, 0x92, 0x46, 0x2d, 0xcc, 0x5c, 0x6c, 0x61, 0x99, + 0x25, 0xa9, 0xc5, 0xc5, 0x42, 0x36, 0x5c, 0xec, 0xae, 0x15, 0xa9, 0xc9, 0xa5, 0x25, 0xa9, 0x42, + 0x62, 0x7a, 0x50, 0x45, 0x50, 0x81, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x29, 0x71, 0x0c, + 0xf1, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x25, 0x06, 0x21, 0x6f, 0x2e, 0x1e, 0xa8, 0xa0, 0x53, + 0x62, 0x49, 0x72, 0x86, 0x90, 0x34, 0x9a, 0x52, 0xb0, 0x28, 0xcc, 0x1c, 0x19, 0xec, 0x92, 0x70, + 0xc3, 0x02, 0xb8, 0x78, 0x83, 0x4b, 0x8a, 0x52, 0x13, 0x73, 0x61, 0x0e, 0x82, 0x6b, 0x40, 0x11, + 0x86, 0x19, 0x27, 0x8b, 0x43, 0x16, 0x66, 0x9e, 0x01, 0xa3, 0x50, 0x2c, 0x97, 0x50, 0x50, 0x6a, + 0x71, 0x7e, 0x4e, 0x59, 0x6a, 0x48, 0x51, 0x62, 0x5e, 0x71, 0x62, 0x72, 0x49, 0x66, 0x7e, 0x9e, + 0x90, 0x22, 0x4c, 0x23, 0xa6, 0x1c, 0xcc, 0x6c, 0x25, 0x7c, 0x4a, 0xe0, 0x0e, 0xb6, 0xe3, 0x62, + 0x0f, 0x83, 0x58, 0x8e, 0x08, 0x3b, 0xa8, 0x00, 0x46, 0xd8, 0xc1, 0xc5, 0x11, 0xce, 0x73, 0x0a, + 0x3a, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x67, 0x3c, 0x96, + 0x63, 0xe0, 0x12, 0xc9, 0xcc, 0xd7, 0x2b, 0x03, 0x47, 0x0c, 0x24, 0xa6, 0xf4, 0xd2, 0x8b, 0x0a, + 0x92, 0xa3, 0xb4, 0xa0, 0x42, 0x99, 0xf9, 0xfa, 0x10, 0x96, 0x7e, 0x7a, 0xbe, 0x7e, 0x59, 0x89, + 0x3e, 0x58, 0x89, 0x3e, 0x4a, 
0x44, 0x27, 0xb1, 0x81, 0x05, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xb1, 0x89, 0xe2, 0xfd, 0x15, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index 8a673537d9a..7ebd72e0933 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -1,11 +1,13 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vtrpc.proto package vtrpc import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -320,18 +322,26 @@ func (*CallerID) ProtoMessage() {} func (*CallerID) Descriptor() ([]byte, []int) { return fileDescriptor_750b4cf641561858, []int{0} } - func (m *CallerID) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CallerID.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *CallerID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CallerID.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_CallerID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *CallerID) XXX_Merge(src proto.Message) { xxx_messageInfo_CallerID.Merge(m, src) } func (m *CallerID) XXX_Size() int { - return xxx_messageInfo_CallerID.Size(m) + return m.Size() } func (m *CallerID) XXX_DiscardUnknown() { xxx_messageInfo_CallerID.DiscardUnknown(m) @@ -379,18 +389,26 @@ func (*RPCError) ProtoMessage() {} func (*RPCError) Descriptor() ([]byte, []int) { return fileDescriptor_750b4cf641561858, []int{1} } - func (m *RPCError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RPCError.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *RPCError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RPCError.Marshal(b, m, deterministic) + 
if deterministic { + return xxx_messageInfo_RPCError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *RPCError) XXX_Merge(src proto.Message) { xxx_messageInfo_RPCError.Merge(m, src) } func (m *RPCError) XXX_Size() int { - return xxx_messageInfo_RPCError.Size(m) + return m.Size() } func (m *RPCError) XXX_DiscardUnknown() { xxx_messageInfo_RPCError.DiscardUnknown(m) @@ -429,43 +447,559 @@ func init() { func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor_750b4cf641561858) } var fileDescriptor_750b4cf641561858 = []byte{ - // 605 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0x4d, 0x4f, 0x1b, 0x3b, - 0x14, 0x86, 0xc9, 0x07, 0xf9, 0x38, 0x13, 0x88, 0x31, 0x5f, 0xe1, 0x5e, 0xae, 0xee, 0x55, 0x56, - 0x57, 0x2c, 0x88, 0xd4, 0xaa, 0xea, 0xda, 0x19, 0x1f, 0x82, 0xc5, 0xe0, 0x49, 0x3d, 0x36, 0x25, - 0xdd, 0x58, 0x21, 0x8c, 0x50, 0xaa, 0xc0, 0x44, 0x93, 0x14, 0xa9, 0x9b, 0xfe, 0xac, 0xfe, 0xa6, - 0xfe, 0x8c, 0xca, 0x4e, 0xa6, 0x28, 0xb0, 0x9b, 0xf3, 0x3e, 0xc7, 0xc7, 0xef, 0x79, 0x9d, 0x40, - 0xf0, 0xbc, 0xcc, 0xe7, 0x93, 0xf3, 0x79, 0x9e, 0x2d, 0x33, 0xba, 0xed, 0x8b, 0xee, 0x57, 0x68, - 0x84, 0xe3, 0xd9, 0x2c, 0xcd, 0x05, 0xa7, 0xa7, 0xd0, 0x9c, 0xe7, 0xd3, 0xa7, 0xc9, 0x74, 0x3e, - 0x9e, 0x75, 0x4a, 0xff, 0x95, 0xfe, 0x6f, 0xaa, 0x17, 0xc1, 0xd1, 0x49, 0xf6, 0x38, 0xcf, 0x9e, - 0xd2, 0xa7, 0x65, 0xa7, 0xbc, 0xa2, 0x7f, 0x04, 0xda, 0x85, 0xd6, 0xe2, 0xdb, 0xdd, 0x4b, 0x43, - 0xc5, 0x37, 0x6c, 0x68, 0xdd, 0x1f, 0xd0, 0x50, 0xc3, 0x10, 0xf3, 0x3c, 0xcb, 0xe9, 0x47, 0x08, - 0x66, 0xe9, 0xc3, 0x78, 0xf2, 0xdd, 0x4e, 0xb2, 0xfb, 0xd4, 0xdf, 0xb6, 0xfb, 0xee, 0xe8, 0x7c, - 0xe5, 0x30, 0xf2, 0xc4, 0x37, 0x86, 0xd9, 0x7d, 0xaa, 0x60, 0xd5, 0xea, 0xbe, 0x69, 0x07, 0xea, - 0x8f, 0xe9, 0x62, 0x31, 0x7e, 0x48, 0xd7, 0x26, 0x8a, 0x92, 0xfe, 0x0b, 0x55, 0x3f, 0xab, 0xe2, - 0x67, 0x05, 0xeb, 
0x59, 0x7e, 0x80, 0x07, 0x67, 0x3f, 0xcb, 0x50, 0xf5, 0x33, 0x6a, 0x50, 0x8e, - 0xaf, 0xc8, 0x16, 0x6d, 0x41, 0x23, 0x64, 0x32, 0xc4, 0x08, 0x39, 0x29, 0xd1, 0x00, 0xea, 0x46, - 0x5e, 0xc9, 0xf8, 0xb3, 0x24, 0x65, 0x7a, 0x00, 0x44, 0xc8, 0x1b, 0x16, 0x09, 0x6e, 0x99, 0x1a, - 0x98, 0x6b, 0x94, 0x9a, 0x54, 0xe8, 0x21, 0xec, 0x71, 0x64, 0x3c, 0x12, 0x12, 0x2d, 0xde, 0x86, - 0x88, 0x1c, 0x39, 0xa9, 0xd2, 0x1d, 0x68, 0xca, 0x58, 0xdb, 0x8b, 0xd8, 0x48, 0x4e, 0xb6, 0x29, - 0x85, 0x5d, 0x16, 0x29, 0x64, 0x7c, 0x64, 0xf1, 0x56, 0x24, 0x3a, 0x21, 0x35, 0x77, 0x72, 0x88, - 0xea, 0x5a, 0x24, 0x89, 0x88, 0xa5, 0xe5, 0x28, 0x05, 0x72, 0x52, 0xa7, 0xfb, 0xd0, 0x36, 0x92, - 0x19, 0x7d, 0x89, 0x52, 0x8b, 0x90, 0x69, 0xe4, 0x84, 0xd0, 0x23, 0xa0, 0x0a, 0x93, 0xd8, 0xa8, - 0xd0, 0xdd, 0x72, 0xc9, 0x4c, 0xe2, 0xf4, 0x06, 0x3d, 0x86, 0xfd, 0x0b, 0x26, 0x22, 0xe4, 0x76, - 0xa8, 0x30, 0x8c, 0x25, 0x17, 0x5a, 0xc4, 0x92, 0x34, 0x9d, 0x73, 0xd6, 0x8f, 0x95, 0xeb, 0x02, - 0x4a, 0xa0, 0x15, 0x1b, 0x6d, 0xe3, 0x0b, 0xab, 0x98, 0x1c, 0x20, 0x09, 0xe8, 0x1e, 0xec, 0x18, - 0x29, 0xae, 0x87, 0x11, 0xba, 0x35, 0x90, 0x93, 0x96, 0xdb, 0x5c, 0x48, 0x8d, 0x4a, 0xb2, 0x88, - 0xec, 0xd0, 0x36, 0x04, 0x46, 0xb2, 0x1b, 0x26, 0x22, 0xd6, 0x8f, 0x90, 0xec, 0xba, 0x85, 0x38, - 0xd3, 0xcc, 0x46, 0x71, 0x92, 0x90, 0xf6, 0xd9, 0xaf, 0x32, 0xb4, 0x5f, 0xbd, 0x89, 0x5b, 0x32, - 0x31, 0x61, 0x88, 0x49, 0x62, 0x23, 0x1c, 0xb0, 0x70, 0x44, 0xb6, 0x5c, 0x68, 0xab, 0x3c, 0x9d, - 0xc7, 0xb5, 0x5a, 0xa2, 0x1d, 0x38, 0x58, 0xe7, 0x6a, 0x51, 0xa9, 0x58, 0x15, 0xc4, 0x87, 0xdc, - 0x67, 0xdc, 0x0a, 0x39, 0x34, 0xba, 0x50, 0x2b, 0xf4, 0x14, 0x3a, 0x6f, 0x42, 0x2e, 0x68, 0x95, - 0xfe, 0x05, 0x47, 0xce, 0xf9, 0x40, 0x09, 0x3d, 0xda, 0x9c, 0xb7, 0xed, 0x4e, 0xbe, 0x09, 0xb9, - 0xa0, 0x35, 0xfa, 0x0f, 0x9c, 0xbc, 0x8d, 0xb5, 0xc0, 0x75, 0xfa, 0x37, 0x1c, 0x7f, 0x32, 0xa8, - 0x46, 0xd6, 0x3d, 0x65, 0x82, 0xea, 0xe6, 0x05, 0x36, 0x9c, 0x53, 0x27, 0x0b, 0x69, 0xf5, 0x6d, - 0xa1, 0x36, 0xe9, 0x09, 0x1c, 0x16, 0x29, 0x6e, 0x5a, 
0x01, 0x67, 0x53, 0x2b, 0x26, 0x13, 0x81, - 0x52, 0x6f, 0xb2, 0xc0, 0xb1, 0x57, 0x8f, 0x5e, 0xb0, 0x56, 0xff, 0x03, 0xb4, 0xa7, 0xd9, 0xf9, - 0xf3, 0x74, 0x99, 0x2e, 0x16, 0xab, 0x7f, 0xea, 0x97, 0xee, 0xba, 0x9a, 0x66, 0xbd, 0xd5, 0x57, - 0xef, 0x21, 0xeb, 0x3d, 0x2f, 0x7b, 0x9e, 0xf6, 0xfc, 0xaf, 0xfc, 0xae, 0xe6, 0x8b, 0xf7, 0xbf, - 0x03, 0x00, 0x00, 0xff, 0xff, 0x27, 0xae, 0x20, 0x34, 0xe3, 0x03, 0x00, 0x00, + // 628 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xcb, 0x4e, 0x1b, 0x3f, + 0x14, 0xc6, 0xc9, 0x85, 0x5c, 0x4e, 0x02, 0x31, 0xe6, 0x16, 0xfe, 0x7f, 0x9a, 0x56, 0x59, 0x55, + 0x2c, 0x88, 0xd4, 0x2e, 0xba, 0x76, 0xc6, 0x87, 0x60, 0x31, 0x78, 0x52, 0x8f, 0x87, 0x92, 0x6e, + 0xac, 0x10, 0x46, 0x28, 0x55, 0x60, 0xa2, 0x49, 0x8a, 0xd4, 0x4d, 0x9f, 0xa3, 0x4f, 0xd2, 0x67, + 0xe8, 0xb2, 0x8f, 0x50, 0xd1, 0x4d, 0x1f, 0xa3, 0xb2, 0x93, 0x29, 0x0a, 0xec, 0xe6, 0x7c, 0xbf, + 0xe3, 0xe3, 0xef, 0x7c, 0x4e, 0xa0, 0x76, 0x3f, 0x4f, 0xa7, 0xa3, 0xe3, 0x69, 0x9a, 0xcc, 0x13, + 0xba, 0xee, 0x8a, 0xf6, 0x27, 0xa8, 0x78, 0xc3, 0xc9, 0x24, 0x4e, 0x05, 0xa7, 0x87, 0x50, 0x9d, + 0xa6, 0xe3, 0xbb, 0xd1, 0x78, 0x3a, 0x9c, 0x34, 0x73, 0xaf, 0x72, 0xaf, 0xab, 0xea, 0x51, 0xb0, + 0x74, 0x94, 0xdc, 0x4e, 0x93, 0xbb, 0xf8, 0x6e, 0xde, 0xcc, 0x2f, 0xe8, 0x3f, 0x81, 0xb6, 0xa1, + 0x3e, 0xfb, 0x7c, 0xf5, 0xd8, 0x50, 0x70, 0x0d, 0x2b, 0x5a, 0xfb, 0x2b, 0x54, 0x54, 0xdf, 0xc3, + 0x34, 0x4d, 0x52, 0xfa, 0x0e, 0x6a, 0x93, 0xf8, 0x66, 0x38, 0xfa, 0x62, 0x46, 0xc9, 0x75, 0xec, + 0x6e, 0xdb, 0x7c, 0xb3, 0x77, 0xbc, 0x70, 0xe8, 0x3b, 0xe2, 0x1a, 0xbd, 0xe4, 0x3a, 0x56, 0xb0, + 0x68, 0xb5, 0xdf, 0xb4, 0x09, 0xe5, 0xdb, 0x78, 0x36, 0x1b, 0xde, 0xc4, 0x4b, 0x13, 0x59, 0x49, + 0x5f, 0x42, 0xd1, 0xcd, 0x2a, 0xb8, 0x59, 0xb5, 0xe5, 0x2c, 0x37, 0xc0, 0x81, 0xa3, 0xef, 0x79, + 0x28, 0xba, 0x19, 0x25, 0xc8, 0x07, 0x67, 0x64, 0x8d, 0xd6, 0xa1, 0xe2, 0x31, 0xe9, 0xa1, 0x8f, + 0x9c, 0xe4, 0x68, 0x0d, 0xca, 0x91, 0x3c, 0x93, 0xc1, 0x07, 
0x49, 0xf2, 0x74, 0x07, 0x88, 0x90, + 0x17, 0xcc, 0x17, 0xdc, 0x30, 0xd5, 0x8b, 0xce, 0x51, 0x6a, 0x52, 0xa0, 0xbb, 0xb0, 0xc5, 0x91, + 0x71, 0x5f, 0x48, 0x34, 0x78, 0xe9, 0x21, 0x72, 0xe4, 0xa4, 0x48, 0x37, 0xa0, 0x2a, 0x03, 0x6d, + 0x4e, 0x82, 0x48, 0x72, 0xb2, 0x4e, 0x29, 0x6c, 0x32, 0x5f, 0x21, 0xe3, 0x03, 0x83, 0x97, 0x22, + 0xd4, 0x21, 0x29, 0xd9, 0x93, 0x7d, 0x54, 0xe7, 0x22, 0x0c, 0x45, 0x20, 0x0d, 0x47, 0x29, 0x90, + 0x93, 0x32, 0xdd, 0x86, 0x46, 0x24, 0x59, 0xa4, 0x4f, 0x51, 0x6a, 0xe1, 0x31, 0x8d, 0x9c, 0x10, + 0xba, 0x07, 0x54, 0x61, 0x18, 0x44, 0xca, 0xb3, 0xb7, 0x9c, 0xb2, 0x28, 0xb4, 0x7a, 0x85, 0xee, + 0xc3, 0xf6, 0x09, 0x13, 0x3e, 0x72, 0xd3, 0x57, 0xe8, 0x05, 0x92, 0x0b, 0x2d, 0x02, 0x49, 0xaa, + 0xd6, 0x39, 0xeb, 0x06, 0xca, 0x76, 0x01, 0x25, 0x50, 0x0f, 0x22, 0x6d, 0x82, 0x13, 0xa3, 0x98, + 0xec, 0x21, 0xa9, 0xd1, 0x2d, 0xd8, 0x88, 0xa4, 0x38, 0xef, 0xfb, 0x68, 0xd7, 0x40, 0x4e, 0xea, + 0x76, 0x73, 0x21, 0x35, 0x2a, 0xc9, 0x7c, 0xb2, 0x41, 0x1b, 0x50, 0x8b, 0x24, 0xbb, 0x60, 0xc2, + 0x67, 0x5d, 0x1f, 0xc9, 0xa6, 0x5d, 0x88, 0x33, 0xcd, 0x8c, 0x1f, 0x84, 0x21, 0x69, 0x1c, 0xfd, + 0xc9, 0x43, 0xe3, 0xc9, 0x9b, 0xd8, 0x25, 0xc3, 0xc8, 0xf3, 0x30, 0x0c, 0x8d, 0x8f, 0x3d, 0xe6, + 0x0d, 0xc8, 0x9a, 0x0d, 0x6d, 0x91, 0xa7, 0xf5, 0xb8, 0x54, 0x73, 0xb4, 0x09, 0x3b, 0xcb, 0x5c, + 0x0d, 0x2a, 0x15, 0xa8, 0x8c, 0xb8, 0x90, 0xbb, 0x8c, 0x1b, 0x21, 0xfb, 0x91, 0xce, 0xd4, 0x02, + 0x3d, 0x84, 0xe6, 0xb3, 0x90, 0x33, 0x5a, 0xa4, 0xff, 0xc1, 0x9e, 0x75, 0xde, 0x53, 0x42, 0x0f, + 0x56, 0xe7, 0xad, 0xdb, 0x93, 0xcf, 0x42, 0xce, 0x68, 0x89, 0xbe, 0x80, 0x83, 0xe7, 0xb1, 0x66, + 0xb8, 0x4c, 0xff, 0x87, 0xfd, 0xf7, 0x11, 0xaa, 0x81, 0xb1, 0x4f, 0x19, 0xa2, 0xba, 0x78, 0x84, + 0x15, 0xeb, 0xd4, 0xca, 0x42, 0x1a, 0x7d, 0x99, 0xa9, 0x55, 0x7a, 0x00, 0xbb, 0x59, 0x8a, 0xab, + 0x56, 0xc0, 0xda, 0xd4, 0x8a, 0xc9, 0x50, 0xa0, 0xd4, 0xab, 0xac, 0x66, 0xd9, 0x93, 0x47, 0xcf, + 0x58, 0xbd, 0x8b, 0x3f, 0x1e, 0x5a, 0xb9, 0x9f, 0x0f, 0xad, 0xdc, 0xaf, 0x87, 0x56, 0xee, 0xdb, + 
0xef, 0xd6, 0x1a, 0x34, 0xc6, 0xc9, 0xf1, 0xfd, 0x78, 0x1e, 0xcf, 0x66, 0x8b, 0x7f, 0xee, 0xc7, + 0xf6, 0xb2, 0x1a, 0x27, 0x9d, 0xc5, 0x57, 0xe7, 0x26, 0xe9, 0xdc, 0xcf, 0x3b, 0x8e, 0x76, 0xdc, + 0xaf, 0xfe, 0xaa, 0xe4, 0x8a, 0xb7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x67, 0xe4, 0x15, + 0xf3, 0x03, 0x00, 0x00, +} + +func (m *CallerID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CallerID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CallerID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Subcomponent) > 0 { + i -= len(m.Subcomponent) + copy(dAtA[i:], m.Subcomponent) + i = encodeVarintVtrpc(dAtA, i, uint64(len(m.Subcomponent))) + i-- + dAtA[i] = 0x1a + } + if len(m.Component) > 0 { + i -= len(m.Component) + copy(dAtA[i:], m.Component) + i = encodeVarintVtrpc(dAtA, i, uint64(len(m.Component))) + i-- + dAtA[i] = 0x12 + } + if len(m.Principal) > 0 { + i -= len(m.Principal) + copy(dAtA[i:], m.Principal) + i = encodeVarintVtrpc(dAtA, i, uint64(len(m.Principal))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RPCError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RPCError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RPCError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) 
+ } + if m.Code != 0 { + i = encodeVarintVtrpc(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x18 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintVtrpc(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if m.LegacyCode != 0 { + i = encodeVarintVtrpc(dAtA, i, uint64(m.LegacyCode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintVtrpc(dAtA []byte, offset int, v uint64) int { + offset -= sovVtrpc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base } +func (m *CallerID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Principal) + if l > 0 { + n += 1 + l + sovVtrpc(uint64(l)) + } + l = len(m.Component) + if l > 0 { + n += 1 + l + sovVtrpc(uint64(l)) + } + l = len(m.Subcomponent) + if l > 0 { + n += 1 + l + sovVtrpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RPCError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LegacyCode != 0 { + n += 1 + sovVtrpc(uint64(m.LegacyCode)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovVtrpc(uint64(l)) + } + if m.Code != 0 { + n += 1 + sovVtrpc(uint64(m.Code)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVtrpc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVtrpc(x uint64) (n int) { + return sovVtrpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CallerID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CallerID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CallerID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Principal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtrpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Principal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtrpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Component = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subcomponent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtrpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subcomponent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtrpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtrpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtrpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RPCError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RPCError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RPCError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyCode", wireType) + } + m.LegacyCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LegacyCode |= LegacyErrorCode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtrpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtrpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtrpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= Code(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVtrpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtrpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtrpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVtrpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtrpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtrpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtrpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVtrpc + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVtrpc + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVtrpc + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVtrpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVtrpc = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVtrpc = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 7dad543bdfc..c689c083fc2 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -1,11 +1,13 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: vttest.proto package vttest import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -42,18 +44,26 @@ func (*Shard) ProtoMessage() {} func (*Shard) Descriptor() ([]byte, []int) { return fileDescriptor_b9b3dc07179a1ec9, []int{0} } - func (m *Shard) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Shard.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Shard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Shard.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Shard.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Shard) XXX_Merge(src proto.Message) { xxx_messageInfo_Shard.Merge(m, src) } func (m *Shard) XXX_Size() int { - return xxx_messageInfo_Shard.Size(m) + return m.Size() } func (m *Shard) XXX_DiscardUnknown() { xxx_messageInfo_Shard.DiscardUnknown(m) @@ -102,18 +112,26 @@ func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { return fileDescriptor_b9b3dc07179a1ec9, []int{1} } - func (m *Keyspace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Keyspace.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Keyspace) XXX_Merge(src proto.Message) { xxx_messageInfo_Keyspace.Merge(m, src) } func (m *Keyspace) XXX_Size() int { - return xxx_messageInfo_Keyspace.Size(m) + return m.Size() } func (m *Keyspace) XXX_DiscardUnknown() { xxx_messageInfo_Keyspace.DiscardUnknown(m) @@ -187,18 +205,26 @@ func (*VTTestTopology) 
ProtoMessage() {} func (*VTTestTopology) Descriptor() ([]byte, []int) { return fileDescriptor_b9b3dc07179a1ec9, []int{2} } - func (m *VTTestTopology) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VTTestTopology.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *VTTestTopology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VTTestTopology.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_VTTestTopology.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *VTTestTopology) XXX_Merge(src proto.Message) { xxx_messageInfo_VTTestTopology.Merge(m, src) } func (m *VTTestTopology) XXX_Size() int { - return xxx_messageInfo_VTTestTopology.Size(m) + return m.Size() } func (m *VTTestTopology) XXX_DiscardUnknown() { xxx_messageInfo_VTTestTopology.DiscardUnknown(m) @@ -229,26 +255,875 @@ func init() { func init() { proto.RegisterFile("vttest.proto", fileDescriptor_b9b3dc07179a1ec9) } var fileDescriptor_b9b3dc07179a1ec9 = []byte{ - // 322 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcb, 0x6a, 0xe3, 0x40, - 0x10, 0x44, 0xb6, 0xa5, 0x5d, 0xb7, 0x1f, 0x98, 0xc1, 0x87, 0xb9, 0xad, 0xd7, 0xc6, 0xa0, 0x93, - 0xb4, 0x6c, 0xfe, 0x20, 0x26, 0xb9, 0x04, 0x12, 0x50, 0x84, 0x0f, 0xb9, 0x08, 0x59, 0xea, 0x38, - 0x22, 0x92, 0x5a, 0xcc, 0x8c, 0x05, 0xfa, 0x8d, 0x7c, 0x71, 0x50, 0x8f, 0x4c, 0x2e, 0xbe, 0x55, - 0x57, 0xd5, 0x74, 0x35, 0x35, 0x30, 0x6f, 0x8d, 0x41, 0x6d, 0x82, 0x46, 0x91, 0x21, 0xe1, 0xd9, - 0x69, 0xfb, 0x00, 0xee, 0xeb, 0x47, 0xaa, 0x72, 0x21, 0x60, 0x52, 0xa7, 0x15, 0x4a, 0x67, 0xe3, - 0xf8, 0xd3, 0x88, 0xb1, 0xf0, 0x61, 0x95, 0x9f, 0x92, 0x1e, 0x26, 0xd4, 0xa2, 0x52, 0x45, 0x8e, - 0x72, 0xc4, 0xfa, 0x32, 0x3f, 0x3d, 0xa7, 0x15, 0xbe, 0x0c, 0xec, 0xf6, 0x6b, 0x04, 0xbf, 0x9f, - 0xb0, 0xd3, 0x4d, 0x9a, 0xe1, 0xcd, 0x55, 0x7b, 
0xf0, 0x74, 0x9f, 0xa3, 0xe5, 0x68, 0x33, 0xf6, - 0x67, 0xff, 0x17, 0xc1, 0x70, 0x0e, 0xa7, 0x47, 0x83, 0x28, 0xfe, 0xc1, 0x9a, 0x51, 0x51, 0x9f, - 0x93, 0x8c, 0xca, 0x4b, 0x55, 0x73, 0xbc, 0x1c, 0xf3, 0x2a, 0x71, 0xd5, 0x0e, 0x2c, 0xf5, 0x17, - 0xdc, 0x7a, 0x61, 0xba, 0x06, 0xe5, 0xe4, 0xd6, 0x8b, 0xb8, 0x6b, 0x50, 0xfc, 0x81, 0x99, 0x46, - 0xd5, 0x62, 0x9e, 0xbc, 0x2b, 0xaa, 0xa4, 0xcb, 0x46, 0xb0, 0xd4, 0xa3, 0xa2, 0x4a, 0xec, 0x60, - 0xa1, 0xb0, 0x29, 0x8b, 0x2c, 0x4d, 0x32, 0xba, 0xd4, 0x46, 0x7a, 0x1b, 0xc7, 0x77, 0xa3, 0xf9, - 0x40, 0x1e, 0x7a, 0x4e, 0xfc, 0x85, 0xb9, 0xca, 0xa9, 0x2e, 0xbb, 0xc1, 0xf3, 0x8b, 0x3d, 0x33, - 0xcb, 0xb1, 0x65, 0x7b, 0x84, 0xe5, 0x31, 0x8e, 0x51, 0x9b, 0x98, 0x1a, 0x2a, 0xe9, 0xdc, 0x89, - 0x00, 0xa6, 0x9f, 0x43, 0x4b, 0x5a, 0x3a, 0x5c, 0xc4, 0xea, 0x5a, 0xc4, 0xb5, 0xbe, 0xe8, 0xc7, - 0x22, 0xd6, 0xe0, 0x66, 0x58, 0x96, 0xb6, 0xb4, 0x69, 0x64, 0x87, 0xfb, 0xfd, 0xdb, 0xae, 0x2d, - 0x0c, 0x6a, 0x1d, 0x14, 0x14, 0x5a, 0x14, 0x9e, 0x29, 0x6c, 0x4d, 0xc8, 0x7f, 0x1b, 0xda, 0x85, - 0x27, 0x8f, 0xa7, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x57, 0x0f, 0xe6, 0xb4, 0xf9, 0x01, - 0x00, 0x00, + // 347 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcd, 0x4a, 0xeb, 0x40, + 0x18, 0xbd, 0xd3, 0x36, 0xb9, 0xb7, 0x5f, 0x7f, 0x28, 0x43, 0x17, 0xb3, 0xca, 0xed, 0x6d, 0xb9, + 0x90, 0x55, 0x22, 0x8a, 0x2f, 0x60, 0xd1, 0x8d, 0xa0, 0x10, 0x43, 0x17, 0x6e, 0x42, 0x9a, 0x7c, + 0xd6, 0x60, 0x92, 0x09, 0x33, 0xd3, 0x40, 0x5e, 0xc3, 0x95, 0x8f, 0xe4, 0xd2, 0x47, 0x90, 0xfa, + 0x22, 0x92, 0x99, 0x14, 0x37, 0xdd, 0x9d, 0xef, 0x9c, 0x33, 0xdf, 0xf9, 0x38, 0x03, 0xe3, 0x5a, + 0x29, 0x94, 0xca, 0xab, 0x04, 0x57, 0x9c, 0xda, 0x66, 0x5a, 0x5e, 0x83, 0xf5, 0xf0, 0x1c, 0x8b, + 0x94, 0x52, 0x18, 0x94, 0x71, 0x81, 0x8c, 0x2c, 0x88, 0x3b, 0x0c, 0x34, 0xa6, 0x2e, 0xcc, 0xd2, + 0x6d, 0xd4, 0xc2, 0x88, 0xd7, 0x28, 0x44, 0x96, 0x22, 0xeb, 0x69, 0x7d, 0x9a, 0x6e, 0xef, 0xe2, + 0x02, 0xef, 0x3b, 0x76, 
0xf9, 0xda, 0x83, 0x3f, 0xb7, 0xd8, 0xc8, 0x2a, 0x4e, 0xf0, 0xe4, 0xaa, + 0xff, 0x60, 0xcb, 0x36, 0x47, 0xb2, 0xde, 0xa2, 0xef, 0x8e, 0xce, 0x27, 0x5e, 0x77, 0x8e, 0x4e, + 0x0f, 0x3a, 0x91, 0x9e, 0xc1, 0x5c, 0xa3, 0xac, 0xdc, 0x45, 0x09, 0xcf, 0xf7, 0x45, 0xa9, 0xe3, + 0x59, 0x5f, 0xaf, 0xa2, 0x47, 0x6d, 0xad, 0xa5, 0xf6, 0x82, 0x53, 0x2f, 0x54, 0x53, 0x21, 0x1b, + 0x9c, 0x7a, 0x11, 0x36, 0x15, 0xd2, 0xbf, 0x30, 0x92, 0x28, 0x6a, 0x4c, 0xa3, 0x27, 0xc1, 0x0b, + 0x66, 0x69, 0x23, 0x18, 0xea, 0x46, 0xf0, 0x82, 0xae, 0x60, 0x22, 0xb0, 0xca, 0xb3, 0x24, 0x8e, + 0x12, 0xbe, 0x2f, 0x15, 0xb3, 0x17, 0xc4, 0xb5, 0x82, 0x71, 0x47, 0xae, 0x5b, 0x8e, 0xfe, 0x83, + 0xb1, 0x48, 0x79, 0x99, 0x37, 0x9d, 0xe7, 0xb7, 0xf6, 0x8c, 0x0c, 0xa7, 0x2d, 0xcb, 0x0d, 0x4c, + 0x37, 0x61, 0x88, 0x52, 0x85, 0xbc, 0xe2, 0x39, 0xdf, 0x35, 0xd4, 0x83, 0xe1, 0x4b, 0xd7, 0x92, + 0x64, 0x44, 0x17, 0x31, 0x3b, 0x16, 0x71, 0xac, 0x2f, 0xf8, 0xb1, 0xd0, 0x39, 0x58, 0x09, 0xe6, + 0xb9, 0x29, 0x6d, 0x18, 0x98, 0xe1, 0xea, 0xf2, 0xfd, 0xe0, 0x90, 0x8f, 0x83, 0x43, 0x3e, 0x0f, + 0x0e, 0x79, 0xfb, 0x72, 0x7e, 0x3d, 0xae, 0xea, 0x4c, 0xa1, 0x94, 0x5e, 0xc6, 0x7d, 0x83, 0xfc, + 0x1d, 0xf7, 0x6b, 0xe5, 0xeb, 0xbf, 0xf6, 0x4d, 0xc0, 0xd6, 0xd6, 0xd3, 0xc5, 0x77, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xd7, 0x26, 0x4d, 0xc0, 0x09, 0x02, 0x00, 0x00, +} + +func (m *Shard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DbNameOverride) > 0 { + i -= len(m.DbNameOverride) + copy(dAtA[i:], m.DbNameOverride) + i = 
encodeVarintVttest(dAtA, i, uint64(len(m.DbNameOverride))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVttest(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Keyspace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Keyspace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Keyspace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RdonlyCount != 0 { + i = encodeVarintVttest(dAtA, i, uint64(m.RdonlyCount)) + i-- + dAtA[i] = 0x38 + } + if m.ReplicaCount != 0 { + i = encodeVarintVttest(dAtA, i, uint64(m.ReplicaCount)) + i-- + dAtA[i] = 0x30 + } + if len(m.ServedFrom) > 0 { + i -= len(m.ServedFrom) + copy(dAtA[i:], m.ServedFrom) + i = encodeVarintVttest(dAtA, i, uint64(len(m.ServedFrom))) + i-- + dAtA[i] = 0x2a + } + if len(m.ShardingColumnType) > 0 { + i -= len(m.ShardingColumnType) + copy(dAtA[i:], m.ShardingColumnType) + i = encodeVarintVttest(dAtA, i, uint64(len(m.ShardingColumnType))) + i-- + dAtA[i] = 0x22 + } + if len(m.ShardingColumnName) > 0 { + i -= len(m.ShardingColumnName) + copy(dAtA[i:], m.ShardingColumnName) + i = encodeVarintVttest(dAtA, i, uint64(len(m.ShardingColumnName))) + i-- + dAtA[i] = 0x1a + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Shards[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVttest(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
encodeVarintVttest(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VTTestTopology) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VTTestTopology) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VTTestTopology) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarintVttest(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspaces) > 0 { + for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Keyspaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVttest(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintVttest(dAtA []byte, offset int, v uint64) int { + offset -= sovVttest(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base } +func (m *Shard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVttest(uint64(l)) + } + l = len(m.DbNameOverride) + if l > 0 { + n += 1 + l + sovVttest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Keyspace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVttest(uint64(l)) + } + if len(m.Shards) > 0 { + for _, e := 
range m.Shards { + l = e.Size() + n += 1 + l + sovVttest(uint64(l)) + } + } + l = len(m.ShardingColumnName) + if l > 0 { + n += 1 + l + sovVttest(uint64(l)) + } + l = len(m.ShardingColumnType) + if l > 0 { + n += 1 + l + sovVttest(uint64(l)) + } + l = len(m.ServedFrom) + if l > 0 { + n += 1 + l + sovVttest(uint64(l)) + } + if m.ReplicaCount != 0 { + n += 1 + sovVttest(uint64(m.ReplicaCount)) + } + if m.RdonlyCount != 0 { + n += 1 + sovVttest(uint64(m.RdonlyCount)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VTTestTopology) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keyspaces) > 0 { + for _, e := range m.Keyspaces { + l = e.Size() + n += 1 + l + sovVttest(uint64(l)) + } + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sovVttest(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVttest(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVttest(x uint64) (n int) { + return sovVttest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Shard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Shard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Shard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbNameOverride", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbNameOverride = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVttest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVttest + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVttest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Shards = append(m.Shards, &Shard{}) + if err := m.Shards[len(m.Shards)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardingColumnName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardingColumnType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardingColumnType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedFrom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServedFrom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicaCount", wireType) + } + m.ReplicaCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReplicaCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RdonlyCount", wireType) + } + m.RdonlyCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RdonlyCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVttest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVttest + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVttest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VTTestTopology) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VTTestTopology: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VTTestTopology: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVttest + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVttest + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVttest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVttest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVttest + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVttest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVttest(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVttest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVttest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVttest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVttest + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVttest + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVttest + } + if depth == 0 { + return iNdEx, nil + } + } + 
return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVttest = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVttest = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVttest = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vttime/vttime.pb.go b/go/vt/proto/vttime/vttime.pb.go index 95c6f63de33..8dd642134bb 100644 --- a/go/vt/proto/vttime/vttime.pb.go +++ b/go/vt/proto/vttime/vttime.pb.go @@ -1,11 +1,13 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vttime.proto package vttime import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -37,18 +39,26 @@ func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { return fileDescriptor_bbeb0d3434911dee, []int{0} } - func (m *Time) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Time.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Time.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Time) XXX_Merge(src proto.Message) { xxx_messageInfo_Time.Merge(m, src) } func (m *Time) XXX_Size() int { - return xxx_messageInfo_Time.Size(m) + return m.Size() } func (m *Time) XXX_DiscardUnknown() { xxx_messageInfo_Time.DiscardUnknown(m) @@ -70,20 +80,475 @@ func (m *Time) GetNanoseconds() int32 { return 0 } +type Duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_bbeb0d3434911dee, []int{1} +} +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + func init() { proto.RegisterType((*Time)(nil), "vttime.Time") + proto.RegisterType((*Duration)(nil), "vttime.Duration") } func init() { proto.RegisterFile("vttime.proto", fileDescriptor_bbeb0d3434911dee) } var fileDescriptor_bbeb0d3434911dee = []byte{ - // 120 bytes of a gzipped FileDescriptorProto + // 161 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x2b, 0x29, 0xc9, 0xcc, 0x4d, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0xf0, 0x94, 0x9c, 0xb8, 0x58, 0x42, 0x32, 0x73, 0x53, 0x85, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, 0x5c, 0x21, 0x05, 0x2e, 0xee, 0xbc, 0xc4, 0xbc, 0x7c, - 0x98, 0x2c, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0xb2, 0x90, 0x93, 0x6a, 0x94, 0x72, 0x59, 0x66, - 0x49, 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, 
0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, - 0x0f, 0xb6, 0x4b, 0x1f, 0x62, 0x55, 0x12, 0x1b, 0x98, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, - 0x35, 0x46, 0xf4, 0x16, 0x89, 0x00, 0x00, 0x00, + 0x98, 0x2c, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0xb2, 0x90, 0x92, 0x15, 0x17, 0x87, 0x4b, 0x69, + 0x51, 0x62, 0x49, 0x66, 0x7e, 0x1e, 0x1e, 0x73, 0x44, 0xb8, 0x58, 0xc1, 0x9a, 0xa0, 0x26, 0x40, + 0x38, 0x4e, 0xa6, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, + 0x8c, 0xc7, 0x72, 0x0c, 0x51, 0xca, 0x65, 0x99, 0x25, 0xa9, 0xc5, 0xc5, 0x7a, 0x99, 0xf9, 0xfa, + 0x10, 0x96, 0x7e, 0x7a, 0xbe, 0x7e, 0x59, 0x89, 0x3e, 0xd8, 0xdd, 0xfa, 0x10, 0x67, 0x27, 0xb1, + 0x81, 0x79, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x31, 0xe1, 0xff, 0xf9, 0xd5, 0x00, 0x00, + 0x00, } + +func (m *Time) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Time) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Time) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanoseconds != 0 { + i = encodeVarintVttime(dAtA, i, uint64(m.Nanoseconds)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintVttime(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintVttime(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintVttime(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintVttime(dAtA []byte, offset int, v uint64) int { + offset -= sovVttime(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Time) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovVttime(uint64(m.Seconds)) + } + if m.Nanoseconds != 0 { + n += 1 + sovVttime(uint64(m.Nanoseconds)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovVttime(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovVttime(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVttime(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVttime(x uint64) (n int) { + return sovVttime(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Time) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttime + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Time: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Time: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttime + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanoseconds", wireType) + } + m.Nanoseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttime + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanoseconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVttime(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVttime + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVttime + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttime + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttime + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVttime + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVttime(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVttime + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVttime + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVttime(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVttime + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVttime + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVttime + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVttime + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVttime + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVttime + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVttime = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVttime = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVttime = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go index ac3389283aa..769d38eea74 100644 --- a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go +++ b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go @@ -1,14 +1,15 @@ -// Code generated by protoc-gen-go. 
DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: vtworkerdata.proto package vtworkerdata import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" - logutil "vitess.io/vitess/go/vt/proto/logutil" ) @@ -37,18 +38,26 @@ func (*ExecuteVtworkerCommandRequest) ProtoMessage() {} func (*ExecuteVtworkerCommandRequest) Descriptor() ([]byte, []int) { return fileDescriptor_32a791ab99179e8e, []int{0} } - func (m *ExecuteVtworkerCommandRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteVtworkerCommandRequest.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteVtworkerCommandRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExecuteVtworkerCommandRequest.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteVtworkerCommandRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteVtworkerCommandRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteVtworkerCommandRequest.Merge(m, src) } func (m *ExecuteVtworkerCommandRequest) XXX_Size() int { - return xxx_messageInfo_ExecuteVtworkerCommandRequest.Size(m) + return m.Size() } func (m *ExecuteVtworkerCommandRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteVtworkerCommandRequest.DiscardUnknown(m) @@ -77,18 +86,26 @@ func (*ExecuteVtworkerCommandResponse) ProtoMessage() {} func (*ExecuteVtworkerCommandResponse) Descriptor() ([]byte, []int) { return fileDescriptor_32a791ab99179e8e, []int{1} } - func (m *ExecuteVtworkerCommandResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExecuteVtworkerCommandResponse.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *ExecuteVtworkerCommandResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_ExecuteVtworkerCommandResponse.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_ExecuteVtworkerCommandResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *ExecuteVtworkerCommandResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecuteVtworkerCommandResponse.Merge(m, src) } func (m *ExecuteVtworkerCommandResponse) XXX_Size() int { - return xxx_messageInfo_ExecuteVtworkerCommandResponse.Size(m) + return m.Size() } func (m *ExecuteVtworkerCommandResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExecuteVtworkerCommandResponse.DiscardUnknown(m) @@ -111,7 +128,7 @@ func init() { func init() { proto.RegisterFile("vtworkerdata.proto", fileDescriptor_32a791ab99179e8e) } var fileDescriptor_32a791ab99179e8e = []byte{ - // 175 bytes of a gzipped FileDescriptorProto + // 192 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x16, 0x93, 0xe2, 0xcd, 0xc9, 0x4f, 0x2f, 0x2d, 0xc9, 0xcc, 0x81, 0x48, 0x2a, 0x19, 0x73, @@ -120,7 +137,395 @@ var fileDescriptor_32a791ab99179e8e = []byte{ 0x17, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x81, 0xd9, 0x4a, 0x6e, 0x5c, 0x72, 0xb8, 0x34, 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0xa9, 0x70, 0xb1, 0xa6, 0x96, 0xa5, 0xe6, 0x95, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0xf1, 0xe9, 0xc1, 0x6c, 0x75, 0x05, 0x89, 0x06, 0x41, 0x24, - 0x9d, 0xb4, 0xa3, 0x34, 0xcb, 0x32, 0x4b, 0x52, 0x8b, 0x8b, 0xf5, 0x32, 0xf3, 0xf5, 0x21, 0x2c, - 0xfd, 0xf4, 0x7c, 0xfd, 0xb2, 0x12, 0x7d, 0xb0, 0xe3, 0xf4, 0x91, 0x1d, 0x9e, 0xc4, 0x06, 0x16, - 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x82, 0xc8, 0x11, 0xe3, 0x00, 0x00, 0x00, + 0x9d, 0xac, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 
0x39, 0xc6, 0x19, + 0x8f, 0xe5, 0x18, 0xa2, 0x34, 0xcb, 0x32, 0x4b, 0x52, 0x8b, 0x8b, 0xf5, 0x32, 0xf3, 0xf5, 0x21, + 0x2c, 0xfd, 0xf4, 0x7c, 0xfd, 0xb2, 0x12, 0x7d, 0xb0, 0x63, 0xf5, 0x91, 0x3d, 0x92, 0xc4, 0x06, + 0x16, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x38, 0xbf, 0x62, 0x49, 0xf3, 0x00, 0x00, 0x00, +} + +func (m *ExecuteVtworkerCommandRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteVtworkerCommandRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteVtworkerCommandRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintVtworkerdata(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExecuteVtworkerCommandResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteVtworkerCommandResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecuteVtworkerCommandResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Event != nil { + { + size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintVtworkerdata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintVtworkerdata(dAtA []byte, offset int, v uint64) int { + offset -= sovVtworkerdata(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base } +func (m *ExecuteVtworkerCommandRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovVtworkerdata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecuteVtworkerCommandResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.Size() + n += 1 + l + sovVtworkerdata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVtworkerdata(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVtworkerdata(x uint64) (n int) { + return sovVtworkerdata(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExecuteVtworkerCommandRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtworkerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteVtworkerCommandRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtworkerCommandRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtworkerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtworkerdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtworkerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtworkerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtworkerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtworkerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteVtworkerCommandResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtworkerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteVtworkerCommandResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtworkerCommandResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowVtworkerdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVtworkerdata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVtworkerdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVtworkerdata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVtworkerdata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVtworkerdata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVtworkerdata(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtworkerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtworkerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVtworkerdata + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length 
< 0 { + return 0, ErrInvalidLengthVtworkerdata + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVtworkerdata + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVtworkerdata + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVtworkerdata = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVtworkerdata = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVtworkerdata = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go index 66b4c5acb7b..6eea10f8fd0 100644 --- a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go +++ b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: vtworkerservice.proto package vtworkerservice @@ -12,7 +12,6 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - vtworkerdata "vitess.io/vitess/go/vt/proto/vtworkerdata" ) @@ -30,17 +29,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("vtworkerservice.proto", fileDescriptor_884fe2c3e67151b3) } var fileDescriptor_884fe2c3e67151b3 = []byte{ - // 151 bytes of a gzipped FileDescriptorProto + // 168 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x47, 0x13, 0x96, 0x12, 0x82, 0x09, 0xa4, 0x24, 0x96, 0x24, 0x42, 0x14, 0x19, 0x35, 0x33, 0x72, 0x71, 0x84, 0x41, 0x85, 0x85, 0xca, 0xb9, 0xc4, 0x5c, 0x2b, 0x52, 0x93, 0x4b, 0x4b, 0x52, 0x61, 0x42, 0xce, 0xf9, 0xb9, 0xb9, 0x89, 0x79, 0x29, 0x42, 0xda, 0x7a, 0x28, 0x7a, 0xb1, 0xab, 0x0a, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0xd2, 0x21, 0x4e, 0x71, 0x71, 0x41, - 0x7e, 0x5e, 0x71, 0xaa, 0x12, 0x83, 0x01, 0xa3, 0x93, 0x5e, 0x94, 0x4e, 0x59, 0x66, 0x49, 0x6a, - 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, 0x0f, 0x76, - 0xa5, 0x3e, 0x9a, 0x4f, 0x92, 0xd8, 0xc0, 0xc2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1c, - 0x01, 0x4d, 0x17, 0xfa, 0x00, 0x00, 0x00, + 0x7e, 0x5e, 0x71, 0xaa, 0x12, 0x83, 0x01, 0xa3, 0x93, 0xdd, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, + 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x4e, 0x59, 0x66, 0x49, + 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, 0x0f, + 0x76, 0xb5, 0x3e, 0x9a, 0xcf, 0x92, 0xd8, 0xc0, 0xc2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x6a, 0x5d, 0x63, 0x01, 0x0a, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not 
otherwise used. diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index 44e4c451786..4d258ec4a67 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -1,11 +1,13 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: workflow.proto package workflow import ( fmt "fmt" + io "io" math "math" + math_bits "math/bits" proto "github.com/golang/protobuf/proto" ) @@ -128,18 +130,26 @@ func (*Workflow) ProtoMessage() {} func (*Workflow) Descriptor() ([]byte, []int) { return fileDescriptor_892c7f566756b0be, []int{0} } - func (m *Workflow) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Workflow.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Workflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Workflow.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Workflow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Workflow) XXX_Merge(src proto.Message) { xxx_messageInfo_Workflow.Merge(m, src) } func (m *Workflow) XXX_Size() int { - return xxx_messageInfo_Workflow.Size(m) + return m.Size() } func (m *Workflow) XXX_DiscardUnknown() { xxx_messageInfo_Workflow.DiscardUnknown(m) @@ -233,18 +243,26 @@ func (*WorkflowCheckpoint) ProtoMessage() {} func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptor_892c7f566756b0be, []int{1} } - func (m *WorkflowCheckpoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WorkflowCheckpoint.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *WorkflowCheckpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WorkflowCheckpoint.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_WorkflowCheckpoint.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *WorkflowCheckpoint) XXX_Merge(src proto.Message) { xxx_messageInfo_WorkflowCheckpoint.Merge(m, src) } func (m *WorkflowCheckpoint) XXX_Size() int { - return xxx_messageInfo_WorkflowCheckpoint.Size(m) + return m.Size() } func (m *WorkflowCheckpoint) XXX_DiscardUnknown() { xxx_messageInfo_WorkflowCheckpoint.DiscardUnknown(m) @@ -290,18 +308,26 @@ func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor_892c7f566756b0be, []int{2} } - func (m *Task) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Task.Unmarshal(m, b) + return m.Unmarshal(b) } func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Task.Marshal(b, m, deterministic) + if deterministic { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } func (m *Task) XXX_Merge(src proto.Message) { xxx_messageInfo_Task.Merge(m, src) } func (m *Task) XXX_Size() int { - return xxx_messageInfo_Task.Size(m) + return m.Size() } func (m *Task) XXX_DiscardUnknown() { xxx_messageInfo_Task.DiscardUnknown(m) @@ -351,38 +377,1361 @@ func init() { func init() { proto.RegisterFile("workflow.proto", fileDescriptor_892c7f566756b0be) } var fileDescriptor_892c7f566756b0be = []byte{ - // 517 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x6f, 0x8b, 0xd3, 0x4e, - 0x10, 0xfe, 0x25, 0x6d, 0xae, 0xe9, 0xa4, 0x97, 0x2b, 0xf3, 0x3b, 0x30, 0x16, 0xd4, 0x5a, 0x94, - 0xab, 0x05, 0x5b, 0xa8, 0x20, 0xa2, 0xdc, 0x81, 0x7f, 0xf1, 0xd5, 0xbd, 0x48, 0x0f, 0x05, 0xdf, - 0x94, 0xbd, 0x66, 0xaf, 0x2e, 0xbd, 0xee, 0x1e, 0x9b, 0x69, 0x8f, 0x7e, 0x04, 0x3f, 0x98, 0x5f, - 0xc1, 0xcf, 0x23, 0xbb, 0xdb, 0xa4, 0x8d, 
0x8a, 0xe0, 0xbb, 0x99, 0x79, 0xe6, 0x79, 0x26, 0x3b, - 0xf3, 0x04, 0xe2, 0x5b, 0xa5, 0x17, 0x57, 0xd7, 0xea, 0x76, 0x78, 0xa3, 0x15, 0x29, 0x0c, 0x8b, - 0xbc, 0xf7, 0xcd, 0x87, 0xf0, 0xf3, 0x36, 0x41, 0x84, 0xfa, 0x6a, 0x25, 0xb2, 0xc4, 0xeb, 0x7a, - 0xfd, 0x66, 0x6a, 0x63, 0x7c, 0x08, 0xad, 0x2b, 0x36, 0x23, 0xa5, 0x37, 0x53, 0xc9, 0x96, 0x3c, - 0xf1, 0x2d, 0x16, 0x6d, 0x6b, 0xe7, 0x6c, 0xc9, 0x0d, 0xcd, 0x42, 0x35, 0x47, 0x33, 0x31, 0x3e, - 0x85, 0x20, 0x27, 0x46, 0x3c, 0xa9, 0x77, 0xbd, 0x7e, 0x3c, 0xbe, 0x33, 0x2c, 0xbf, 0xa0, 0x98, - 0x36, 0x31, 0x70, 0xea, 0xba, 0x8c, 0x44, 0xc6, 0x88, 0x25, 0x41, 0xd7, 0xeb, 0xb7, 0x52, 0x1b, - 0xe3, 0x31, 0x04, 0x5c, 0x6b, 0xa5, 0x93, 0x03, 0xab, 0xeb, 0x12, 0xbc, 0x07, 0x90, 0x13, 0xd3, - 0x34, 0x25, 0xb1, 0xe4, 0x49, 0xa3, 0xeb, 0xf5, 0x6b, 0x69, 0xd3, 0x56, 0x2e, 0xc4, 0x92, 0xe3, - 0x5d, 0x08, 0xb9, 0xcc, 0x1c, 0x18, 0x5a, 0xb0, 0xc1, 0x65, 0x66, 0xa1, 0x07, 0x10, 0xcd, 0x34, - 0x67, 0xc4, 0x1d, 0xda, 0xb4, 0x28, 0xb8, 0x92, 0x69, 0xe8, 0x7d, 0xf7, 0x01, 0x8b, 0xaf, 0x7b, - 0xfb, 0x95, 0xcf, 0x16, 0x37, 0x4a, 0x48, 0x32, 0x1b, 0x98, 0xa9, 0x8c, 0x4f, 0xd7, 0x5c, 0xe7, - 0x42, 0x49, 0xbb, 0x9d, 0x20, 0x8d, 0x4c, 0xed, 0x93, 0x2b, 0xe1, 0x29, 0x04, 0xc4, 0xf2, 0x45, - 0x9e, 0xf8, 0xdd, 0x5a, 0x3f, 0x1a, 0x9f, 0xfc, 0xfe, 0xda, 0x9d, 0xde, 0xf0, 0xc2, 0x74, 0xbe, - 0x97, 0xa4, 0x37, 0xa9, 0x63, 0xe1, 0x07, 0x08, 0x73, 0x4e, 0x24, 0xe4, 0x3c, 0x4f, 0x6a, 0x56, - 0x61, 0xf0, 0x57, 0x85, 0xc9, 0xb6, 0xd9, 0x89, 0x94, 0xdc, 0xce, 0x47, 0x80, 0x9d, 0x38, 0xb6, - 0xa1, 0xb6, 0xe0, 0x9b, 0xed, 0x31, 0x4d, 0x88, 0x8f, 0x20, 0x58, 0xb3, 0xeb, 0x95, 0x3b, 0x62, - 0x34, 0x8e, 0x77, 0x43, 0x0c, 0x2d, 0x75, 0xe0, 0x4b, 0xff, 0x85, 0xd7, 0x79, 0x05, 0x87, 0x95, - 0x21, 0x7f, 0x10, 0x3b, 0xde, 0x17, 0x6b, 0xee, 0x91, 0x7b, 0x3f, 0x3c, 0xa8, 0x1b, 0x41, 0x8c, - 0xc1, 0x2f, 0xdd, 0xe4, 0x8b, 0x0c, 0x9f, 0x14, 0xa6, 0xf0, 0xad, 0x29, 0xfe, 0xaf, 0xce, 0xaf, - 0x18, 0xe2, 0x0c, 0x80, 0x11, 0x69, 0x71, 0xb9, 0x22, 0x5e, 0x2c, 0xe5, 0x7e, 
0xb5, 0x7f, 0xf8, - 0xba, 0x6c, 0x70, 0x8b, 0xd8, 0x63, 0xec, 0xcc, 0x53, 0xdf, 0x33, 0x4f, 0xe7, 0x14, 0x8e, 0x7e, - 0x21, 0xfd, 0xcb, 0xc3, 0x06, 0xcf, 0xe1, 0xb0, 0xe2, 0x5e, 0x8c, 0x01, 0xce, 0x15, 0x4d, 0x8c, - 0xfb, 0x78, 0xd6, 0xfe, 0x0f, 0x23, 0x68, 0xa4, 0x2b, 0x29, 0x85, 0x9c, 0xb7, 0x3d, 0x0c, 0xa1, - 0xfe, 0x4e, 0x49, 0xde, 0xf6, 0x07, 0x67, 0xd0, 0x2c, 0x1f, 0x88, 0x08, 0xb1, 0x49, 0x2a, 0xbc, - 0x23, 0x88, 0xec, 0x05, 0x4a, 0x6e, 0x0b, 0x42, 0x53, 0x70, 0xfc, 0x37, 0x27, 0x5f, 0x1e, 0xaf, - 0x05, 0xf1, 0x3c, 0x1f, 0x0a, 0x35, 0x72, 0xd1, 0x68, 0xae, 0x46, 0x6b, 0x1a, 0xd9, 0xdf, 0x79, - 0x54, 0xac, 0xe5, 0xf2, 0xc0, 0xe6, 0xcf, 0x7e, 0x06, 0x00, 0x00, 0xff, 0xff, 0x75, 0x1d, 0xcd, - 0x85, 0xf0, 0x03, 0x00, 0x00, + // 538 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xdd, 0x8a, 0xd3, 0x40, + 0x14, 0xde, 0x49, 0x9b, 0xdd, 0xf4, 0xa4, 0x9b, 0x2d, 0xe3, 0x82, 0xb1, 0x60, 0xad, 0x45, 0xb1, + 0x16, 0x4c, 0xa0, 0x82, 0x8a, 0xb2, 0x0b, 0xfe, 0xe2, 0xd5, 0x5e, 0xa4, 0x8b, 0x82, 0x37, 0x65, + 0xb6, 0x99, 0xad, 0x43, 0xb7, 0x33, 0xcb, 0x64, 0xd2, 0xa5, 0x8f, 0xe0, 0x1b, 0xf8, 0x42, 0x82, + 0x97, 0x5e, 0x79, 0x2d, 0xf5, 0x45, 0x64, 0x66, 0x9a, 0xb4, 0x51, 0x11, 0xbc, 0x3b, 0xe7, 0x7c, + 0xe7, 0xfb, 0x4e, 0xe6, 0x9c, 0x2f, 0x10, 0x5c, 0x09, 0x39, 0x3b, 0xbf, 0x10, 0x57, 0xd1, 0xa5, + 0x14, 0x4a, 0x60, 0xaf, 0xc8, 0x7b, 0x9f, 0x1c, 0xf0, 0xde, 0xaf, 0x13, 0x8c, 0xa1, 0x9e, 0xe7, + 0x2c, 0x0d, 0x51, 0x17, 0xf5, 0x1b, 0x89, 0x89, 0xf1, 0x6d, 0x68, 0x9e, 0x93, 0x89, 0x12, 0x72, + 0x39, 0xe6, 0x64, 0x4e, 0x43, 0xc7, 0x60, 0xfe, 0xba, 0x76, 0x42, 0xe6, 0x54, 0xd3, 0x0c, 0x54, + 0xb3, 0x34, 0x1d, 0xe3, 0x07, 0xe0, 0x66, 0x8a, 0x28, 0x1a, 0xd6, 0xbb, 0xa8, 0x1f, 0x0c, 0xaf, + 0x47, 0xe5, 0x17, 0x14, 0xd3, 0x46, 0x1a, 0x4e, 0x6c, 0x97, 0x96, 0x48, 0x89, 0x22, 0xa1, 0xdb, + 0x45, 0xfd, 0x66, 0x62, 0x62, 0x7c, 0x08, 0x2e, 0x95, 0x52, 0xc8, 0x70, 0xd7, 0xe8, 0xda, 0x04, + 0xdf, 0x04, 0xc8, 0x14, 0x91, 0x6a, 
0xac, 0xd8, 0x9c, 0x86, 0x7b, 0x5d, 0xd4, 0xaf, 0x25, 0x0d, + 0x53, 0x39, 0x65, 0x73, 0x8a, 0x6f, 0x80, 0x47, 0x79, 0x6a, 0x41, 0xcf, 0x80, 0x7b, 0x94, 0xa7, + 0x06, 0xba, 0x05, 0xfe, 0x44, 0x52, 0xa2, 0xa8, 0x45, 0x1b, 0x06, 0x05, 0x5b, 0xd2, 0x0d, 0xbd, + 0x2f, 0x0e, 0xe0, 0xe2, 0xeb, 0x5e, 0x7e, 0xa4, 0x93, 0xd9, 0xa5, 0x60, 0x5c, 0xe9, 0x0d, 0x4c, + 0x44, 0x4a, 0xc7, 0x0b, 0x2a, 0x33, 0x26, 0xb8, 0xd9, 0x8e, 0x9b, 0xf8, 0xba, 0xf6, 0xce, 0x96, + 0xf0, 0x11, 0xb8, 0x8a, 0x64, 0xb3, 0x2c, 0x74, 0xba, 0xb5, 0xbe, 0x3f, 0xbc, 0xf7, 0xe7, 0x6b, + 0x37, 0x7a, 0xd1, 0xa9, 0xee, 0x7c, 0xcd, 0x95, 0x5c, 0x26, 0x96, 0x85, 0xdf, 0x80, 0x97, 0x51, + 0xa5, 0x18, 0x9f, 0x66, 0x61, 0xcd, 0x28, 0x0c, 0xfe, 0xa9, 0x30, 0x5a, 0x37, 0x5b, 0x91, 0x92, + 0xdb, 0x7e, 0x0b, 0xb0, 0x11, 0xc7, 0x2d, 0xa8, 0xcd, 0xe8, 0x72, 0x7d, 0x4c, 0x1d, 0xe2, 0x3b, + 0xe0, 0x2e, 0xc8, 0x45, 0x6e, 0x8f, 0xe8, 0x0f, 0x83, 0xcd, 0x10, 0x4d, 0x4b, 0x2c, 0xf8, 0xd4, + 0x79, 0x82, 0xda, 0xcf, 0x60, 0xbf, 0x32, 0xe4, 0x2f, 0x62, 0x87, 0xdb, 0x62, 0x8d, 0x2d, 0x72, + 0xef, 0x3b, 0x82, 0xba, 0x16, 0xc4, 0x01, 0x38, 0xa5, 0x9b, 0x1c, 0x96, 0xe2, 0xfb, 0x85, 0x29, + 0x1c, 0x63, 0x8a, 0x6b, 0xd5, 0xf9, 0x15, 0x43, 0x1c, 0x03, 0x10, 0xa5, 0x24, 0x3b, 0xcb, 0x15, + 0x2d, 0x96, 0xd2, 0xa9, 0xf6, 0x47, 0xcf, 0xcb, 0x06, 0xbb, 0x88, 0x2d, 0xc6, 0xc6, 0x3c, 0xf5, + 0x2d, 0xf3, 0xb4, 0x8f, 0xe0, 0xe0, 0x37, 0xd2, 0xff, 0x3c, 0x6c, 0xf0, 0x08, 0xf6, 0x2b, 0xee, + 0xc5, 0x01, 0xc0, 0x89, 0x50, 0x23, 0xed, 0x3e, 0x9a, 0xb6, 0x76, 0xb0, 0x0f, 0x7b, 0x49, 0xce, + 0x39, 0xe3, 0xd3, 0x16, 0xc2, 0x1e, 0xd4, 0x5f, 0x09, 0x4e, 0x5b, 0xce, 0xe0, 0x18, 0x1a, 0xe5, + 0x03, 0x31, 0x86, 0x40, 0x27, 0x15, 0xde, 0x01, 0xf8, 0xe6, 0x02, 0x25, 0xb7, 0x09, 0x9e, 0x2e, + 0x58, 0xfe, 0x8b, 0xc7, 0x5f, 0x57, 0x1d, 0xf4, 0x6d, 0xd5, 0x41, 0x3f, 0x56, 0x1d, 0xf4, 0xf9, + 0x67, 0x67, 0xe7, 0xc3, 0xdd, 0x05, 0x53, 0x34, 0xcb, 0x22, 0x26, 0x62, 0x1b, 0xc5, 0x53, 0x11, + 0x2f, 0x54, 0x6c, 0x7e, 0xef, 0xb8, 0x58, 0xd3, 0xd9, 0xae, 0xc9, 0x1f, 
0xfe, 0x0a, 0x00, 0x00, + 0xff, 0xff, 0xce, 0x9e, 0x9e, 0xfc, 0x00, 0x04, 0x00, 0x00, +} + +func (m *Workflow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Workflow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreateTime != 0 { + i = encodeVarintWorkflow(dAtA, i, uint64(m.CreateTime)) + i-- + dAtA[i] = 0x48 + } + if m.EndTime != 0 { + i = encodeVarintWorkflow(dAtA, i, uint64(m.EndTime)) + i-- + dAtA[i] = 0x40 + } + if m.StartTime != 0 { + i = encodeVarintWorkflow(dAtA, i, uint64(m.StartTime)) + i-- + dAtA[i] = 0x38 + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x32 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x2a + } + if m.State != 0 { + i = encodeVarintWorkflow(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x20 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.FactoryName) > 0 { + i -= len(m.FactoryName) + copy(dAtA[i:], m.FactoryName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.FactoryName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowCheckpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowCheckpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowCheckpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Settings) > 0 { + for k := range m.Settings { + v := m.Settings[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintWorkflow(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWorkflow(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWorkflow(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Tasks) > 0 { + for k := range m.Tasks { + v := m.Tasks[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWorkflow(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWorkflow(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.CodeVersion != 0 { + i = encodeVarintWorkflow(dAtA, i, uint64(m.CodeVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Task) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if len(m.Attributes) > 0 { + for k := range m.Attributes { + v := m.Attributes[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintWorkflow(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWorkflow(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWorkflow(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.State != 0 { + i = encodeVarintWorkflow(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWorkflow(dAtA []byte, offset int, v uint64) int { + offset -= sovWorkflow(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base } +func (m *Workflow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.FactoryName) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.State != 0 { + n += 1 + sovWorkflow(uint64(m.State)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.StartTime != 0 { + n += 1 + sovWorkflow(uint64(m.StartTime)) + } + if m.EndTime != 0 { + n += 1 + sovWorkflow(uint64(m.EndTime)) + } + if m.CreateTime != 0 { + n += 1 + sovWorkflow(uint64(m.CreateTime)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowCheckpoint) Size() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if m.CodeVersion != 0 { + n += 1 + sovWorkflow(uint64(m.CodeVersion)) + } + if len(m.Tasks) > 0 { + for k, v := range m.Tasks { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovWorkflow(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovWorkflow(uint64(len(k))) + l + n += mapEntrySize + 1 + sovWorkflow(uint64(mapEntrySize)) + } + } + if len(m.Settings) > 0 { + for k, v := range m.Settings { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovWorkflow(uint64(len(k))) + 1 + len(v) + sovWorkflow(uint64(len(v))) + n += mapEntrySize + 1 + sovWorkflow(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Task) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.State != 0 { + n += 1 + sovWorkflow(uint64(m.State)) + } + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovWorkflow(uint64(len(k))) + 1 + len(v) + sovWorkflow(uint64(len(v))) + n += mapEntrySize + 1 + sovWorkflow(uint64(mapEntrySize)) + } + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWorkflow(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWorkflow(x uint64) (n int) { + return sovWorkflow(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Workflow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FactoryName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FactoryName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= WorkflowState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + m.StartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) + } + m.EndTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateTime", wireType) + } + m.CreateTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreateTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowCheckpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowCheckpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CodeVersion", wireType) + } + m.CodeVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CodeVersion |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ if m.Tasks == nil { + m.Tasks = make(map[string]*Task) + } + var mapkey string + var mapvalue *Task + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWorkflow + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWorkflow + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthWorkflow + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Task{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tasks[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Settings == nil { + m.Settings = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWorkflow + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWorkflow + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthWorkflow + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthWorkflow + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Settings[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= TaskState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWorkflow + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWorkflow + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthWorkflow + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthWorkflow + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWorkflow(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWorkflow + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWorkflow + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWorkflow + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWorkflow = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWorkflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWorkflow = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go index 39b5bb08134..ea5dc783317 100644 --- a/go/vt/schema/online_ddl.go +++ b/go/vt/schema/online_ddl.go @@ -19,11 +19,14 @@ package schema import ( "context" "encoding/json" + "errors" "fmt" "regexp" "strings" "time" 
+ "github.com/google/shlex" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" ) @@ -32,8 +35,19 @@ var ( migrationBasePath = "schema-migration" onlineDdlUUIDRegexp = regexp.MustCompile(`^[0-f]{8}_[0-f]{4}_[0-f]{4}_[0-f]{4}_[0-f]{12}$`) strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`) - onlineDDLGeneratedTableNameRegexp = regexp.MustCompile(`^_[0-f]{8}_[0-f]{4}_[0-f]{4}_[0-f]{4}_[0-f]{12}_([0-9]{14})_(gho|ghc|del|new)$`) + onlineDDLGeneratedTableNameRegexp = regexp.MustCompile(`^_[0-f]{8}_[0-f]{4}_[0-f]{4}_[0-f]{4}_[0-f]{12}_([0-9]{14})_(gho|ghc|del|new|vrepl)$`) ptOSCGeneratedTableNameRegexp = regexp.MustCompile(`^_.*_old$`) + revertStatementRegexp = regexp.MustCompile(`(?i)^revert\s+(.*)$`) +) +var ( + // ErrOnlineDDLDisabled is returned when online DDL is disabled, and a user attempts to run an online DDL operation (submit, review, control) + ErrOnlineDDLDisabled = errors.New("online DDL is disabled") +) + +const ( + SchemaMigrationsTableName = "schema_migrations" + RevertActionStr = "revert" + declarativeFlag = "declarative" ) // MigrationBasePath is the root for all schema migration entries @@ -82,6 +96,8 @@ type DDLStrategy string const ( // DDLStrategyDirect means not an online-ddl migration. 
Just a normal MySQL ALTER TABLE DDLStrategyDirect DDLStrategy = "direct" + // DDLStrategyOnline requests vreplication to run the migration + DDLStrategyOnline DDLStrategy = "online" // DDLStrategyGhost requests gh-ost to run the migration DDLStrategyGhost DDLStrategy = "gh-ost" // DDLStrategyPTOSC requests pt-online-schema-change to run the migration @@ -92,7 +108,7 @@ const ( // A strategy is direct if it's not explciitly one of the online DDL strategies func (s DDLStrategy) IsDirect() bool { switch s { - case DDLStrategyGhost, DDLStrategyPTOSC: + case DDLStrategyOnline, DDLStrategyGhost, DDLStrategyPTOSC: return false } return true @@ -125,7 +141,7 @@ func ParseDDLStrategy(strategyVariable string) (strategy DDLStrategy, options st switch strategy = DDLStrategy(strategyName); strategy { case "": // backwards compatiblity and to handle unspecified values return DDLStrategyDirect, options, nil - case DDLStrategyGhost, DDLStrategyPTOSC, DDLStrategyDirect: + case DDLStrategyOnline, DDLStrategyGhost, DDLStrategyPTOSC, DDLStrategyDirect: return strategy, options, nil default: return DDLStrategyDirect, options, fmt.Errorf("Unknown online DDL strategy: '%v'", strategy) @@ -202,25 +218,82 @@ func (onlineDDL *OnlineDDL) ToJSON() ([]byte, error) { // GetAction extracts the DDL action type from the online DDL statement func (onlineDDL *OnlineDDL) GetAction() (action sqlparser.DDLAction, err error) { + if revertStatementRegexp.MatchString(onlineDDL.SQL) { + return sqlparser.RevertDDLAction, nil + } + _, action, err = ParseOnlineDDLStatement(onlineDDL.SQL) return action, err } // GetActionStr returns a string representation of the DDL action -func (onlineDDL *OnlineDDL) GetActionStr() (actionStr string, err error) { - action, err := onlineDDL.GetAction() +func (onlineDDL *OnlineDDL) GetActionStr() (action sqlparser.DDLAction, actionStr string, err error) { + action, err = onlineDDL.GetAction() if err != nil { - return actionStr, err + return action, actionStr, err } switch 
action { + case sqlparser.RevertDDLAction: + return action, RevertActionStr, nil case sqlparser.CreateDDLAction: - return sqlparser.CreateStr, nil + return action, sqlparser.CreateStr, nil case sqlparser.AlterDDLAction: - return sqlparser.AlterStr, nil + return action, sqlparser.AlterStr, nil case sqlparser.DropDDLAction: - return sqlparser.DropStr, nil + return action, sqlparser.DropStr, nil + } + return action, "", fmt.Errorf("Unsupported online DDL action. SQL=%s", onlineDDL.SQL) +} + +// GetRevertUUID works when this migration is a revert for another migration. It returns the UUID +// fo the reverted migration. +// The function returns error when this is not a revert migration. +func (onlineDDL *OnlineDDL) GetRevertUUID() (uuid string, err error) { + if submatch := revertStatementRegexp.FindStringSubmatch(onlineDDL.SQL); len(submatch) > 0 { + return submatch[1], nil + } + return "", fmt.Errorf("Not a Revert DDL: '%s'", onlineDDL.SQL) +} + +// isFlag return true when the given string is a CLI flag of the given name +func isFlag(s string, name string) bool { + if s == fmt.Sprintf("-%s", name) { + return true + } + if s == fmt.Sprintf("--%s", name) { + return true + } + return false +} + +// hasFlag returns true when Options include named flag +func (onlineDDL *OnlineDDL) hasFlag(name string) bool { + opts, _ := shlex.Split(onlineDDL.Options) + for _, opt := range opts { + if isFlag(opt, name) { + return true + } + } + return false +} + +// IsDeclarative checks if strategy options include -declarative +func (onlineDDL *OnlineDDL) IsDeclarative() bool { + return onlineDDL.hasFlag(declarativeFlag) +} + +// RuntimeOptions returns the options used as runtime flags for given strategy, removing any internal hint options +func (onlineDDL *OnlineDDL) RuntimeOptions() []string { + opts, _ := shlex.Split(onlineDDL.Options) + validOpts := []string{} + for _, opt := range opts { + switch { + case isFlag(opt, declarativeFlag): + default: + validOpts = append(validOpts, opt) + 
} } - return "", fmt.Errorf("Unsupported online DDL action. SQL=%s", onlineDDL.SQL) + return validOpts } // ToString returns a simple string representation of this instance diff --git a/go/vt/schema/online_ddl_test.go b/go/vt/schema/online_ddl_test.go index 28f28b9e6ad..024bc953b02 100644 --- a/go/vt/schema/online_ddl_test.go +++ b/go/vt/schema/online_ddl_test.go @@ -17,6 +17,7 @@ limitations under the License. package schema import ( + "strings" "testing" "github.com/stretchr/testify/assert" @@ -31,6 +32,7 @@ func TestCreateUUID(t *testing.T) { func TestIsDirect(t *testing.T) { assert.True(t, DDLStrategyDirect.IsDirect()) + assert.False(t, DDLStrategyOnline.IsDirect()) assert.False(t, DDLStrategyGhost.IsDirect()) assert.False(t, DDLStrategyPTOSC.IsDirect()) assert.True(t, DDLStrategy("").IsDirect()) @@ -44,12 +46,18 @@ func TestParseDDLStrategy(t *testing.T) { strategyVariable string strategy DDLStrategy options string + isDeclarative bool + runtimeOptions string err error }{ { strategyVariable: "direct", strategy: DDLStrategyDirect, }, + { + strategyVariable: "online", + strategy: DDLStrategyOnline, + }, { strategyVariable: "gh-ost", strategy: DDLStrategyGhost, @@ -65,6 +73,21 @@ func TestParseDDLStrategy(t *testing.T) { strategyVariable: "gh-ost --max-load=Threads_running=100 --allow-master", strategy: DDLStrategyGhost, options: "--max-load=Threads_running=100 --allow-master", + runtimeOptions: "--max-load=Threads_running=100 --allow-master", + }, + { + strategyVariable: "gh-ost --max-load=Threads_running=100 -declarative", + strategy: DDLStrategyGhost, + options: "--max-load=Threads_running=100 -declarative", + runtimeOptions: "--max-load=Threads_running=100", + isDeclarative: true, + }, + { + strategyVariable: "gh-ost --declarative --max-load=Threads_running=100", + strategy: DDLStrategyGhost, + options: "--declarative --max-load=Threads_running=100", + runtimeOptions: "--max-load=Threads_running=100", + isDeclarative: true, }, } for _, ts := range tt 
{ @@ -72,6 +95,11 @@ func TestParseDDLStrategy(t *testing.T) { assert.NoError(t, err) assert.Equal(t, ts.strategy, strategy) assert.Equal(t, ts.options, options) + onlineDDL := &OnlineDDL{Options: options} + assert.Equal(t, ts.isDeclarative, onlineDDL.IsDeclarative()) + + runtimeOptions := strings.Join(onlineDDL.RuntimeOptions(), " ") + assert.Equal(t, ts.runtimeOptions, runtimeOptions) } { _, _, err := ParseDDLStrategy("other") @@ -109,7 +137,6 @@ func TestGetGCUUID(t *testing.T) { } assert.Equal(t, count, len(uuids)) } - func TestGetActionStr(t *testing.T) { tt := []struct { statement string @@ -134,14 +161,16 @@ func TestGetActionStr(t *testing.T) { }, } for _, ts := range tt { - onlineDDL := &OnlineDDL{SQL: ts.statement} - actionStr, err := onlineDDL.GetActionStr() - if ts.isError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, actionStr, ts.actionStr) - } + t.Run(ts.statement, func(t *testing.T) { + onlineDDL := &OnlineDDL{SQL: ts.statement} + _, actionStr, err := onlineDDL.GetActionStr() + if ts.isError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, actionStr, ts.actionStr) + } + }) } } @@ -151,6 +180,7 @@ func TestIsOnlineDDLTableName(t *testing.T) { "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_ghc", "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_del", "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114013_new", + "_84371a37_6153_11eb_9917_f875a4d24e90_20210128122816_vrepl", "_table_old", "__table_old", } @@ -164,9 +194,47 @@ func TestIsOnlineDDLTableName(t *testing.T) { "_table_gho", "_table_ghc", "_table_del", + "_table_vrepl", "table_old", } for _, tableName := range irrelevantNames { assert.False(t, IsOnlineDDLTableName(tableName)) } } + +func TestGetRevertUUID(t *testing.T) { + tt := []struct { + statement string + uuid string + isError bool + }{ + { + statement: "revert 4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014", + uuid: 
"4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014", + }, + { + statement: "REVERT 4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014", + uuid: "4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014", + }, + { + statement: "REVERT", + isError: true, + }, + { + statement: "alter table t drop column c", + isError: true, + }, + } + for _, ts := range tt { + t.Run(ts.statement, func(t *testing.T) { + onlineDDL := &OnlineDDL{SQL: ts.statement} + uuid, err := onlineDDL.GetRevertUUID() + if ts.isError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, uuid, ts.uuid) + } + }) + } +} diff --git a/go/vt/schema/parser.go b/go/vt/schema/parser.go index ccdf2bf0916..2708715da32 100644 --- a/go/vt/schema/parser.go +++ b/go/vt/schema/parser.go @@ -17,6 +17,7 @@ limitations under the License. package schema import ( + "fmt" "regexp" "strings" @@ -48,8 +49,22 @@ var ( // ALTER TABLE tbl something regexp.MustCompile(alterTableBasicPattern + `([\S]+)\s+(.*$)`), } + createTableRegexp = regexp.MustCompile(`(?s)(?i)(CREATE\s+TABLE\s+)` + "`" + `([^` + "`" + `]+)` + "`" + `(\s*[(].*$)`) ) +// ReplaceTableNameInCreateTableStatement returns a modified CREATE TABLE statement, such that the table name is replaced with given name. +// This intentionally string-replacement based, and not sqlparser.String() based, because the return statement has to be formatted _precisely_, +// up to MySQL version nuances, like the original statement. That's in favor of tengo table comparison. +// We expect a well formatted, no-qualifier statement in the form: +// CREATE TABLE `some_table` ... 
+func ReplaceTableNameInCreateTableStatement(createStatement string, replacementName string) (modifiedStatement string, err error) { + submatch := createTableRegexp.FindStringSubmatch(createStatement) + if len(submatch) == 0 { + return createStatement, fmt.Errorf("could not parse statement: %s", createStatement) + } + return fmt.Sprintf("%s`%s`%s", submatch[1], replacementName, submatch[3]), nil +} + // ParseAlterTableOptions parses a ALTER ... TABLE... statement into: // - explicit schema and table, if available // - alter options (anything that follows ALTER ... TABLE) diff --git a/go/vt/schema/parser_test.go b/go/vt/schema/parser_test.go index db153c3a996..237d986bc78 100644 --- a/go/vt/schema/parser_test.go +++ b/go/vt/schema/parser_test.go @@ -89,3 +89,48 @@ func TestNormalizeOnlineDDL(t *testing.T) { }) } } + +func TestReplaceTableNameInCreateTableStatement(t *testing.T) { + replacementTableName := `my_table` + tt := []struct { + stmt string + expect string + isError bool + }{ + { + stmt: "CREATE TABLE tbl (id int)", + isError: true, + }, + { + stmt: "CREATE TABLE `tbl` (id int)", + expect: "CREATE TABLE `my_table` (id int)", + }, + { + stmt: "CREATE TABLE `tbl` (id int)", + expect: "CREATE TABLE `my_table` (id int)", + }, + { + stmt: "create table `tbl` (id int)", + expect: "create table `my_table` (id int)", + }, + { + stmt: "CREATE TABLE `schema`.`tbl` (id int)", + isError: true, + }, + { + stmt: "CREATE TABLE IF NOT EXISTS `tbl` (id int)", + isError: true, + }, + } + for _, ts := range tt { + t.Run(ts.stmt, func(*testing.T) { + result, err := ReplaceTableNameInCreateTableStatement(ts.stmt, replacementTableName) + if ts.isError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, ts.expect, result) + } + }) + } +} diff --git a/go/vt/schema/tablegc.go b/go/vt/schema/tablegc.go index 61b2e247447..80dbb6b773a 100644 --- a/go/vt/schema/tablegc.go +++ b/go/vt/schema/tablegc.go @@ -76,6 +76,11 @@ func generateGCTableName(state 
TableGCState, uuid string, t time.Time) (tableNam return fmt.Sprintf("_vt_%s_%s_%s", state, uuid, timestamp), nil } +// GenerateGCTableName creates a GC table name, based on desired state and time, and with random UUID +func GenerateGCTableName(state TableGCState, t time.Time) (tableName string, err error) { + return generateGCTableName(state, "", t) +} + // AnalyzeGCTableName analyzes a given table name to see if it's a GC table, and if so, parse out // its state, uuid, and timestamp func AnalyzeGCTableName(tableName string) (isGCTable bool, state TableGCState, uuid string, t time.Time, err error) { diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 9cb0f76274c..9c43416f2b7 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -42,6 +42,7 @@ type TabletExecutor struct { keyspace string waitReplicasTimeout time.Duration ddlStrategy string + skipPreflight bool } // NewTabletExecutor creates a new TabletExecutor instance @@ -76,6 +77,11 @@ func (exec *TabletExecutor) SetDDLStrategy(ddlStrategy string) error { return nil } +// SkipPreflight disables preflight checks +func (exec *TabletExecutor) SkipPreflight() { + exec.skipPreflight = true +} + // Open opens a connection to the master for every shard. 
func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error { if !exec.isClosed { @@ -212,6 +218,9 @@ func (exec *TabletExecutor) detectBigSchemaChanges(ctx context.Context, parsedDD } func (exec *TabletExecutor) preflightSchemaChanges(ctx context.Context, sqls []string) error { + if exec.skipPreflight { + return nil + } _, err := exec.wr.TabletManagerClient().PreflightSchema(ctx, exec.tablets[0], sqls) return err } diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 954d1247a90..ab6c1f16e83 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -239,6 +239,12 @@ func TestIsOnlineSchemaDDL(t *testing.T) { isOnlineDDL: true, strategy: schema.DDLStrategyGhost, }, + { + query: "ALTER TABLE t ADD COLUMN i INT", + ddlStrategy: "online", + isOnlineDDL: true, + strategy: schema.DDLStrategyOnline, + }, { query: "ALTER TABLE t ADD COLUMN i INT", ddlStrategy: "", @@ -257,6 +263,11 @@ func TestIsOnlineSchemaDDL(t *testing.T) { strategy: schema.DDLStrategyGhost, options: "--max-load=Threads_running=100", }, + { + query: "TRUNCATE TABLE t", + ddlStrategy: "online", + isOnlineDDL: false, + }, { query: "TRUNCATE TABLE t", ddlStrategy: "gh-ost", diff --git a/go/vt/servenv/buildinfo.go b/go/vt/servenv/buildinfo.go index 4eb9bbf3c8f..449d226ac15 100644 --- a/go/vt/servenv/buildinfo.go +++ b/go/vt/servenv/buildinfo.go @@ -23,10 +23,22 @@ import ( "strconv" "time" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + + "vitess.io/vitess/go/vt/log" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/stats" ) var ( + // MySQLServerVersion is what Vitess will present as it's version during the connection handshake, + // and as the value to the @@version system variable. 
If nothing is provided, Vitess will report itself as + // a specific MySQL version with the vitess version appended to it + MySQLServerVersion = flag.String("mysql_server_version", "", "MySQL server version to advertise.") + buildHost = "" buildUser = "" buildTime = "" @@ -52,6 +64,7 @@ type versionInfo struct { goVersion string goOS string goArch string + version string } func (v *versionInfo) Print() { @@ -59,11 +72,19 @@ func (v *versionInfo) Print() { } func (v *versionInfo) String() string { - version := fmt.Sprintf("Version: %s", v.buildGitRev) + jenkins := "" if v.jenkinsBuildNumber != 0 { - version = fmt.Sprintf("Version: %s (Jenkins build %d)", v.buildGitRev, v.jenkinsBuildNumber) + jenkins = fmt.Sprintf(" (Jenkins build %d)", v.jenkinsBuildNumber) } - return fmt.Sprintf("%s (Git branch '%s') built on %s by %s@%s using %s %s/%s\n", version, v.buildGitBranch, v.buildTimePretty, v.buildUser, v.buildHost, v.goVersion, v.goOS, v.goArch) + return fmt.Sprintf("Version: %s%s (Git revision %s branch '%s') built on %s by %s@%s using %s %s/%s", + v.version, jenkins, v.buildGitRev, v.buildGitBranch, v.buildTimePretty, v.buildUser, v.buildHost, v.goVersion, v.goOS, v.goArch) +} + +func (v *versionInfo) MySQLVersion() string { + if *MySQLServerVersion != "" { + return *MySQLServerVersion + } + return "5.7.9-vitess-" + v.version } func init() { @@ -88,8 +109,15 @@ func init() { goVersion: runtime.Version(), goOS: runtime.GOOS, goArch: runtime.GOARCH, + version: versionName, + } + var convVersion string + convVersion, err = convertMySQLVersionToCommentVersion(AppVersion.MySQLVersion()) + if err != nil { + log.Error(err) + } else { + sqlparser.MySQLVersion = convVersion } - stats.NewString("BuildHost").Set(AppVersion.buildHost) stats.NewString("BuildUser").Set(AppVersion.buildUser) stats.NewGauge("BuildTimestamp", "build timestamp").Set(AppVersion.buildTime) @@ -111,3 +139,41 @@ func init() { } stats.NewGaugesWithMultiLabels("BuildInformation", "build information exposed 
via label", buildLabels).Set(buildValues, 1) } + +// convertMySQLVersionToCommentVersion converts the MySQL version into comment version format. +func convertMySQLVersionToCommentVersion(version string) (string, error) { + var res = make([]int, 3) + idx := 0 + val := "" + for _, c := range version { + if c <= '9' && c >= '0' { + val += string(c) + } else if c == '.' { + v, err := strconv.Atoi(val) + if err != nil { + return "", err + } + val = "" + res[idx] = v + idx++ + if idx == 3 { + break + } + } else { + break + } + } + if val != "" { + v, err := strconv.Atoi(val) + if err != nil { + return "", err + } + res[idx] = v + idx++ + } + if idx == 0 { + return "", vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "MySQL version not correctly setup - %s.", version) + } + + return fmt.Sprintf("%01d%02d%02d", res[0], res[1], res[2]), nil +} diff --git a/go/vt/servenv/buildinfo_test.go b/go/vt/servenv/buildinfo_test.go new file mode 100644 index 00000000000..1517f4abf65 --- /dev/null +++ b/go/vt/servenv/buildinfo_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package servenv + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/stretchr/testify/assert" +) + +func TestVersionString(t *testing.T) { + now, _ := time.Parse(time.RFC1123, "Tue, 15 Sep 2020 12:04:10 UTC") + + v := &versionInfo{ + buildHost: "host", + buildUser: "user", + buildTime: now.Unix(), + buildTimePretty: "time is now", + buildGitRev: "d54b87c", + buildGitBranch: "gitBranch", + goVersion: "1.15", + goOS: "amiga", + goArch: "amd64", + version: "v1.2.3-SNAPSHOT", + } + + assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Git revision d54b87c branch 'gitBranch') built on time is now by user@host using 1.15 amiga/amd64", v.String()) + + v.jenkinsBuildNumber = 422 + + assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Jenkins build 422) (Git revision d54b87c branch 'gitBranch') built on time is now by user@host using 1.15 amiga/amd64", v.String()) + + assert.Equal(t, "5.7.9-vitess-v1.2.3-SNAPSHOT", v.MySQLVersion()) + newVersion := "test!" 
+ MySQLServerVersion = &newVersion + assert.Equal(t, newVersion, v.MySQLVersion()) +} + +func TestConvertMySQLVersion(t *testing.T) { + testcases := []struct { + version string + commentVersion string + error string + }{{ + version: "5.7.9", + commentVersion: "50709", + }, { + version: "0008.08.9", + commentVersion: "80809", + }, { + version: "5.7.9, Vitess - 10.0.1", + commentVersion: "50709", + }, { + version: "8.1 Vitess - 10.0.1", + commentVersion: "80100", + }, { + version: "Vitess - 10.0.1", + error: "MySQL version not correctly setup - Vitess - 10.0.1.", + }, { + version: "5.7.9.22", + commentVersion: "50709", + }} + + for _, tcase := range testcases { + t.Run(tcase.version, func(t *testing.T) { + output, err := convertMySQLVersionToCommentVersion(tcase.version) + if tcase.error != "" { + require.EqualError(t, err, tcase.error) + } else { + require.NoError(t, err) + require.Equal(t, tcase.commentVersion, output) + } + }) + } +} diff --git a/go/vt/servenv/grpc_server.go b/go/vt/servenv/grpc_server.go index 448fbb1d73b..feae9a0e6ef 100644 --- a/go/vt/servenv/grpc_server.go +++ b/go/vt/servenv/grpc_server.go @@ -35,6 +35,7 @@ import ( "context" "vitess.io/vitess/go/vt/grpccommon" + "vitess.io/vitess/go/vt/grpcoptionaltls" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vttls" ) @@ -64,6 +65,11 @@ var ( // GRPCCA is the CA to use if TLS is enabled GRPCCA = flag.String("grpc_ca", "", "server CA to use for gRPC connections, requires TLS, and enforces client certificate check") + GRPCEnableOptionalTLS = flag.Bool("grpc_enable_optional_tls", false, "enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port") + + // GRPCServerCA if specified will combine server cert and server CA + GRPCServerCA = flag.String("grpc_server_ca", "", "path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients") + // GRPCAuth which auth plugin to use (at the moment now only static is 
supported) GRPCAuth = flag.String("grpc_auth_mode", "", "Which auth plugin implementation to use (eg: static)") @@ -125,13 +131,17 @@ func createGRPCServer() { var opts []grpc.ServerOption if GRPCPort != nil && *GRPCCert != "" && *GRPCKey != "" { - config, err := vttls.ServerConfig(*GRPCCert, *GRPCKey, *GRPCCA) + config, err := vttls.ServerConfig(*GRPCCert, *GRPCKey, *GRPCCA, *GRPCServerCA) if err != nil { log.Exitf("Failed to log gRPC cert/key/ca: %v", err) } // create the creds server options creds := credentials.NewTLS(config) + if *GRPCEnableOptionalTLS { + log.Warning("Optional TLS is active. Plain-text connections will be accepted") + creds = grpcoptionaltls.New(creds) + } opts = []grpc.ServerOption{grpc.Creds(creds)} } // Override the default max message size for both send and receive diff --git a/go/vt/servenv/pprof.go b/go/vt/servenv/pprof.go index 28c31dee347..e81f605d6fc 100644 --- a/go/vt/servenv/pprof.go +++ b/go/vt/servenv/pprof.go @@ -18,27 +18,298 @@ package servenv import ( "flag" + "fmt" + "io/ioutil" "os" + "os/signal" + "path/filepath" + "runtime" "runtime/pprof" + "runtime/trace" + "strconv" + "strings" + "sync/atomic" + "syscall" "vitess.io/vitess/go/vt/log" ) var ( - cpuProfile = flag.String("cpu_profile", "", "write cpu profile to file") + _ = flag.String("cpu_profile", "", "deprecated: use '-pprof=cpu' instead") + pprofFlag = flag.String("pprof", "", "enable profiling") ) -func init() { - OnInit(func() { - if *cpuProfile != "" { - f, err := os.Create(*cpuProfile) +type profmode string + +const ( + profileCPU profmode = "cpu" + profileMemHeap profmode = "mem_heap" + profileMemAllocs profmode = "mem_allocs" + profileMutex profmode = "mutex" + profileBlock profmode = "block" + profileTrace profmode = "trace" + profileThreads profmode = "threads" + profileGoroutine profmode = "goroutine" +) + +func (p profmode) filename() string { + return fmt.Sprintf("%s.pprof", string(p)) +} + +type profile struct { + mode profmode + rate int + path string + 
quiet bool + waitSig bool +} + +func parseProfileFlag(pf string) (*profile, error) { + if pf == "" { + return nil, nil + } + + var p profile + + items := strings.Split(pf, ",") + switch items[0] { + case "cpu": + p.mode = profileCPU + case "mem", "mem=heap": + p.mode = profileMemHeap + p.rate = 4096 + case "mem=allocs": + p.mode = profileMemAllocs + p.rate = 4096 + case "mutex": + p.mode = profileMutex + p.rate = 1 + case "block": + p.mode = profileBlock + p.rate = 1 + case "trace": + p.mode = profileTrace + case "threads": + p.mode = profileThreads + case "goroutine": + p.mode = profileGoroutine + default: + return nil, fmt.Errorf("unknown profile mode: %q", items[0]) + } + + for _, kv := range items[1:] { + var err error + fields := strings.SplitN(kv, "=", 2) + + switch fields[0] { + case "rate": + if len(fields) == 1 { + return nil, fmt.Errorf("missing value for 'rate'") + } + p.rate, err = strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("invalid profile rate %q: %v", fields[1], err) + } + + case "path": + if len(fields) == 1 { + return nil, fmt.Errorf("missing value for 'path'") + } + p.path = fields[1] + + case "quiet": + if len(fields) == 1 { + p.quiet = true + continue + } + + p.quiet, err = strconv.ParseBool(fields[1]) if err != nil { - log.Fatalf("Failed to create profile file: %v", err) + return nil, fmt.Errorf("invalid quiet flag %q: %v", fields[1], err) } + case "waitSig": + if len(fields) == 1 { + p.waitSig = true + continue + } + p.waitSig, err = strconv.ParseBool(fields[1]) + if err != nil { + return nil, fmt.Errorf("invalid waitSig flag %q: %v", fields[1], err) + } + default: + return nil, fmt.Errorf("unknown flag: %q", fields[0]) + } + } + + return &p, nil +} + +var profileStarted uint32 + +func startCallback(start func()) func() { + return func() { + if atomic.CompareAndSwapUint32(&profileStarted, 0, 1) { + start() + } else { + log.Fatal("profile: Start() already called") + } + } +} + +func stopCallback(stop func()) func() { + 
return func() { + if atomic.CompareAndSwapUint32(&profileStarted, 1, 0) { + stop() + } + } +} + +// init returns a start function that begins the configured profiling process and +// returns a cleanup function that must be executed before process termination to +// flush the profile to disk. +// Based on the profiling code in github.com/pkg/profile +func (prof *profile) init() (start func(), stop func()) { + var ( + path string + err error + logf = func(format string, args ...interface{}) {} + ) + + if prof.path != "" { + path = prof.path + err = os.MkdirAll(path, 0777) + } else { + path, err = ioutil.TempDir("", "profile") + } + if err != nil { + log.Fatalf("pprof: could not create initial output directory: %v", err) + } + + if !prof.quiet { + logf = log.Infof + } + + fn := filepath.Join(path, prof.mode.filename()) + f, err := os.Create(fn) + if err != nil { + log.Fatalf("pprof: could not create profile %q: %v", fn, err) + } + logf("pprof: %s profiling enabled, %s", string(prof.mode), fn) + + switch prof.mode { + case profileCPU: + start = startCallback(func() { pprof.StartCPUProfile(f) - OnTerm(func() { - pprof.StopCPUProfile() - }) + }) + stop = stopCallback(func() { + pprof.StopCPUProfile() + f.Close() + }) + return start, stop + + case profileMemHeap, profileMemAllocs: + old := runtime.MemProfileRate + start = startCallback(func() { + runtime.MemProfileRate = prof.rate + }) + stop = stopCallback(func() { + tt := "heap" + if prof.mode == profileMemAllocs { + tt = "allocs" + } + pprof.Lookup(tt).WriteTo(f, 0) + f.Close() + runtime.MemProfileRate = old + }) + return start, stop + + case profileMutex: + start = startCallback(func() { + runtime.SetMutexProfileFraction(prof.rate) + }) + stop = stopCallback(func() { + if mp := pprof.Lookup("mutex"); mp != nil { + mp.WriteTo(f, 0) + } + f.Close() + runtime.SetMutexProfileFraction(0) + }) + return start, stop + + case profileBlock: + start = startCallback(func() { + runtime.SetBlockProfileRate(prof.rate) + }) + 
stop = stopCallback(func() { + pprof.Lookup("block").WriteTo(f, 0) + f.Close() + runtime.SetBlockProfileRate(0) + }) + return start, stop + + case profileThreads: + start = startCallback(func() {}) + stop = stopCallback(func() { + if mp := pprof.Lookup("threadcreate"); mp != nil { + mp.WriteTo(f, 0) + } + f.Close() + }) + return start, stop + + case profileTrace: + start = startCallback(func() { + if err := trace.Start(f); err != nil { + log.Fatalf("pprof: could not start trace: %v", err) + } + }) + stop = stopCallback(func() { + trace.Stop() + f.Close() + }) + return start, stop + + case profileGoroutine: + start = startCallback(func() {}) + stop = stopCallback(func() { + if mp := pprof.Lookup("goroutine"); mp != nil { + mp.WriteTo(f, 0) + } + f.Close() + }) + return start, stop + + default: + panic("unsupported profile mode") + } +} + +func init() { + OnInit(func() { + prof, err := parseProfileFlag(*pprofFlag) + if err != nil { + log.Fatal(err) + } + if prof != nil { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGUSR1) + start, stop := prof.init() + + if prof.waitSig { + go func() { + <-ch + start() + }() + } else { + start() + } + + go func() { + <-ch + stop() + }() + + OnTerm(stop) } }) } diff --git a/go/vt/servenv/pprof_test.go b/go/vt/servenv/pprof_test.go new file mode 100644 index 00000000000..8ccabc773ff --- /dev/null +++ b/go/vt/servenv/pprof_test.go @@ -0,0 +1,47 @@ +package servenv + +import ( + "reflect" + "testing" +) + +func TestParseProfileFlag(t *testing.T) { + tests := []struct { + arg string + want *profile + wantErr bool + }{ + {"", nil, false}, + {"mem", &profile{mode: profileMemHeap, rate: 4096}, false}, + {"mem,rate=1234", &profile{mode: profileMemHeap, rate: 1234}, false}, + {"mem,rate", nil, true}, + {"mem,rate=foobar", nil, true}, + {"mem=allocs", &profile{mode: profileMemAllocs, rate: 4096}, false}, + {"mem=allocs,rate=420", &profile{mode: profileMemAllocs, rate: 420}, false}, + {"block", &profile{mode: profileBlock, rate: 
1}, false}, + {"block,rate=4", &profile{mode: profileBlock, rate: 4}, false}, + {"cpu", &profile{mode: profileCPU}, false}, + {"cpu,quiet", &profile{mode: profileCPU, quiet: true}, false}, + {"cpu,quiet=true", &profile{mode: profileCPU, quiet: true}, false}, + {"cpu,quiet=false", &profile{mode: profileCPU, quiet: false}, false}, + {"cpu,quiet=foobar", nil, true}, + {"cpu,path=", &profile{mode: profileCPU, path: ""}, false}, + {"cpu,path", nil, true}, + {"cpu,path=a", &profile{mode: profileCPU, path: "a"}, false}, + {"cpu,path=a/b/c/d", &profile{mode: profileCPU, path: "a/b/c/d"}, false}, + {"cpu,waitSig", &profile{mode: profileCPU, waitSig: true}, false}, + {"cpu,path=a/b,waitSig", &profile{mode: profileCPU, waitSig: true, path: "a/b"}, false}, + } + for _, tt := range tests { + t.Run(tt.arg, func(t *testing.T) { + got, err := parseProfileFlag(tt.arg) + if (err != nil) != tt.wantErr { + t.Errorf("parseProfileFlag() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("parseProfileFlag() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/go/vt/servenv/run.go b/go/vt/servenv/run.go index db69c6214bb..25b1e189d1b 100644 --- a/go/vt/servenv/run.go +++ b/go/vt/servenv/run.go @@ -18,17 +18,22 @@ package servenv import ( "fmt" + "net" "net/http" "net/url" + "os" + "os/signal" + "syscall" "time" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/proc" "vitess.io/vitess/go/vt/log" ) var ( onCloseHooks event.Hooks + // ExitChan waits for a signal that tells the process to terminate + ExitChan chan os.Signal ) // Run starts listening for RPC and HTTP requests, @@ -40,13 +45,16 @@ func Run(port int) { serveGRPC() serveSocketFile() - l, err := proc.Listen(fmt.Sprintf("%v", port)) + l, err := net.Listen("tcp", fmt.Sprintf(":%v", port)) if err != nil { log.Exit(err) } go http.Serve(l, nil) - proc.Wait() + ExitChan = make(chan os.Signal, 1) + signal.Notify(ExitChan, syscall.SIGTERM, syscall.SIGINT) + // Wait for 
signal + <-ExitChan l.Close() startTime := time.Now() diff --git a/go/vt/servenv/servenv.go b/go/vt/servenv/servenv.go index 0303aef61ab..9695b48c1e5 100644 --- a/go/vt/servenv/servenv.go +++ b/go/vt/servenv/servenv.go @@ -33,7 +33,6 @@ import ( "net/url" "os" "os/signal" - "runtime" "strings" "sync" "syscall" @@ -56,11 +55,11 @@ var ( Port *int // Flags to alter the behavior of the library. - lameduckPeriod = flag.Duration("lameduck-period", 50*time.Millisecond, "keep running at least this long after SIGTERM before stopping") - onTermTimeout = flag.Duration("onterm_timeout", 10*time.Second, "wait no more than this for OnTermSync handlers before stopping") - memProfileRate = flag.Int("mem-profile-rate", 512*1024, "profile every n bytes allocated") - mutexProfileFraction = flag.Int("mutex-profile-fraction", 0, "profile every n mutex contention events (see runtime.SetMutexProfileFraction)") - catchSigpipe = flag.Bool("catch-sigpipe", false, "catch and ignore SIGPIPE on stdout and stderr if specified") + lameduckPeriod = flag.Duration("lameduck-period", 50*time.Millisecond, "keep running at least this long after SIGTERM before stopping") + onTermTimeout = flag.Duration("onterm_timeout", 10*time.Second, "wait no more than this for OnTermSync handlers before stopping") + _ = flag.Int("mem-profile-rate", 512*1024, "deprecated: use '-pprof=mem' instead") + _ = flag.Int("mutex-profile-fraction", 0, "deprecated: use '-pprof=mutex' instead") + catchSigpipe = flag.Bool("catch-sigpipe", false, "catch and ignore SIGPIPE on stdout and stderr if specified") // mutex used to protect the Init function mu sync.Mutex @@ -106,13 +105,6 @@ func Init() { log.Exitf("servenv.Init: running this as root makes no sense") } - runtime.MemProfileRate = *memProfileRate - - if *mutexProfileFraction != 0 { - log.Infof("setting mutex profile fraction to %v", *mutexProfileFraction) - runtime.SetMutexProfileFraction(*mutexProfileFraction) - } - // We used to set this limit directly, but you pretty 
much have to // use a root account to allow increasing a limit reliably. Dropping // privileges is also tricky. The best strategy is to make a shell diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go new file mode 100644 index 00000000000..e6d25d406a1 --- /dev/null +++ b/go/vt/servenv/version.go @@ -0,0 +1,3 @@ +package servenv + +const versionName = "10.0.2" diff --git a/go/vt/sqlparser/Makefile b/go/vt/sqlparser/Makefile index 1ca0af6755e..9dfe647113d 100644 --- a/go/vt/sqlparser/Makefile +++ b/go/vt/sqlparser/Makefile @@ -1,11 +1,11 @@ # Copyright 2019 The Vitess Authors. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -15,7 +15,7 @@ MAKEFLAGS = -s sql.go: sql.y - go run golang.org/x/tools/cmd/goyacc -o sql.go sql.y + go run ./goyacc -fast-append -o sql.go sql.y gofmt -w sql.go clean: diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index c9c78f5ca5d..ada3e09cc20 100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -59,6 +59,9 @@ const ( StmtVStream StmtLockTables StmtUnlockTables + StmtFlush + StmtCallProc + StmtRevert ) //ASTToStatementType returns a StatementType from an AST stmt @@ -78,11 +81,13 @@ func ASTToStatementType(stmt Statement) StatementType { return StmtShow case DDLStatement, DBDDLStatement, *AlterVschema: return StmtDDL + case *RevertMigration: + return StmtRevert case *Use: return StmtUse case *OtherRead, *OtherAdmin, *Load: return StmtOther - case *Explain: + case Explain: return StmtExplain case *Begin: return StmtBegin @@ -100,6 +105,10 @@ func ASTToStatementType(stmt Statement) StatementType { return StmtLockTables case *UnlockTables: return StmtUnlockTables + case *Flush: + return StmtFlush + case *CallProc: + return StmtCallProc default: return StmtUnknown } @@ -108,7 +117,7 @@ func ASTToStatementType(stmt Statement) StatementType { //CanNormalize takes Statement and returns if the statement can be normalized. func CanNormalize(stmt Statement) bool { switch stmt.(type) { - case *Select, *Union, *Insert, *Update, *Delete, *Set: + case *Select, *Union, *Insert, *Update, *Delete, *Set, *CallProc: // TODO: we could merge this logic into ASTrewriter return true } return false @@ -124,11 +133,17 @@ func CachePlan(stmt Statement) bool { return false } -//IsSetStatement takes Statement and returns if the statement is set statement. -func IsSetStatement(stmt Statement) bool { - switch stmt.(type) { +//MustRewriteAST takes Statement and returns true if RewriteAST must run on it for correct execution irrespective of user flags. 
+func MustRewriteAST(stmt Statement) bool { + switch node := stmt.(type) { case *Set: return true + case *Show: + switch node.Internal.(type) { + case *ShowBasic: + return true + } + return false } return false } @@ -157,6 +172,8 @@ func Preview(sql string) StatementType { return StmtStream case "vstream": return StmtVStream + case "revert": + return StmtRevert case "insert": return StmtInsert case "replace": @@ -187,8 +204,10 @@ func Preview(sql string) StatementType { return StmtRollback } switch loweredFirstWord { - case "create", "alter", "rename", "drop", "truncate", "flush": + case "create", "alter", "rename", "drop", "truncate": return StmtDDL + case "flush": + return StmtFlush case "set": return StmtSet case "show": @@ -217,6 +236,8 @@ func (s StatementType) String() string { return "STREAM" case StmtVStream: return "VSTREAM" + case StmtRevert: + return "REVERT" case StmtInsert: return "INSERT" case StmtReplace: @@ -255,6 +276,10 @@ func (s StatementType) String() string { return "LOCK_TABLES" case StmtUnlockTables: return "UNLOCK_TABLES" + case StmtFlush: + return "FLUSH" + case StmtCallProc: + return "CALL_PROC" default: return "UNKNOWN" } @@ -391,9 +416,9 @@ func NewPlanValue(node Expr) (sqltypes.PlanValue, error) { } return sqltypes.PlanValue{Value: n}, nil case FloatVal: - return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.Float64, node.Val)}, nil + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.Float64, node.Bytes())}, nil case StrVal: - return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val)}, nil + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, node.Bytes())}, nil case HexVal: v, err := node.HexDecode() if err != nil { diff --git a/go/vt/sqlparser/analyzer_test.go b/go/vt/sqlparser/analyzer_test.go index 3ec58e7f02e..7bf9a2590b5 100644 --- a/go/vt/sqlparser/analyzer_test.go +++ b/go/vt/sqlparser/analyzer_test.go @@ -77,6 +77,7 @@ func TestPreview(t *testing.T) { 
{"grant", StmtPriv}, {"revoke", StmtPriv}, {"truncate", StmtDDL}, + {"flush", StmtFlush}, {"unknown", StmtUnknown}, {"/* leading comment */ select ...", StmtSelect}, @@ -244,7 +245,7 @@ func TestIsColName(t *testing.T) { in: &ColName{}, out: true, }, { - in: newHexLiteral(""), + in: NewHexLiteral(""), }} for _, tc := range testcases { out := IsColName(tc.in) @@ -259,16 +260,16 @@ func TestIsValue(t *testing.T) { in Expr out bool }{{ - in: newStrLiteral("aa"), + in: NewStrLiteral("aa"), out: true, }, { - in: newHexLiteral("3131"), + in: NewHexLiteral("3131"), out: true, }, { - in: newIntLiteral("1"), + in: NewIntLiteral("1"), out: true, }, { - in: newArgument(":a"), + in: NewArgument(":a"), out: true, }, { in: &NullVal{}, @@ -299,7 +300,7 @@ func TestIsNull(t *testing.T) { in: &NullVal{}, out: true, }, { - in: newStrLiteral(""), + in: NewStrLiteral(""), }} for _, tc := range testcases { out := IsNull(tc.in) @@ -314,7 +315,7 @@ func TestIsSimpleTuple(t *testing.T) { in Expr out bool }{{ - in: ValTuple{newStrLiteral("aa")}, + in: ValTuple{NewStrLiteral("aa")}, out: true, }, { in: ValTuple{&ColName{}}, @@ -349,37 +350,37 @@ func TestNewPlanValue(t *testing.T) { }, { in: &Literal{ Type: IntVal, - Val: []byte("10"), + Val: "10", }, out: sqltypes.PlanValue{Value: sqltypes.NewInt64(10)}, }, { in: &Literal{ Type: IntVal, - Val: []byte("1111111111111111111111111111111111111111"), + Val: "1111111111111111111111111111111111111111", }, err: "value out of range", }, { in: &Literal{ Type: StrVal, - Val: []byte("strval"), + Val: "strval", }, out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, }, { in: &Literal{ Type: BitVal, - Val: []byte("01100001"), + Val: "01100001", }, err: "expression is too complex", }, { in: &Literal{ Type: HexVal, - Val: []byte("3131"), + Val: "3131", }, out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("11")}, }, { in: &Literal{ Type: HexVal, - Val: []byte("313"), + Val: "313", }, err: "odd length hex string", }, { @@ -390,7 +391,7 @@ func 
TestNewPlanValue(t *testing.T) { Argument(":valarg"), &Literal{ Type: StrVal, - Val: []byte("strval"), + Val: "strval", }, }, out: sqltypes.PlanValue{ @@ -411,7 +412,7 @@ func TestNewPlanValue(t *testing.T) { }, { in: &Literal{ Type: FloatVal, - Val: []byte("2.1"), + Val: "2.1", }, out: sqltypes.PlanValue{Value: sqltypes.NewFloat64(2.1)}, }, { @@ -419,7 +420,7 @@ func TestNewPlanValue(t *testing.T) { Operator: Latin1Op, Expr: &Literal{ Type: StrVal, - Val: []byte("strval"), + Val: "strval", }, }, out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, @@ -428,7 +429,7 @@ func TestNewPlanValue(t *testing.T) { Operator: UBinaryOp, Expr: &Literal{ Type: StrVal, - Val: []byte("strval"), + Val: "strval", }, }, out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, @@ -437,7 +438,7 @@ func TestNewPlanValue(t *testing.T) { Operator: Utf8mb4Op, Expr: &Literal{ Type: StrVal, - Val: []byte("strval"), + Val: "strval", }, }, out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, @@ -446,7 +447,7 @@ func TestNewPlanValue(t *testing.T) { Operator: Utf8Op, Expr: &Literal{ Type: StrVal, - Val: []byte("strval"), + Val: "strval", }, }, out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, @@ -455,7 +456,7 @@ func TestNewPlanValue(t *testing.T) { Operator: UMinusOp, Expr: &Literal{ Type: FloatVal, - Val: []byte("2.1"), + Val: "2.1", }, }, err: "expression is too complex", @@ -481,19 +482,3 @@ var mustMatch = utils.MustMatchFn( }, []string{".Conn"}, // ignored fields ) - -func newStrLiteral(in string) *Literal { - return NewStrLiteral([]byte(in)) -} - -func newIntLiteral(in string) *Literal { - return NewIntLiteral([]byte(in)) -} - -func newHexLiteral(in string) *Literal { - return NewHexLiteral([]byte(in)) -} - -func newArgument(in string) Expr { - return NewArgument([]byte(in)) -} diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index c508c526c8e..2c77fe8e124 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ 
-16,13 +16,6 @@ limitations under the License. package sqlparser -import ( - "fmt" - "strings" - - "vitess.io/vitess/go/sqltypes" -) - /* This is the Vitess AST. This file should only contain pure struct declarations, or methods used to mark a struct as implementing an interface. All other methods @@ -33,6 +26,7 @@ related to these structs live in ast_funcs.go // generated by the parser. type SQLNode interface { Format(buf *TrackedBuffer) + formatFast(buf *TrackedBuffer) } // Statements @@ -45,24 +39,25 @@ type ( // SelectStatement any SELECT statement. SelectStatement interface { + Statement iSelectStatement() - iStatement() iInsertRows() AddOrder(*Order) SetLimit(*Limit) SetLock(lock Lock) MakeDistinct() - SQLNode } // DDLStatement represents any DDL Statement DDLStatement interface { iDDLStatement() IsFullyParsed() bool + IsTemporary() bool GetTable() TableName GetAction() DDLAction GetOptLike() *OptLike GetIfExists() bool + GetIfNotExists() bool GetTableSpec() *TableSpec GetFromTables() TableNames GetToTables() TableNames @@ -86,6 +81,12 @@ type ( SQLNode } + // Explain is an interface that represents the Explain statements + Explain interface { + Statement + iExplain() + } + // AddConstraintDefinition represents a ADD CONSTRAINT alter option AddConstraintDefinition struct { ConstraintDefinition *ConstraintDefinition @@ -155,7 +156,7 @@ type ( // DropKey is used to drop a key in an alter table statement DropKey struct { Type DropKeyType - Name string + Name ColIdent } // Force is used to specify force alter option in an alter table statement @@ -174,15 +175,15 @@ type ( Cols Columns } - // RenameTable clause is used to rename the table in an alter table statement - RenameTable struct { + // RenameTableName clause is used to rename the table in an alter table statement + RenameTableName struct { Table TableName } // RenameIndex clause is used to rename indexes in an alter table statement RenameIndex struct { - OldName string - NewName string + OldName ColIdent + 
NewName ColIdent } // Validation clause is used to specify whether to use validation or not @@ -336,7 +337,8 @@ type ( // DropDatabase represents a DROP database statement. DropDatabase struct { - DBName string + Comments Comments + DBName TableIdent IfExists bool } @@ -352,7 +354,8 @@ type ( // CreateDatabase represents a CREATE database statement. CreateDatabase struct { - DBName string + Comments Comments + DBName TableIdent IfNotExists bool CreateOptions []CollateAndCharset FullyParsed bool @@ -360,30 +363,35 @@ type ( // AlterDatabase represents a ALTER database statement. AlterDatabase struct { - DBName string + DBName TableIdent UpdateDataDirectory bool AlterOptions []CollateAndCharset FullyParsed bool } - // DDL represents a CREATE, ALTER, DROP, RENAME, TRUNCATE or ANALYZE statement. - DDL struct { - Action DDLAction + // Flush represents a FLUSH statement. + Flush struct { + IsLocal bool + FlushOptions []string + TableNames TableNames + WithLock bool + ForExport bool + } - // FromTables is set if Action is RenameDDLAction or DropDDLAction. - FromTables TableNames + // RenameTablePair represents the name of the original table and what it is going to be set in a RENAME TABLE statement. + RenameTablePair struct { + FromTable TableName + ToTable TableName + } - // ToTables is set if Action is RenameDDLAction. - ToTables TableNames + // RenameTable represents a RENAME TABLE statement. + RenameTable struct { + TablePairs []*RenameTablePair + } - // Table is set if Action is other than RenameDDLAction or DropDDLAction. + // TruncateTable represents a TRUNCATE TABLE statement. + TruncateTable struct { Table TableName - - // The following fields are set if a DDL was fully analyzed. - IfExists bool - TableSpec *TableSpec - OptLike *OptLike - PartitionSpec *PartitionSpec } // AlterVschema represents a ALTER VSCHEMA statement. 
@@ -401,6 +409,20 @@ type ( AutoIncSpec *AutoIncSpec } + // RevertMigration represents a REVERT VITESS_MIGRATION statement + RevertMigration struct { + UUID string + } + + // AlterMigrationType represents the type of operation in an ALTER VITESS_MIGRATION statement + AlterMigrationType int8 + + // AlterMigration represents a ALTER VITESS_MIGRATION statement + AlterMigration struct { + Type AlterMigrationType + UUID string + } + // AlterTable represents a ALTER TABLE statement. AlterTable struct { Table TableName @@ -411,6 +433,7 @@ type ( // DropTable represents a DROP TABLE statement. DropTable struct { + Temp bool FromTables TableNames // The following fields are set if a DDL was fully analyzed. IfExists bool @@ -422,18 +445,9 @@ type ( IfExists bool } - // CreateIndex represents a CREATE INDEX query - CreateIndex struct { - Constraint string - Name ColIdent - Table TableName - Columns []*IndexColumn - Options []*IndexOption - FullyParsed bool - } - // CreateTable represents a CREATE TABLE statement. CreateTable struct { + Temp bool Table TableName IfNotExists bool TableSpec *TableSpec @@ -510,26 +524,12 @@ type ( Name ColIdent } - // Explain represents an EXPLAIN statement - Explain struct { - Type ExplainType - Statement Statement + // CallProc represents a CALL statement + CallProc struct { + Name TableName + Params Exprs } - // ExplainType is an enum for Explain.Type - ExplainType int8 - - // OtherRead represents a DESCRIBE, or EXPLAIN statement. - // It should be used only as an indicator. It does not contain - // the full AST for the statement. - OtherRead struct{} - - // OtherAdmin represents a misc statement that relies on ADMIN privileges, - // such as REPAIR, OPTIMIZE, or TRUNCATE statement. - // It should be used only as an indicator. It does not contain - // the full AST for the statement. 
- OtherAdmin struct{} - // LockType is an enum for Lock Types LockType int8 @@ -549,6 +549,32 @@ type ( // UnlockTables represents the unlock statement UnlockTables struct{} + + // ExplainType is an enum for ExplainStmt.Type + ExplainType int8 + + // ExplainStmt represents an Explain statement + ExplainStmt struct { + Type ExplainType + Statement Statement + } + + // ExplainTab represents the Explain table + ExplainTab struct { + Table TableName + Wild string + } + + // OtherRead represents a DESCRIBE, or EXPLAIN statement. + // It should be used only as an indicator. It does not contain + // the full AST for the statement. + OtherRead struct{} + + // OtherAdmin represents a misc statement that relies on ADMIN privileges, + // such as REPAIR, OPTIMIZE, or TRUNCATE statement. + // It should be used only as an indicator. It does not contain + // the full AST for the statement. + OtherAdmin struct{} ) func (*Union) iStatement() {} @@ -561,7 +587,7 @@ func (*Delete) iStatement() {} func (*Set) iStatement() {} func (*SetTransaction) iStatement() {} func (*DropDatabase) iStatement() {} -func (*DDL) iStatement() {} +func (*Flush) iStatement() {} func (*Show) iStatement() {} func (*Use) iStatement() {} func (*Begin) iStatement() {} @@ -570,14 +596,12 @@ func (*Rollback) iStatement() {} func (*SRollback) iStatement() {} func (*Savepoint) iStatement() {} func (*Release) iStatement() {} -func (*Explain) iStatement() {} func (*OtherRead) iStatement() {} func (*OtherAdmin) iStatement() {} func (*Select) iSelectStatement() {} func (*Union) iSelectStatement() {} func (*ParenSelect) iSelectStatement() {} func (*Load) iStatement() {} -func (*CreateIndex) iStatement() {} func (*CreateDatabase) iStatement() {} func (*AlterDatabase) iStatement() {} func (*CreateTable) iStatement() {} @@ -587,17 +611,24 @@ func (*LockTables) iStatement() {} func (*UnlockTables) iStatement() {} func (*AlterTable) iStatement() {} func (*AlterVschema) iStatement() {} +func (*AlterMigration) iStatement() 
{} +func (*RevertMigration) iStatement() {} func (*DropTable) iStatement() {} func (*DropView) iStatement() {} - -func (*DDL) iDDLStatement() {} -func (*CreateIndex) iDDLStatement() {} -func (*CreateView) iDDLStatement() {} -func (*AlterView) iDDLStatement() {} -func (*CreateTable) iDDLStatement() {} -func (*DropTable) iDDLStatement() {} -func (*DropView) iDDLStatement() {} -func (*AlterTable) iDDLStatement() {} +func (*TruncateTable) iStatement() {} +func (*RenameTable) iStatement() {} +func (*CallProc) iStatement() {} +func (*ExplainStmt) iStatement() {} +func (*ExplainTab) iStatement() {} + +func (*CreateView) iDDLStatement() {} +func (*AlterView) iDDLStatement() {} +func (*CreateTable) iDDLStatement() {} +func (*DropTable) iDDLStatement() {} +func (*DropView) iDDLStatement() {} +func (*AlterTable) iDDLStatement() {} +func (*TruncateTable) iDDLStatement() {} +func (*RenameTable) iDDLStatement() {} func (*AddConstraintDefinition) iAlterOption() {} func (*AddIndexDefinition) iAlterOption() {} @@ -614,19 +645,22 @@ func (*DropKey) iAlterOption() {} func (*Force) iAlterOption() {} func (*LockOption) iAlterOption() {} func (*OrderByOption) iAlterOption() {} -func (*RenameTable) iAlterOption() {} +func (*RenameTableName) iAlterOption() {} func (*RenameIndex) iAlterOption() {} func (*Validation) iAlterOption() {} func (TableOptions) iAlterOption() {} +func (*ExplainStmt) iExplain() {} +func (*ExplainTab) iExplain() {} + // IsFullyParsed implements the DDLStatement interface -func (*DDL) IsFullyParsed() bool { - return false +func (*TruncateTable) IsFullyParsed() bool { + return true } // IsFullyParsed implements the DDLStatement interface -func (node *CreateIndex) IsFullyParsed() bool { - return node.FullyParsed +func (*RenameTable) IsFullyParsed() bool { + return true } // IsFullyParsed implements the DDLStatement interface @@ -659,8 +693,48 @@ func (node *AlterView) IsFullyParsed() bool { return true } +// IsTemporary implements the DDLStatement interface +func 
(*TruncateTable) IsTemporary() bool { + return false +} + +// IsTemporary implements the DDLStatement interface +func (*RenameTable) IsTemporary() bool { + return false +} + +// IsTemporary implements the DDLStatement interface +func (node *CreateTable) IsTemporary() bool { + return node.Temp +} + +// IsTemporary implements the DDLStatement interface +func (node *AlterTable) IsTemporary() bool { + return false +} + +// IsTemporary implements the DDLStatement interface +func (node *CreateView) IsTemporary() bool { + return false +} + +// IsTemporary implements the DDLStatement interface +func (node *DropView) IsTemporary() bool { + return false +} + +// IsTemporary implements the DDLStatement interface +func (node *DropTable) IsTemporary() bool { + return node.Temp +} + +// IsTemporary implements the DDLStatement interface +func (node *AlterView) IsTemporary() bool { + return false +} + // GetTable implements the DDLStatement interface -func (node *CreateIndex) GetTable() TableName { +func (node *TruncateTable) GetTable() TableName { return node.Table } @@ -684,11 +758,6 @@ func (node *AlterView) GetTable() TableName { return node.ViewName } -// GetTable implements the DDLStatement interface -func (node *DDL) GetTable() TableName { - return node.Table -} - // GetTable implements the DDLStatement interface func (node *DropView) GetTable() TableName { return TableName{} @@ -699,14 +768,14 @@ func (node *DropTable) GetTable() TableName { return TableName{} } -// GetAction implements the DDLStatement interface -func (node *DDL) GetAction() DDLAction { - return node.Action +// GetTable implements the DDLStatement interface +func (node *RenameTable) GetTable() TableName { + return TableName{} } // GetAction implements the DDLStatement interface -func (node *CreateIndex) GetAction() DDLAction { - return AlterDDLAction +func (node *TruncateTable) GetAction() DDLAction { + return TruncateDDLAction } // GetAction implements the DDLStatement interface @@ -729,6 +798,11 @@ func 
(node *AlterView) GetAction() DDLAction { return AlterDDLAction } +// GetAction implements the DDLStatement interface +func (node *RenameTable) GetAction() DDLAction { + return RenameDDLAction +} + // GetAction implements the DDLStatement interface func (node *DropTable) GetAction() DDLAction { return DropDDLAction @@ -740,17 +814,17 @@ func (node *DropView) GetAction() DDLAction { } // GetOptLike implements the DDLStatement interface -func (node *DDL) GetOptLike() *OptLike { +func (node *CreateTable) GetOptLike() *OptLike { return node.OptLike } // GetOptLike implements the DDLStatement interface -func (node *CreateTable) GetOptLike() *OptLike { - return node.OptLike +func (node *TruncateTable) GetOptLike() *OptLike { + return nil } // GetOptLike implements the DDLStatement interface -func (node *CreateIndex) GetOptLike() *OptLike { +func (node *RenameTable) GetOptLike() *OptLike { return nil } @@ -780,8 +854,8 @@ func (node *DropView) GetOptLike() *OptLike { } // GetIfExists implements the DDLStatement interface -func (node *DDL) GetIfExists() bool { - return node.IfExists +func (node *RenameTable) GetIfExists() bool { + return false } // GetIfExists implements the DDLStatement interface @@ -790,7 +864,7 @@ func (node *CreateTable) GetIfExists() bool { } // GetIfExists implements the DDLStatement interface -func (node *CreateIndex) GetIfExists() bool { +func (node *TruncateTable) GetIfExists() bool { return false } @@ -819,9 +893,44 @@ func (node *DropView) GetIfExists() bool { return node.IfExists } -// GetTableSpec implements the DDLStatement interface -func (node *DDL) GetTableSpec() *TableSpec { - return node.TableSpec +// GetIfNotExists implements the DDLStatement interface +func (node *RenameTable) GetIfNotExists() bool { + return false +} + +// GetIfNotExists implements the DDLStatement interface +func (node *CreateTable) GetIfNotExists() bool { + return node.IfNotExists +} + +// GetIfNotExists implements the DDLStatement interface +func (node 
*TruncateTable) GetIfNotExists() bool { + return false +} + +// GetIfNotExists implements the DDLStatement interface +func (node *AlterTable) GetIfNotExists() bool { + return false +} + +// GetIfNotExists implements the DDLStatement interface +func (node *CreateView) GetIfNotExists() bool { + return false +} + +// GetIfNotExists implements the DDLStatement interface +func (node *AlterView) GetIfNotExists() bool { + return false +} + +// GetIfNotExists implements the DDLStatement interface +func (node *DropTable) GetIfNotExists() bool { + return false +} + +// GetIfNotExists implements the DDLStatement interface +func (node *DropView) GetIfNotExists() bool { + return false } // GetTableSpec implements the DDLStatement interface @@ -830,7 +939,12 @@ func (node *CreateTable) GetTableSpec() *TableSpec { } // GetTableSpec implements the DDLStatement interface -func (node *CreateIndex) GetTableSpec() *TableSpec { +func (node *RenameTable) GetTableSpec() *TableSpec { + return nil +} + +// GetTableSpec implements the DDLStatement interface +func (node *TruncateTable) GetTableSpec() *TableSpec { return nil } @@ -860,12 +974,16 @@ func (node *DropView) GetTableSpec() *TableSpec { } // GetFromTables implements the DDLStatement interface -func (node *DDL) GetFromTables() TableNames { - return node.FromTables +func (node *RenameTable) GetFromTables() TableNames { + var fromTables TableNames + for _, pair := range node.TablePairs { + fromTables = append(fromTables, pair.FromTable) + } + return fromTables } // GetFromTables implements the DDLStatement interface -func (node *CreateIndex) GetFromTables() TableNames { +func (node *TruncateTable) GetFromTables() TableNames { return nil } @@ -900,12 +1018,17 @@ func (node *AlterView) GetFromTables() TableNames { } // SetFromTables implements DDLStatement. 
-func (node *DDL) SetFromTables(tables TableNames) { - node.FromTables = tables +func (node *RenameTable) SetFromTables(tables TableNames) { + if len(node.TablePairs) != len(tables) { + return + } + for i := range node.TablePairs { + node.TablePairs[i].FromTable = tables[i] + } } // SetFromTables implements DDLStatement. -func (node *CreateIndex) SetFromTables(tables TableNames) { +func (node *TruncateTable) SetFromTables(tables TableNames) { // irrelevant } @@ -940,12 +1063,16 @@ func (node *AlterView) SetFromTables(tables TableNames) { } // GetToTables implements the DDLStatement interface -func (node *DDL) GetToTables() TableNames { - return node.ToTables +func (node *RenameTable) GetToTables() TableNames { + var toTables TableNames + for _, pair := range node.TablePairs { + toTables = append(toTables, pair.ToTable) + } + return toTables } // GetToTables implements the DDLStatement interface -func (node *CreateIndex) GetToTables() TableNames { +func (node *TruncateTable) GetToTables() TableNames { return nil } @@ -953,7 +1080,7 @@ func (node *CreateIndex) GetToTables() TableNames { func (node *AlterTable) GetToTables() TableNames { for _, option := range node.AlterOptions { switch altOption := option.(type) { - case *RenameTable: + case *RenameTableName: return TableNames{altOption.Table} } } @@ -986,14 +1113,13 @@ func (node *DropView) GetToTables() TableNames { } // AffectedTables returns the list table names affected by the DDLStatement. -func (node *DDL) AffectedTables() TableNames { - if node.Action == RenameDDLAction || node.Action == DropDDLAction { - list := make(TableNames, 0, len(node.FromTables)+len(node.ToTables)) - list = append(list, node.FromTables...) - list = append(list, node.ToTables...) 
- return list +func (node *RenameTable) AffectedTables() TableNames { + list := make(TableNames, 0, 2*len(node.TablePairs)) + for _, pair := range node.TablePairs { + list = append(list, pair.FromTable) + list = append(list, pair.ToTable) } - return TableNames{node.Table} + return list } // AffectedTables returns the list table names affected by the DDLStatement. @@ -1001,7 +1127,7 @@ func (node *AlterTable) AffectedTables() TableNames { affectedTables := TableNames{node.Table} for _, option := range node.AlterOptions { switch altOption := option.(type) { - case *RenameTable: + case *RenameTableName: affectedTables = append(affectedTables, altOption.Table) } } @@ -1009,7 +1135,7 @@ func (node *AlterTable) AffectedTables() TableNames { } // AffectedTables implements DDLStatement. -func (node *CreateIndex) AffectedTables() TableNames { +func (node *TruncateTable) AffectedTables() TableNames { return TableNames{node.Table} } @@ -1039,13 +1165,7 @@ func (node *DropView) AffectedTables() TableNames { } // SetTable implements DDLStatement. -func (node *CreateIndex) SetTable(qualifier string, name string) { - node.Table.Qualifier = NewTableIdent(qualifier) - node.Table.Name = NewTableIdent(name) -} - -// SetTable implements DDLStatement. -func (node *DDL) SetTable(qualifier string, name string) { +func (node *TruncateTable) SetTable(qualifier string, name string) { node.Table.Qualifier = NewTableIdent(qualifier) node.Table.Name = NewTableIdent(name) } @@ -1074,6 +1194,9 @@ func (node *AlterView) SetTable(qualifier string, name string) { node.ViewName.Name = NewTableIdent(name) } +// SetTable implements DDLStatement. +func (node *RenameTable) SetTable(qualifier string, name string) {} + // SetTable implements DDLStatement. 
func (node *DropTable) SetTable(qualifier string, name string) {} @@ -1101,17 +1224,17 @@ func (node *AlterDatabase) IsFullyParsed() bool { // GetDatabaseName implements the DBDDLStatement interface func (node *DropDatabase) GetDatabaseName() string { - return node.DBName + return node.DBName.String() } // GetDatabaseName implements the DBDDLStatement interface func (node *CreateDatabase) GetDatabaseName() string { - return node.DBName + return node.DBName.String() } // GetDatabaseName implements the DBDDLStatement interface func (node *AlterDatabase) GetDatabaseName() string { - return node.DBName + return node.DBName.String() } // ParenSelect can actually not be a top level statement, @@ -1138,34 +1261,28 @@ type ( ShowCollationFilterOpt Expr } - // ShowColumns is of ShowInternal type, holds the show columns statement. - ShowColumns struct { - Full string - Table TableName - DbName string - Filter *ShowFilter - } - - // ShowTableStatus is of ShowInternal type, holds SHOW TABLE STATUS queries. - ShowTableStatus struct { - DatabaseName string - Filter *ShowFilter - } - // ShowCommandType represents the show statement type. ShowCommandType int8 // ShowBasic is of ShowInternal type, holds Simple SHOW queries with a filter. ShowBasic struct { Command ShowCommandType + Full bool + Tbl TableName + DbName TableIdent Filter *ShowFilter } + + // ShowCreate is of ShowInternal type, holds SHOW CREATE queries. + ShowCreate struct { + Command ShowCommandType + Op TableName + } ) -func (*ShowLegacy) isShowInternal() {} -func (*ShowColumns) isShowInternal() {} -func (*ShowTableStatus) isShowInternal() {} -func (*ShowBasic) isShowInternal() {} +func (*ShowLegacy) isShowInternal() {} +func (*ShowBasic) isShowInternal() {} +func (*ShowCreate) isShowInternal() {} // InsertRows represents the rows for an INSERT statement. type InsertRows interface { @@ -1229,11 +1346,7 @@ type ColumnType struct { Type string // Generic field options. 
- NotNull bool - Autoincrement bool - Default Expr - OnUpdate Expr - Comment *Literal + Options *ColumnTypeOptions // Numeric field options Length *Literal @@ -1247,6 +1360,22 @@ type ColumnType struct { // Enum values EnumValues []string +} + +// ColumnTypeOptions are generic field options for a column type +type ColumnTypeOptions struct { + /* We need Null to be *bool to distinguish 3 cases - + 1. When Not Null is specified (Null = false) + 2. When Null is specified (Null = true) + 3. When nothing is specified (Null = nil) + The complexity arises from the fact that we do not know whether the column will be nullable or not if nothing is specified. + Therefore we do not know whether the column is nullable or not in case 3. + */ + Null *bool + Autoincrement bool + Default Expr + OnUpdate Expr + Comment *Literal // Key specification KeyOpt ColumnKeyOption @@ -1291,7 +1420,7 @@ type VindexParam struct { // ConstraintDefinition describes a constraint in a CREATE TABLE statement type ConstraintDefinition struct { - Name string + Name ColIdent Details ConstraintInfo } @@ -1325,7 +1454,7 @@ type ShowFilter struct { } // Comments represents a list of comments. -type Comments [][]byte +type Comments []string // SelectExprs represents SELECT expressions. type SelectExprs []SelectExpr @@ -1356,7 +1485,7 @@ type ( func (*StarExpr) iSelectExpr() {} func (*AliasedExpr) iSelectExpr() {} -func (Nextval) iSelectExpr() {} +func (*Nextval) iSelectExpr() {} // Columns represents an insert column list. type Columns []ColIdent @@ -1528,11 +1657,11 @@ type ( // Literal represents a fixed value. Literal struct { Type ValType - Val []byte + Val string } // Argument represents bindvariable expression - Argument []byte + Argument string // NullVal represents a NULL value. NullVal struct{} @@ -1800,1638 +1929,10 @@ type TableIdent struct { v string } -// Here follow all the Format implementations for AST nodes - -// Format formats the node. 
-func (node *Select) Format(buf *TrackedBuffer) { - var options string - addIf := func(b bool, s string) { - if b { - options += s - } - } - addIf(node.Distinct, DistinctStr) - if node.Cache != nil { - if *node.Cache { - options += SQLCacheStr - } else { - options += SQLNoCacheStr - } - } - addIf(node.StraightJoinHint, StraightJoinHint) - addIf(node.SQLCalcFoundRows, SQLCalcFoundRowsStr) - - buf.astPrintf(node, "select %v%s%v from %v%v%v%v%v%v%s%v", - node.Comments, options, node.SelectExprs, - node.From, node.Where, - node.GroupBy, node.Having, node.OrderBy, - node.Limit, node.Lock.ToString(), node.Into) -} - -// Format formats the node. -func (node *ParenSelect) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "(%v)", node.Select) -} - -// Format formats the node. -func (node *Union) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v", node.FirstStatement) - for _, us := range node.UnionSelects { - buf.astPrintf(node, "%v", us) - } - buf.astPrintf(node, "%v%v%s", node.OrderBy, node.Limit, node.Lock.ToString()) -} - -// Format formats the node. -func (node *UnionSelect) Format(buf *TrackedBuffer) { - if node.Distinct { - buf.astPrintf(node, " %s %v", UnionStr, node.Statement) - } else { - buf.astPrintf(node, " %s %v", UnionAllStr, node.Statement) - } -} - -// Format formats the node. -func (node *VStream) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "vstream %v%v from %v", - node.Comments, node.SelectExpr, node.Table) -} - -// Format formats the node. -func (node *Stream) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "stream %v%v from %v", - node.Comments, node.SelectExpr, node.Table) -} - -// Format formats the node. 
-func (node *Insert) Format(buf *TrackedBuffer) { - switch node.Action { - case InsertAct: - buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", - InsertStr, - node.Comments, node.Ignore.ToString(), - node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) - case ReplaceAct: - buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", - ReplaceStr, - node.Comments, node.Ignore.ToString(), - node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) - default: - buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", - "Unkown Insert Action", - node.Comments, node.Ignore.ToString(), - node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) - } - -} - -// Format formats the node. -func (node *Update) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "update %v%s%v set %v%v%v%v", - node.Comments, node.Ignore.ToString(), node.TableExprs, - node.Exprs, node.Where, node.OrderBy, node.Limit) -} - -// Format formats the node. -func (node *Delete) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "delete %v", node.Comments) - if node.Ignore { - buf.WriteString("ignore ") - } - if node.Targets != nil { - buf.astPrintf(node, "%v ", node.Targets) - } - buf.astPrintf(node, "from %v%v%v%v%v", node.TableExprs, node.Partitions, node.Where, node.OrderBy, node.Limit) -} - -// Format formats the node. -func (node *Set) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "set %v%v", node.Comments, node.Exprs) -} - -// Format formats the node. -func (node *SetTransaction) Format(buf *TrackedBuffer) { - if node.Scope == ImplicitScope { - buf.astPrintf(node, "set %vtransaction ", node.Comments) - } else { - buf.astPrintf(node, "set %v%s transaction ", node.Comments, node.Scope.ToString()) - } - - for i, char := range node.Characteristics { - if i > 0 { - buf.WriteString(", ") - } - buf.astPrintf(node, "%v", char) - } -} - -// Format formats the node. 
-func (node *DropDatabase) Format(buf *TrackedBuffer) { - exists := "" - if node.IfExists { - exists = " if exists" - } - buf.WriteString(fmt.Sprintf("%s database%s %v", DropStr, exists, node.DBName)) -} - -// Format formats the node. -func (node *DDL) Format(buf *TrackedBuffer) { - switch node.Action { - case CreateDDLAction: - if node.OptLike != nil { - buf.astPrintf(node, "%s table %v %v", CreateStr, node.Table, node.OptLike) - } else if node.TableSpec != nil { - buf.astPrintf(node, "%s table %v %v", CreateStr, node.Table, node.TableSpec) - } else { - buf.astPrintf(node, "%s table %v", CreateStr, node.Table) - } - case DropDDLAction: - exists := "" - if node.IfExists { - exists = " if exists" - } - buf.astPrintf(node, "%s table%s %v", DropStr, exists, node.FromTables) - case RenameDDLAction: - buf.astPrintf(node, "%s table %v to %v", RenameStr, node.FromTables[0], node.ToTables[0]) - for i := 1; i < len(node.FromTables); i++ { - buf.astPrintf(node, ", %v to %v", node.FromTables[i], node.ToTables[i]) - } - case AlterDDLAction: - if node.PartitionSpec != nil { - buf.astPrintf(node, "%s table %v %v", AlterStr, node.Table, node.PartitionSpec) - } else { - buf.astPrintf(node, "%s table %v", AlterStr, node.Table) - } - case FlushDDLAction: - buf.astPrintf(node, "%s", FlushStr) - default: - buf.astPrintf(node, "%s table %v", node.Action.ToString(), node.Table) - } -} - -// Format formats the node. 
-func (node *AlterVschema) Format(buf *TrackedBuffer) { - switch node.Action { - case CreateVindexDDLAction: - buf.astPrintf(node, "alter vschema create vindex %v %v", node.Table, node.VindexSpec) - case DropVindexDDLAction: - buf.astPrintf(node, "alter vschema drop vindex %v", node.Table) - case AddVschemaTableDDLAction: - buf.astPrintf(node, "alter vschema add table %v", node.Table) - case DropVschemaTableDDLAction: - buf.astPrintf(node, "alter vschema drop table %v", node.Table) - case AddColVindexDDLAction: - buf.astPrintf(node, "alter vschema on %v add vindex %v (", node.Table, node.VindexSpec.Name) - for i, col := range node.VindexCols { - if i != 0 { - buf.astPrintf(node, ", %v", col) - } else { - buf.astPrintf(node, "%v", col) - } - } - buf.astPrintf(node, ")") - if node.VindexSpec.Type.String() != "" { - buf.astPrintf(node, " %v", node.VindexSpec) - } - case DropColVindexDDLAction: - buf.astPrintf(node, "alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name) - case AddSequenceDDLAction: - buf.astPrintf(node, "alter vschema add sequence %v", node.Table) - case AddAutoIncDDLAction: - buf.astPrintf(node, "alter vschema on %v add auto_increment %v", node.Table, node.AutoIncSpec) - default: - buf.astPrintf(node, "%s table %v", node.Action.ToString(), node.Table) - } -} - -// Format formats the node. -func (node *OptLike) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "like %v", node.LikeTable) -} - -// Format formats the node. 
-func (node *PartitionSpec) Format(buf *TrackedBuffer) { - switch node.Action { - case ReorganizeAction: - buf.astPrintf(node, "%s ", ReorganizeStr) - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - buf.WriteString(" into (") - prefix = "" - for _, pd := range node.Definitions { - buf.astPrintf(node, "%s%v", prefix, pd) - prefix = ", " - } - buf.astPrintf(node, ")") - case AddAction: - buf.astPrintf(node, "%s (%v)", AddStr, node.Definitions[0]) - case DropAction: - buf.astPrintf(node, "%s ", DropPartitionStr) - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - case DiscardAction: - buf.astPrintf(node, "%s ", DiscardStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - buf.WriteString(" tablespace") - case ImportAction: - buf.astPrintf(node, "%s ", ImportStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - buf.WriteString(" tablespace") - case TruncateAction: - buf.astPrintf(node, "%s ", TruncatePartitionStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - case CoalesceAction: - buf.astPrintf(node, "%s %v", CoalesceStr, node.Number) - case ExchangeAction: - buf.astPrintf(node, "%s %v with table %v", ExchangeStr, node.Names[0], node.TableName) - if node.WithoutValidation { - buf.WriteString(" without validation") - } - case AnalyzeAction: - buf.astPrintf(node, "%s ", AnalyzePartitionStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - case CheckAction: - buf.astPrintf(node, 
"%s ", CheckStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - case OptimizeAction: - buf.astPrintf(node, "%s ", OptimizeStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - case RebuildAction: - buf.astPrintf(node, "%s ", RebuildStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - case RepairAction: - buf.astPrintf(node, "%s ", RepairStr) - if node.IsAll { - buf.WriteString("all") - } else { - prefix := "" - for _, n := range node.Names { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - } - case RemoveAction: - buf.WriteString(RemoveStr) - case UpgradeAction: - buf.WriteString(UpgradeStr) - default: - panic("unimplemented") - } -} - -// Format formats the node -func (node *PartitionDefinition) Format(buf *TrackedBuffer) { - if !node.Maxvalue { - buf.astPrintf(node, "partition %v values less than (%v)", node.Name, node.Limit) - } else { - buf.astPrintf(node, "partition %v values less than (maxvalue)", node.Name) - } -} - -// Format formats the node. 
-func (ts *TableSpec) Format(buf *TrackedBuffer) { - buf.astPrintf(ts, "(\n") - for i, col := range ts.Columns { - if i == 0 { - buf.astPrintf(ts, "\t%v", col) - } else { - buf.astPrintf(ts, ",\n\t%v", col) - } - } - for _, idx := range ts.Indexes { - buf.astPrintf(ts, ",\n\t%v", idx) - } - for _, c := range ts.Constraints { - buf.astPrintf(ts, ",\n\t%v", c) - } - - buf.astPrintf(ts, "\n)") - for i, opt := range ts.Options { - if i != 0 { - buf.WriteString(",\n ") - } - buf.astPrintf(ts, " %s", opt.Name) - if opt.String != "" { - buf.astPrintf(ts, " %s", opt.String) - } else if opt.Value != nil { - buf.astPrintf(ts, " %v", opt.Value) - } else { - buf.astPrintf(ts, " (%v)", opt.Tables) - } - } -} - -// Format formats the node. -func (col *ColumnDefinition) Format(buf *TrackedBuffer) { - buf.astPrintf(col, "%v %v", col.Name, &col.Type) -} - -// Format returns a canonical string representation of the type and all relevant options -func (ct *ColumnType) Format(buf *TrackedBuffer) { - buf.astPrintf(ct, "%s", ct.Type) - - if ct.Length != nil && ct.Scale != nil { - buf.astPrintf(ct, "(%v,%v)", ct.Length, ct.Scale) - - } else if ct.Length != nil { - buf.astPrintf(ct, "(%v)", ct.Length) - } - - if ct.EnumValues != nil { - buf.astPrintf(ct, "(%s)", strings.Join(ct.EnumValues, ", ")) - } - - opts := make([]string, 0, 16) - if ct.Unsigned { - opts = append(opts, keywordStrings[UNSIGNED]) - } - if ct.Zerofill { - opts = append(opts, keywordStrings[ZEROFILL]) - } - if ct.Charset != "" { - opts = append(opts, keywordStrings[CHARACTER], keywordStrings[SET], ct.Charset) - } - if ct.Collate != "" { - opts = append(opts, keywordStrings[COLLATE], ct.Collate) - } - if ct.NotNull { - opts = append(opts, keywordStrings[NOT], keywordStrings[NULL]) - } - if ct.Default != nil { - opts = append(opts, keywordStrings[DEFAULT], String(ct.Default)) - } - if ct.OnUpdate != nil { - opts = append(opts, keywordStrings[ON], keywordStrings[UPDATE], String(ct.OnUpdate)) - } - if ct.Autoincrement { - 
opts = append(opts, keywordStrings[AUTO_INCREMENT]) - } - if ct.Comment != nil { - opts = append(opts, keywordStrings[COMMENT_KEYWORD], String(ct.Comment)) - } - if ct.KeyOpt == colKeyPrimary { - opts = append(opts, keywordStrings[PRIMARY], keywordStrings[KEY]) - } - if ct.KeyOpt == colKeyUnique { - opts = append(opts, keywordStrings[UNIQUE]) - } - if ct.KeyOpt == colKeyUniqueKey { - opts = append(opts, keywordStrings[UNIQUE], keywordStrings[KEY]) - } - if ct.KeyOpt == colKeySpatialKey { - opts = append(opts, keywordStrings[SPATIAL], keywordStrings[KEY]) - } - if ct.KeyOpt == colKeyFulltextKey { - opts = append(opts, keywordStrings[FULLTEXT], keywordStrings[KEY]) - } - if ct.KeyOpt == colKey { - opts = append(opts, keywordStrings[KEY]) - } - - if len(opts) != 0 { - buf.astPrintf(ct, " %s", strings.Join(opts, " ")) - } -} - -// Format formats the node. -func (idx *IndexDefinition) Format(buf *TrackedBuffer) { - buf.astPrintf(idx, "%v (", idx.Info) - for i, col := range idx.Columns { - if i != 0 { - buf.astPrintf(idx, ", %v", col.Column) - } else { - buf.astPrintf(idx, "%v", col.Column) - } - if col.Length != nil { - buf.astPrintf(idx, "(%v)", col.Length) - } - if col.Direction == DescOrder { - buf.astPrintf(idx, " desc") - } - } - buf.astPrintf(idx, ")") - - for _, opt := range idx.Options { - buf.astPrintf(idx, " %s", opt.Name) - if opt.String != "" { - buf.astPrintf(idx, " %s", opt.String) - } else { - buf.astPrintf(idx, " %v", opt.Value) - } - } -} - -// Format formats the node. -func (ii *IndexInfo) Format(buf *TrackedBuffer) { - if !ii.ConstraintName.IsEmpty() { - buf.astPrintf(ii, "constraint %v ", ii.ConstraintName) - } - if ii.Primary { - buf.astPrintf(ii, "%s", ii.Type) - } else { - buf.astPrintf(ii, "%s", ii.Type) - if !ii.Name.IsEmpty() { - buf.astPrintf(ii, " %v", ii.Name) - } - } -} - -// Format formats the node. 
-func (node *AutoIncSpec) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v ", node.Column) - buf.astPrintf(node, "using %v", node.Sequence) -} - -// Format formats the node. The "CREATE VINDEX" preamble was formatted in -// the containing DDL node Format, so this just prints the type, any -// parameters, and optionally the owner -func (node *VindexSpec) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "using %v", node.Type) - - numParams := len(node.Params) - if numParams != 0 { - buf.astPrintf(node, " with ") - for i, p := range node.Params { - if i != 0 { - buf.astPrintf(node, ", ") - } - buf.astPrintf(node, "%v", p) - } - } -} - -// Format formats the node. -func (node VindexParam) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s=%s", node.Key.String(), node.Val) -} - -// Format formats the node. -func (c *ConstraintDefinition) Format(buf *TrackedBuffer) { - if c.Name != "" { - buf.astPrintf(c, "constraint %s ", c.Name) - } - c.Details.Format(buf) -} - -// Format formats the node. -func (a ReferenceAction) Format(buf *TrackedBuffer) { - switch a { - case Restrict: - buf.WriteString("restrict") - case Cascade: - buf.WriteString("cascade") - case NoAction: - buf.WriteString("no action") - case SetNull: - buf.WriteString("set null") - case SetDefault: - buf.WriteString("set default") - } -} - -// Format formats the node. -func (f *ForeignKeyDefinition) Format(buf *TrackedBuffer) { - buf.astPrintf(f, "foreign key %v references %v %v", f.Source, f.ReferencedTable, f.ReferencedColumns) - if f.OnDelete != DefaultAction { - buf.astPrintf(f, " on delete %v", f.OnDelete) - } - if f.OnUpdate != DefaultAction { - buf.astPrintf(f, " on update %v", f.OnUpdate) - } -} - -// Format formats the node. -func (c *CheckConstraintDefinition) Format(buf *TrackedBuffer) { - buf.astPrintf(c, "check (%v)", c.Expr) - if c.Enforced { - buf.astPrintf(c, " enforced") - } else { - buf.astPrintf(c, " not enforced") - } -} - -// Format formats the node. 
-func (node *Show) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v", node.Internal) -} - -// Format formats the node. -func (node *ShowColumns) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "show %s", node.Full) - buf.astPrintf(node, "columns from %v", node.Table) - - buf.printIf(node.DbName != "", " from "+node.DbName) - if node.Filter != nil { - buf.astPrintf(node, "%v", node.Filter) - } -} - -// Format formats the node. -func (node *ShowLegacy) Format(buf *TrackedBuffer) { - nodeType := strings.ToLower(node.Type) - if (nodeType == "tables" || nodeType == "columns" || nodeType == "fields" || nodeType == "index" || nodeType == "keys" || nodeType == "indexes" || - nodeType == "databases" || nodeType == "schemas" || nodeType == "keyspaces" || nodeType == "vitess_keyspaces" || nodeType == "vitess_shards" || nodeType == "vitess_tablets") && node.ShowTablesOpt != nil { - opt := node.ShowTablesOpt - if node.Extended != "" { - buf.astPrintf(node, "show %s%s", node.Extended, nodeType) - } else { - buf.astPrintf(node, "show %s%s", opt.Full, nodeType) - } - if (nodeType == "columns" || nodeType == "fields") && node.HasOnTable() { - buf.astPrintf(node, " from %v", node.OnTable) - } - if (nodeType == "index" || nodeType == "keys" || nodeType == "indexes") && node.HasOnTable() { - buf.astPrintf(node, " from %v", node.OnTable) - } - if opt.DbName != "" { - buf.astPrintf(node, " from %s", opt.DbName) - } - buf.astPrintf(node, "%v", opt.Filter) - return - } - if node.Scope == ImplicitScope { - buf.astPrintf(node, "show %s", nodeType) - } else { - buf.astPrintf(node, "show %s %s", node.Scope.ToString(), nodeType) - } - if node.HasOnTable() { - buf.astPrintf(node, " on %v", node.OnTable) - } - if nodeType == "collation" && node.ShowCollationFilterOpt != nil { - buf.astPrintf(node, " where %v", node.ShowCollationFilterOpt) - } - if nodeType == "charset" && node.ShowTablesOpt != nil { - buf.astPrintf(node, "%v", node.ShowTablesOpt.Filter) - } - if node.HasTable() { - 
buf.astPrintf(node, " %v", node.Table) - } -} - -// Format formats the node. -func (node *ShowFilter) Format(buf *TrackedBuffer) { - if node == nil { - return - } - if node.Like != "" { - buf.astPrintf(node, " like '%s'", node.Like) - } else { - buf.astPrintf(node, " where %v", node.Filter) - } -} - -// Format formats the node. -func (node *Use) Format(buf *TrackedBuffer) { - if node.DBName.v != "" { - buf.astPrintf(node, "use %v", node.DBName) - } else { - buf.astPrintf(node, "use") - } -} - -// Format formats the node. -func (node *Commit) Format(buf *TrackedBuffer) { - buf.WriteString("commit") -} - -// Format formats the node. -func (node *Begin) Format(buf *TrackedBuffer) { - buf.WriteString("begin") -} - -// Format formats the node. -func (node *Rollback) Format(buf *TrackedBuffer) { - buf.WriteString("rollback") -} - -// Format formats the node. -func (node *SRollback) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "rollback to %v", node.Name) -} - -// Format formats the node. -func (node *Savepoint) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "savepoint %v", node.Name) -} - -// Format formats the node. -func (node *Release) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "release savepoint %v", node.Name) -} - -// Format formats the node. -func (node *Explain) Format(buf *TrackedBuffer) { - format := "" - switch node.Type { - case EmptyType: // do nothing - case AnalyzeType: - format = AnalyzeStr + " " - default: - format = "format = " + node.Type.ToString() + " " - } - buf.astPrintf(node, "explain %s%v", format, node.Statement) -} - -// Format formats the node. -func (node *OtherRead) Format(buf *TrackedBuffer) { - buf.WriteString("otherread") -} - -// Format formats the node. -func (node *OtherAdmin) Format(buf *TrackedBuffer) { - buf.WriteString("otheradmin") -} - -// Format formats the node. -func (node Comments) Format(buf *TrackedBuffer) { - for _, c := range node { - buf.astPrintf(node, "%s ", c) - } -} - -// Format formats the node. 
-func (node SelectExprs) Format(buf *TrackedBuffer) { - var prefix string - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. -func (node *StarExpr) Format(buf *TrackedBuffer) { - if !node.TableName.IsEmpty() { - buf.astPrintf(node, "%v.", node.TableName) - } - buf.astPrintf(node, "*") -} - -// Format formats the node. -func (node *AliasedExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v", node.Expr) - if !node.As.IsEmpty() { - buf.astPrintf(node, " as %v", node.As) - } -} - -// Format formats the node. -func (node Nextval) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "next %v values", node.Expr) -} - -// Format formats the node. -func (node Columns) Format(buf *TrackedBuffer) { - if node == nil { - return - } - prefix := "(" - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - buf.WriteString(")") -} - -// Format formats the node -func (node Partitions) Format(buf *TrackedBuffer) { - if node == nil { - return - } - prefix := " partition (" - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - buf.WriteString(")") -} - -// Format formats the node. -func (node TableExprs) Format(buf *TrackedBuffer) { - var prefix string - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. -func (node *AliasedTableExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v%v", node.Expr, node.Partitions) - if !node.As.IsEmpty() { - buf.astPrintf(node, " as %v", node.As) - } - if node.Hints != nil { - // Hint node provides the space padding. - buf.astPrintf(node, "%v", node.Hints) - } -} - -// Format formats the node. -func (node TableNames) Format(buf *TrackedBuffer) { - var prefix string - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. 
-func (node TableName) Format(buf *TrackedBuffer) { - if node.IsEmpty() { - return - } - if !node.Qualifier.IsEmpty() { - buf.astPrintf(node, "%v.", node.Qualifier) - } - buf.astPrintf(node, "%v", node.Name) -} - -// Format formats the node. -func (node *ParenTableExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "(%v)", node.Exprs) -} - -// Format formats the node. -func (node JoinCondition) Format(buf *TrackedBuffer) { - if node.On != nil { - buf.astPrintf(node, " on %v", node.On) - } - if node.Using != nil { - buf.astPrintf(node, " using %v", node.Using) - } -} - -// Format formats the node. -func (node *JoinTableExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v %s %v%v", node.LeftExpr, node.Join.ToString(), node.RightExpr, node.Condition) -} - -// Format formats the node. -func (node *IndexHints) Format(buf *TrackedBuffer) { - buf.astPrintf(node, " %sindex ", node.Type.ToString()) - if len(node.Indexes) == 0 { - buf.astPrintf(node, "()") - } else { - prefix := "(" - for _, n := range node.Indexes { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } - buf.astPrintf(node, ")") - } -} - -// Format formats the node. -func (node *Where) Format(buf *TrackedBuffer) { - if node == nil || node.Expr == nil { - return - } - buf.astPrintf(node, " %s %v", node.Type.ToString(), node.Expr) -} - -// Format formats the node. -func (node Exprs) Format(buf *TrackedBuffer) { - var prefix string - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. -func (node *AndExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%l and %r", node.Left, node.Right) -} - -// Format formats the node. -func (node *OrExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%l or %r", node.Left, node.Right) -} - -// Format formats the node. -func (node *XorExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%l xor %r", node.Left, node.Right) -} - -// Format formats the node. 
-func (node *NotExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "not %v", node.Expr) -} - -// Format formats the node. -func (node *ComparisonExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%l %s %r", node.Left, node.Operator.ToString(), node.Right) - if node.Escape != nil { - buf.astPrintf(node, " escape %v", node.Escape) - } -} - -// Format formats the node. -func (node *RangeCond) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v %s %l and %r", node.Left, node.Operator.ToString(), node.From, node.To) -} - -// Format formats the node. -func (node *IsExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v %s", node.Expr, node.Operator.ToString()) -} - -// Format formats the node. -func (node *ExistsExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "exists %v", node.Subquery) -} - -// Format formats the node. -func (node *Literal) Format(buf *TrackedBuffer) { - switch node.Type { - case StrVal: - sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val).EncodeSQL(buf) - case IntVal, FloatVal, HexNum: - buf.astPrintf(node, "%s", node.Val) - case HexVal: - buf.astPrintf(node, "X'%s'", node.Val) - case BitVal: - buf.astPrintf(node, "B'%s'", node.Val) - default: - panic("unexpected") - } -} - -// Format formats the node. -func (node Argument) Format(buf *TrackedBuffer) { - buf.WriteArg(string(node)) -} - -// Format formats the node. -func (node *NullVal) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "null") -} - -// Format formats the node. -func (node BoolVal) Format(buf *TrackedBuffer) { - if node { - buf.astPrintf(node, "true") - } else { - buf.astPrintf(node, "false") - } -} - -// Format formats the node. -func (node *ColName) Format(buf *TrackedBuffer) { - if !node.Qualifier.IsEmpty() { - buf.astPrintf(node, "%v.", node.Qualifier) - } - buf.astPrintf(node, "%v", node.Name) -} - -// Format formats the node. 
-func (node ValTuple) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "(%v)", Exprs(node)) -} - -// Format formats the node. -func (node *Subquery) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "(%v)", node.Select) -} - -// Format formats the node. -func (node *DerivedTable) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "(%v)", node.Select) -} - -// Format formats the node. -func (node ListArg) Format(buf *TrackedBuffer) { - buf.WriteArg(string(node)) -} - -// Format formats the node. -func (node *BinaryExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%l %s %r", node.Left, node.Operator.ToString(), node.Right) -} - -// Format formats the node. -func (node *UnaryExpr) Format(buf *TrackedBuffer) { - if _, unary := node.Expr.(*UnaryExpr); unary { - // They have same precedence so parenthesis is not required. - buf.astPrintf(node, "%s %v", node.Operator.ToString(), node.Expr) - return - } - buf.astPrintf(node, "%s%v", node.Operator.ToString(), node.Expr) -} - -// Format formats the node. -func (node *IntervalExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "interval %v %s", node.Expr, node.Unit) -} - -// Format formats the node. -func (node *TimestampFuncExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(%s, %v, %v)", node.Name, node.Unit, node.Expr1, node.Expr2) -} - -// Format formats the node. -func (node *CurTimeFuncExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(%v)", node.Name.String(), node.Fsp) -} - -// Format formats the node. -func (node *CollateExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v collate %s", node.Expr, node.Charset) -} - -// Format formats the node. 
-func (node *FuncExpr) Format(buf *TrackedBuffer) { - var distinct string - if node.Distinct { - distinct = "distinct " - } - if !node.Qualifier.IsEmpty() { - buf.astPrintf(node, "%v.", node.Qualifier) - } - // Function names should not be back-quoted even - // if they match a reserved word, only if they contain illegal characters - funcName := node.Name.String() - - if containEscapableChars(funcName, NoAt) { - writeEscapedString(buf, funcName) - } else { - buf.WriteString(funcName) - } - buf.astPrintf(node, "(%s%v)", distinct, node.Exprs) -} - -// Format formats the node -func (node *GroupConcatExpr) Format(buf *TrackedBuffer) { - if node.Distinct { - buf.astPrintf(node, "group_concat(%s%v%v%s%v)", DistinctStr, node.Exprs, node.OrderBy, node.Separator, node.Limit) - } else { - buf.astPrintf(node, "group_concat(%v%v%s%v)", node.Exprs, node.OrderBy, node.Separator, node.Limit) - } -} - -// Format formats the node. -func (node *ValuesFuncExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "values(%v)", node.Name) -} - -// Format formats the node. -func (node *SubstrExpr) Format(buf *TrackedBuffer) { - var val interface{} - if node.Name != nil { - val = node.Name - } else { - val = node.StrVal - } - - if node.To == nil { - buf.astPrintf(node, "substr(%v, %v)", val, node.From) - } else { - buf.astPrintf(node, "substr(%v, %v, %v)", val, node.From, node.To) - } -} - -// Format formats the node. -func (node *ConvertExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "convert(%v, %v)", node.Expr, node.Type) -} - -// Format formats the node. -func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "convert(%v using %s)", node.Expr, node.Type) -} - -// Format formats the node. 
-func (node *ConvertType) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s", node.Type) - if node.Length != nil { - buf.astPrintf(node, "(%v", node.Length) - if node.Scale != nil { - buf.astPrintf(node, ", %v", node.Scale) - } - buf.astPrintf(node, ")") - } - if node.Charset != "" { - buf.astPrintf(node, "%s %s", node.Operator.ToString(), node.Charset) - } -} - -// Format formats the node -func (node *MatchExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "match(%v) against (%v%s)", node.Columns, node.Expr, node.Option.ToString()) -} - -// Format formats the node. -func (node *CaseExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "case ") - if node.Expr != nil { - buf.astPrintf(node, "%v ", node.Expr) - } - for _, when := range node.Whens { - buf.astPrintf(node, "%v ", when) - } - if node.Else != nil { - buf.astPrintf(node, "else %v ", node.Else) - } - buf.astPrintf(node, "end") -} - -// Format formats the node. -func (node *Default) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "default") - if node.ColName != "" { - buf.WriteString("(") - formatID(buf, node.ColName, strings.ToLower(node.ColName), NoAt) - buf.WriteString(")") - } -} - -// Format formats the node. -func (node *When) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "when %v then %v", node.Cond, node.Val) -} - -// Format formats the node. -func (node GroupBy) Format(buf *TrackedBuffer) { - prefix := " group by " - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. -func (node OrderBy) Format(buf *TrackedBuffer) { - prefix := " order by " - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. 
-func (node *Order) Format(buf *TrackedBuffer) { - if node, ok := node.Expr.(*NullVal); ok { - buf.astPrintf(node, "%v", node) - return - } - if node, ok := node.Expr.(*FuncExpr); ok { - if node.Name.Lowered() == "rand" { - buf.astPrintf(node, "%v", node) - return - } - } - - buf.astPrintf(node, "%v %s", node.Expr, node.Direction.ToString()) -} - -// Format formats the node. -func (node *Limit) Format(buf *TrackedBuffer) { - if node == nil { - return - } - buf.astPrintf(node, " limit ") - if node.Offset != nil { - buf.astPrintf(node, "%v, ", node.Offset) - } - buf.astPrintf(node, "%v", node.Rowcount) -} - -// Format formats the node. -func (node Values) Format(buf *TrackedBuffer) { - prefix := "values " - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. -func (node UpdateExprs) Format(buf *TrackedBuffer) { - var prefix string - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. -func (node *UpdateExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%v = %v", node.Name, node.Expr) -} - -// Format formats the node. -func (node SetExprs) Format(buf *TrackedBuffer) { - var prefix string - for _, n := range node { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node. -func (node *SetExpr) Format(buf *TrackedBuffer) { - if node.Scope != ImplicitScope { - buf.WriteString(node.Scope.ToString()) - buf.WriteString(" ") - } - // We don't have to backtick set variable names. - switch { - case node.Name.EqualString("charset") || node.Name.EqualString("names"): - buf.astPrintf(node, "%s %v", node.Name.String(), node.Expr) - case node.Name.EqualString(TransactionStr): - literal := node.Expr.(*Literal) - buf.astPrintf(node, "%s %s", node.Name.String(), strings.ToLower(string(literal.Val))) - default: - buf.astPrintf(node, "%v = %v", node.Name, node.Expr) - } -} - -// Format formats the node. 
-func (node OnDup) Format(buf *TrackedBuffer) { - if node == nil { - return - } - buf.astPrintf(node, " on duplicate key update %v", UpdateExprs(node)) -} - -// Format formats the node. -func (node ColIdent) Format(buf *TrackedBuffer) { - for i := NoAt; i < node.at; i++ { - buf.WriteByte('@') - } - formatID(buf, node.val, node.Lowered(), node.at) -} - -// Format formats the node. -func (node TableIdent) Format(buf *TrackedBuffer) { - formatID(buf, node.v, strings.ToLower(node.v), NoAt) -} - -// AtCount return the '@' count present in ColIdent Name -func (node ColIdent) AtCount() AtCount { - return node.at +// AtCount return the '@' count present in ColIdent Name +func (node ColIdent) AtCount() AtCount { + return node.at } func (IsolationLevel) iChar() {} func (AccessMode) iChar() {} - -// Format formats the node. -func (node IsolationLevel) Format(buf *TrackedBuffer) { - buf.WriteString("isolation level ") - switch node { - case ReadUncommitted: - buf.WriteString(ReadUncommittedStr) - case ReadCommitted: - buf.WriteString(ReadCommittedStr) - case RepeatableRead: - buf.WriteString(RepeatableReadStr) - case Serializable: - buf.WriteString(SerializableStr) - default: - buf.WriteString("Unknown Isolation level value") - } -} - -// Format formats the node. -func (node AccessMode) Format(buf *TrackedBuffer) { - if node == ReadOnly { - buf.WriteString(TxReadOnly) - } else { - buf.WriteString(TxReadWrite) - } -} - -// Format formats the node. -func (node *Load) Format(buf *TrackedBuffer) { - buf.WriteString("AST node missing for Load type") -} - -// Format formats the node. -func (node *ShowTableStatus) Format(buf *TrackedBuffer) { - buf.WriteString("show table status") - if node.DatabaseName != "" { - buf.WriteString(" from ") - buf.WriteString(node.DatabaseName) - } - buf.astPrintf(node, "%v", node.Filter) -} - -// Format formats the node. 
-func (node *ShowBasic) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "show%s%v", node.Command.ToString(), node.Filter) -} - -// Format formats the node. -func (node *SelectInto) Format(buf *TrackedBuffer) { - if node == nil { - return - } - buf.astPrintf(node, "%s'%s'", node.Type.ToString(), node.FileName) - if node.Charset != "" { - buf.astPrintf(node, " character set %s", node.Charset) - } - buf.astPrintf(node, "%s%s%s%s", node.FormatOption, node.ExportOption, node.Manifest, node.Overwrite) -} - -// Format formats the node. -func (node *CreateIndex) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "alter table %v add", node.Table) - if node.Constraint != "" { - buf.WriteString(" " + node.Constraint) - } - buf.astPrintf(node, " index %v", node.Name) - - buf.WriteString(" (") - for i, col := range node.Columns { - if i != 0 { - buf.astPrintf(node, ", %v", col.Column) - } else { - buf.astPrintf(node, "%v", col.Column) - } - if col.Length != nil { - buf.astPrintf(node, "(%v)", col.Length) - } - if col.Direction == DescOrder { - buf.WriteString(" desc") - } - } - buf.astPrintf(node, ")") - for _, opt := range node.Options { - //if opt == nil { - // continue - //} - buf.WriteString(" " + strings.ToLower(opt.Name)) - if opt.String != "" { - buf.WriteString(" " + opt.String) - } else { - buf.astPrintf(node, " %v", opt.Value) - } - } -} - -// Format formats the node. -func (node *CreateDatabase) Format(buf *TrackedBuffer) { - buf.WriteString("create database") - if node.IfNotExists { - buf.WriteString(" if not exists") - } - buf.astPrintf(node, " %s", node.DBName) - if node.CreateOptions != nil { - for _, createOption := range node.CreateOptions { - if createOption.IsDefault { - buf.WriteString(" default") - } - buf.WriteString(createOption.Type.ToString()) - buf.WriteString(" " + createOption.Value) - } - } -} - -// Format formats the node. 
-func (node *AlterDatabase) Format(buf *TrackedBuffer) { - buf.WriteString("alter database") - if node.DBName != "" { - buf.astPrintf(node, " %s", node.DBName) - } - if node.UpdateDataDirectory { - buf.WriteString(" upgrade data directory name") - } - if node.AlterOptions != nil { - for _, createOption := range node.AlterOptions { - if createOption.IsDefault { - buf.WriteString(" default") - } - buf.WriteString(createOption.Type.ToString()) - buf.WriteString(" " + createOption.Value) - } - } -} - -// Format formats the node. -func (node *CreateTable) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "create table %v", node.Table) - if node.OptLike != nil { - buf.astPrintf(node, " %v", node.OptLike) - } - if node.TableSpec != nil { - buf.astPrintf(node, " %v", node.TableSpec) - } -} - -// Format formats the node. -func (node *CreateView) Format(buf *TrackedBuffer) { - buf.WriteString("create") - if node.IsReplace { - buf.WriteString(" or replace") - } - if node.Algorithm != "" { - buf.astPrintf(node, " algorithm = %s", node.Algorithm) - } - if node.Definer != "" { - buf.astPrintf(node, " definer = %s", node.Definer) - } - if node.Security != "" { - buf.astPrintf(node, " sql security %s", node.Security) - } - buf.astPrintf(node, " view %v", node.ViewName) - buf.astPrintf(node, "%v as %v", node.Columns, node.Select) - if node.CheckOption != "" { - buf.astPrintf(node, " with %s check option", node.CheckOption) - } -} - -// Format formats the LockTables node. -func (node *LockTables) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "lock tables %v %s", node.Tables[0].Table, node.Tables[0].Lock.ToString()) - for i := 1; i < len(node.Tables); i++ { - buf.astPrintf(node, ", %v %s", node.Tables[i].Table, node.Tables[i].Lock.ToString()) - } -} - -// Format formats the UnlockTables node. -func (node *UnlockTables) Format(buf *TrackedBuffer) { - buf.WriteString("unlock tables") -} - -// Format formats the node. 
-func (node *AlterView) Format(buf *TrackedBuffer) { - buf.WriteString("alter") - if node.Algorithm != "" { - buf.astPrintf(node, " algorithm = %s", node.Algorithm) - } - if node.Definer != "" { - buf.astPrintf(node, " definer = %s", node.Definer) - } - if node.Security != "" { - buf.astPrintf(node, " sql security %s", node.Security) - } - buf.astPrintf(node, " view %v", node.ViewName) - buf.astPrintf(node, "%v as %v", node.Columns, node.Select) - if node.CheckOption != "" { - buf.astPrintf(node, " with %s check option", node.CheckOption) - } -} - -// Format formats the node. -func (node *DropTable) Format(buf *TrackedBuffer) { - exists := "" - if node.IfExists { - exists = " if exists" - } - buf.astPrintf(node, "drop table%s %v", exists, node.FromTables) -} - -// Format formats the node. -func (node *DropView) Format(buf *TrackedBuffer) { - exists := "" - if node.IfExists { - exists = " if exists" - } - buf.astPrintf(node, "drop view%s %v", exists, node.FromTables) -} - -// Format formats the AlterTable node. -func (node *AlterTable) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "alter table %v", node.Table) - prefix := "" - for i, option := range node.AlterOptions { - if i != 0 { - buf.WriteString(",") - } - buf.astPrintf(node, " %v", option) - if node.PartitionSpec != nil && node.PartitionSpec.Action != RemoveAction { - prefix = "," - } - } - if node.PartitionSpec != nil { - buf.astPrintf(node, "%s %v", prefix, node.PartitionSpec) - } -} - -// Format formats the node. -func (node *AddConstraintDefinition) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "add %v", node.ConstraintDefinition) -} - -// Format formats the node. -func (node *AddIndexDefinition) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "add %v", node.IndexDefinition) -} - -// Format formats the node. 
-func (node *AddColumns) Format(buf *TrackedBuffer) { - - if len(node.Columns) == 1 { - buf.astPrintf(node, "add column %v", node.Columns[0]) - if node.First != nil { - buf.astPrintf(node, " first %v", node.First) - } - if node.After != nil { - buf.astPrintf(node, " after %v", node.After) - } - } else { - for i, col := range node.Columns { - if i == 0 { - buf.astPrintf(node, "add column (%v", col) - } else { - buf.astPrintf(node, ", %v", col) - } - } - buf.WriteString(")") - } -} - -// Format formats the node. -func (node AlgorithmValue) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "algorithm = %s", string(node)) -} - -// Format formats the node -func (node *AlterColumn) Format(buf *TrackedBuffer) { - if node.DropDefault { - buf.astPrintf(node, "alter column %v drop default", node.Column) - } else { - buf.astPrintf(node, "alter column %v set default", node.Column) - buf.astPrintf(node, " %v", node.DefaultVal) - } -} - -// Format formats the node -func (node *ChangeColumn) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "change column %v %v", node.OldColumn, node.NewColDefinition) - if node.First != nil { - buf.astPrintf(node, " first %v", node.First) - } - if node.After != nil { - buf.astPrintf(node, " after %v", node.After) - } -} - -// Format formats the node -func (node *ModifyColumn) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "modify column %v", node.NewColDefinition) - if node.First != nil { - buf.astPrintf(node, " first %v", node.First) - } - if node.After != nil { - buf.astPrintf(node, " after %v", node.After) - } -} - -// Format formats the node -func (node *AlterCharset) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "convert to character set %s", node.CharacterSet) - if node.Collate != "" { - buf.astPrintf(node, " collate %s", node.Collate) - } -} - -// Format formats the node -func (node *KeyState) Format(buf *TrackedBuffer) { - if node.Enable { - buf.WriteString("enable keys") - } else { - buf.WriteString("disable keys") - } - -} - 
-// Format formats the node -func (node *TablespaceOperation) Format(buf *TrackedBuffer) { - if node.Import { - buf.WriteString("import tablespace") - } else { - buf.WriteString("discard tablespace") - } -} - -// Format formats the node -func (node *DropColumn) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "drop column %v", node.Name) -} - -// Format formats the node -func (node *DropKey) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "drop %s", node.Type.ToString()) - if node.Name != "" { - buf.astPrintf(node, " %s", node.Name) - } -} - -// Format formats the node -func (node *Force) Format(buf *TrackedBuffer) { - buf.WriteString("force") -} - -// Format formats the node -func (node *LockOption) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "lock %s", node.Type.ToString()) -} - -// Format formats the node -func (node *OrderByOption) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "order by ") - prefix := "" - for _, n := range node.Cols { - buf.astPrintf(node, "%s%v", prefix, n) - prefix = ", " - } -} - -// Format formats the node -func (node *RenameTable) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "rename %v", node.Table) -} - -// Format formats the node -func (node *RenameIndex) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "rename index %s to %s", node.OldName, node.NewName) -} - -// Format formats the node -func (node *Validation) Format(buf *TrackedBuffer) { - if node.With { - buf.WriteString("with validation") - } else { - buf.WriteString("without validation") - } -} - -// Format formats the node -func (node TableOptions) Format(buf *TrackedBuffer) { - for i, option := range node { - if i != 0 { - buf.WriteString(" ") - } - buf.astPrintf(node, "%s", option.Name) - if option.String != "" { - buf.astPrintf(node, " %s", option.String) - } else if option.Value != nil { - buf.astPrintf(node, " %v", option.Value) - } else { - buf.astPrintf(node, " (%v)", option.Tables) - } - } -} diff --git a/go/vt/sqlparser/ast_clone.go 
b/go/vt/sqlparser/ast_clone.go new file mode 100644 index 00000000000..44db68253c7 --- /dev/null +++ b/go/vt/sqlparser/ast_clone.go @@ -0,0 +1,2507 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package sqlparser + +// CloneSQLNode creates a deep clone of the input. +func CloneSQLNode(in SQLNode) SQLNode { + if in == nil { + return nil + } + switch in := in.(type) { + case AccessMode: + return in + case *AddColumns: + return CloneRefOfAddColumns(in) + case *AddConstraintDefinition: + return CloneRefOfAddConstraintDefinition(in) + case *AddIndexDefinition: + return CloneRefOfAddIndexDefinition(in) + case AlgorithmValue: + return in + case *AliasedExpr: + return CloneRefOfAliasedExpr(in) + case *AliasedTableExpr: + return CloneRefOfAliasedTableExpr(in) + case *AlterCharset: + return CloneRefOfAlterCharset(in) + case *AlterColumn: + return CloneRefOfAlterColumn(in) + case *AlterDatabase: + return CloneRefOfAlterDatabase(in) + case *AlterMigration: + return CloneRefOfAlterMigration(in) + case *AlterTable: + return CloneRefOfAlterTable(in) + case *AlterView: + return CloneRefOfAlterView(in) + case *AlterVschema: + return CloneRefOfAlterVschema(in) + case *AndExpr: + return CloneRefOfAndExpr(in) + case Argument: + return in + case *AutoIncSpec: + return CloneRefOfAutoIncSpec(in) + case *Begin: + return CloneRefOfBegin(in) + case *BinaryExpr: + return CloneRefOfBinaryExpr(in) + case BoolVal: + 
return in + case *CallProc: + return CloneRefOfCallProc(in) + case *CaseExpr: + return CloneRefOfCaseExpr(in) + case *ChangeColumn: + return CloneRefOfChangeColumn(in) + case *CheckConstraintDefinition: + return CloneRefOfCheckConstraintDefinition(in) + case ColIdent: + return CloneColIdent(in) + case *ColName: + return CloneRefOfColName(in) + case *CollateExpr: + return CloneRefOfCollateExpr(in) + case *ColumnDefinition: + return CloneRefOfColumnDefinition(in) + case *ColumnType: + return CloneRefOfColumnType(in) + case Columns: + return CloneColumns(in) + case Comments: + return CloneComments(in) + case *Commit: + return CloneRefOfCommit(in) + case *ComparisonExpr: + return CloneRefOfComparisonExpr(in) + case *ConstraintDefinition: + return CloneRefOfConstraintDefinition(in) + case *ConvertExpr: + return CloneRefOfConvertExpr(in) + case *ConvertType: + return CloneRefOfConvertType(in) + case *ConvertUsingExpr: + return CloneRefOfConvertUsingExpr(in) + case *CreateDatabase: + return CloneRefOfCreateDatabase(in) + case *CreateTable: + return CloneRefOfCreateTable(in) + case *CreateView: + return CloneRefOfCreateView(in) + case *CurTimeFuncExpr: + return CloneRefOfCurTimeFuncExpr(in) + case *Default: + return CloneRefOfDefault(in) + case *Delete: + return CloneRefOfDelete(in) + case *DerivedTable: + return CloneRefOfDerivedTable(in) + case *DropColumn: + return CloneRefOfDropColumn(in) + case *DropDatabase: + return CloneRefOfDropDatabase(in) + case *DropKey: + return CloneRefOfDropKey(in) + case *DropTable: + return CloneRefOfDropTable(in) + case *DropView: + return CloneRefOfDropView(in) + case *ExistsExpr: + return CloneRefOfExistsExpr(in) + case *ExplainStmt: + return CloneRefOfExplainStmt(in) + case *ExplainTab: + return CloneRefOfExplainTab(in) + case Exprs: + return CloneExprs(in) + case *Flush: + return CloneRefOfFlush(in) + case *Force: + return CloneRefOfForce(in) + case *ForeignKeyDefinition: + return CloneRefOfForeignKeyDefinition(in) + case *FuncExpr: + 
return CloneRefOfFuncExpr(in) + case GroupBy: + return CloneGroupBy(in) + case *GroupConcatExpr: + return CloneRefOfGroupConcatExpr(in) + case *IndexDefinition: + return CloneRefOfIndexDefinition(in) + case *IndexHints: + return CloneRefOfIndexHints(in) + case *IndexInfo: + return CloneRefOfIndexInfo(in) + case *Insert: + return CloneRefOfInsert(in) + case *IntervalExpr: + return CloneRefOfIntervalExpr(in) + case *IsExpr: + return CloneRefOfIsExpr(in) + case IsolationLevel: + return in + case JoinCondition: + return CloneJoinCondition(in) + case *JoinTableExpr: + return CloneRefOfJoinTableExpr(in) + case *KeyState: + return CloneRefOfKeyState(in) + case *Limit: + return CloneRefOfLimit(in) + case ListArg: + return CloneListArg(in) + case *Literal: + return CloneRefOfLiteral(in) + case *Load: + return CloneRefOfLoad(in) + case *LockOption: + return CloneRefOfLockOption(in) + case *LockTables: + return CloneRefOfLockTables(in) + case *MatchExpr: + return CloneRefOfMatchExpr(in) + case *ModifyColumn: + return CloneRefOfModifyColumn(in) + case *Nextval: + return CloneRefOfNextval(in) + case *NotExpr: + return CloneRefOfNotExpr(in) + case *NullVal: + return CloneRefOfNullVal(in) + case OnDup: + return CloneOnDup(in) + case *OptLike: + return CloneRefOfOptLike(in) + case *OrExpr: + return CloneRefOfOrExpr(in) + case *Order: + return CloneRefOfOrder(in) + case OrderBy: + return CloneOrderBy(in) + case *OrderByOption: + return CloneRefOfOrderByOption(in) + case *OtherAdmin: + return CloneRefOfOtherAdmin(in) + case *OtherRead: + return CloneRefOfOtherRead(in) + case *ParenSelect: + return CloneRefOfParenSelect(in) + case *ParenTableExpr: + return CloneRefOfParenTableExpr(in) + case *PartitionDefinition: + return CloneRefOfPartitionDefinition(in) + case *PartitionSpec: + return CloneRefOfPartitionSpec(in) + case Partitions: + return ClonePartitions(in) + case *RangeCond: + return CloneRefOfRangeCond(in) + case ReferenceAction: + return in + case *Release: + return 
CloneRefOfRelease(in) + case *RenameIndex: + return CloneRefOfRenameIndex(in) + case *RenameTable: + return CloneRefOfRenameTable(in) + case *RenameTableName: + return CloneRefOfRenameTableName(in) + case *RevertMigration: + return CloneRefOfRevertMigration(in) + case *Rollback: + return CloneRefOfRollback(in) + case *SRollback: + return CloneRefOfSRollback(in) + case *Savepoint: + return CloneRefOfSavepoint(in) + case *Select: + return CloneRefOfSelect(in) + case SelectExprs: + return CloneSelectExprs(in) + case *SelectInto: + return CloneRefOfSelectInto(in) + case *Set: + return CloneRefOfSet(in) + case *SetExpr: + return CloneRefOfSetExpr(in) + case SetExprs: + return CloneSetExprs(in) + case *SetTransaction: + return CloneRefOfSetTransaction(in) + case *Show: + return CloneRefOfShow(in) + case *ShowBasic: + return CloneRefOfShowBasic(in) + case *ShowCreate: + return CloneRefOfShowCreate(in) + case *ShowFilter: + return CloneRefOfShowFilter(in) + case *ShowLegacy: + return CloneRefOfShowLegacy(in) + case *StarExpr: + return CloneRefOfStarExpr(in) + case *Stream: + return CloneRefOfStream(in) + case *Subquery: + return CloneRefOfSubquery(in) + case *SubstrExpr: + return CloneRefOfSubstrExpr(in) + case TableExprs: + return CloneTableExprs(in) + case TableIdent: + return CloneTableIdent(in) + case TableName: + return CloneTableName(in) + case TableNames: + return CloneTableNames(in) + case TableOptions: + return CloneTableOptions(in) + case *TableSpec: + return CloneRefOfTableSpec(in) + case *TablespaceOperation: + return CloneRefOfTablespaceOperation(in) + case *TimestampFuncExpr: + return CloneRefOfTimestampFuncExpr(in) + case *TruncateTable: + return CloneRefOfTruncateTable(in) + case *UnaryExpr: + return CloneRefOfUnaryExpr(in) + case *Union: + return CloneRefOfUnion(in) + case *UnionSelect: + return CloneRefOfUnionSelect(in) + case *UnlockTables: + return CloneRefOfUnlockTables(in) + case *Update: + return CloneRefOfUpdate(in) + case *UpdateExpr: + return 
CloneRefOfUpdateExpr(in) + case UpdateExprs: + return CloneUpdateExprs(in) + case *Use: + return CloneRefOfUse(in) + case *VStream: + return CloneRefOfVStream(in) + case ValTuple: + return CloneValTuple(in) + case *Validation: + return CloneRefOfValidation(in) + case Values: + return CloneValues(in) + case *ValuesFuncExpr: + return CloneRefOfValuesFuncExpr(in) + case VindexParam: + return CloneVindexParam(in) + case *VindexSpec: + return CloneRefOfVindexSpec(in) + case *When: + return CloneRefOfWhen(in) + case *Where: + return CloneRefOfWhere(in) + case *XorExpr: + return CloneRefOfXorExpr(in) + default: + // this should never happen + return nil + } +} + +// CloneRefOfAddColumns creates a deep clone of the input. +func CloneRefOfAddColumns(n *AddColumns) *AddColumns { + if n == nil { + return nil + } + out := *n + out.Columns = CloneSliceOfRefOfColumnDefinition(n.Columns) + out.First = CloneRefOfColName(n.First) + out.After = CloneRefOfColName(n.After) + return &out +} + +// CloneRefOfAddConstraintDefinition creates a deep clone of the input. +func CloneRefOfAddConstraintDefinition(n *AddConstraintDefinition) *AddConstraintDefinition { + if n == nil { + return nil + } + out := *n + out.ConstraintDefinition = CloneRefOfConstraintDefinition(n.ConstraintDefinition) + return &out +} + +// CloneRefOfAddIndexDefinition creates a deep clone of the input. +func CloneRefOfAddIndexDefinition(n *AddIndexDefinition) *AddIndexDefinition { + if n == nil { + return nil + } + out := *n + out.IndexDefinition = CloneRefOfIndexDefinition(n.IndexDefinition) + return &out +} + +// CloneRefOfAliasedExpr creates a deep clone of the input. +func CloneRefOfAliasedExpr(n *AliasedExpr) *AliasedExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + out.As = CloneColIdent(n.As) + return &out +} + +// CloneRefOfAliasedTableExpr creates a deep clone of the input. 
+func CloneRefOfAliasedTableExpr(n *AliasedTableExpr) *AliasedTableExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneSimpleTableExpr(n.Expr) + out.Partitions = ClonePartitions(n.Partitions) + out.As = CloneTableIdent(n.As) + out.Hints = CloneRefOfIndexHints(n.Hints) + return &out +} + +// CloneRefOfAlterCharset creates a deep clone of the input. +func CloneRefOfAlterCharset(n *AlterCharset) *AlterCharset { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfAlterColumn creates a deep clone of the input. +func CloneRefOfAlterColumn(n *AlterColumn) *AlterColumn { + if n == nil { + return nil + } + out := *n + out.Column = CloneRefOfColName(n.Column) + out.DefaultVal = CloneExpr(n.DefaultVal) + return &out +} + +// CloneRefOfAlterDatabase creates a deep clone of the input. +func CloneRefOfAlterDatabase(n *AlterDatabase) *AlterDatabase { + if n == nil { + return nil + } + out := *n + out.DBName = CloneTableIdent(n.DBName) + out.AlterOptions = CloneSliceOfCollateAndCharset(n.AlterOptions) + return &out +} + +// CloneRefOfAlterMigration creates a deep clone of the input. +func CloneRefOfAlterMigration(n *AlterMigration) *AlterMigration { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfAlterTable creates a deep clone of the input. +func CloneRefOfAlterTable(n *AlterTable) *AlterTable { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + out.AlterOptions = CloneSliceOfAlterOption(n.AlterOptions) + out.PartitionSpec = CloneRefOfPartitionSpec(n.PartitionSpec) + return &out +} + +// CloneRefOfAlterView creates a deep clone of the input. +func CloneRefOfAlterView(n *AlterView) *AlterView { + if n == nil { + return nil + } + out := *n + out.ViewName = CloneTableName(n.ViewName) + out.Columns = CloneColumns(n.Columns) + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfAlterVschema creates a deep clone of the input. 
+func CloneRefOfAlterVschema(n *AlterVschema) *AlterVschema { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + out.VindexSpec = CloneRefOfVindexSpec(n.VindexSpec) + out.VindexCols = CloneSliceOfColIdent(n.VindexCols) + out.AutoIncSpec = CloneRefOfAutoIncSpec(n.AutoIncSpec) + return &out +} + +// CloneRefOfAndExpr creates a deep clone of the input. +func CloneRefOfAndExpr(n *AndExpr) *AndExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneRefOfAutoIncSpec creates a deep clone of the input. +func CloneRefOfAutoIncSpec(n *AutoIncSpec) *AutoIncSpec { + if n == nil { + return nil + } + out := *n + out.Column = CloneColIdent(n.Column) + out.Sequence = CloneTableName(n.Sequence) + return &out +} + +// CloneRefOfBegin creates a deep clone of the input. +func CloneRefOfBegin(n *Begin) *Begin { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfBinaryExpr creates a deep clone of the input. +func CloneRefOfBinaryExpr(n *BinaryExpr) *BinaryExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneRefOfCallProc creates a deep clone of the input. +func CloneRefOfCallProc(n *CallProc) *CallProc { + if n == nil { + return nil + } + out := *n + out.Name = CloneTableName(n.Name) + out.Params = CloneExprs(n.Params) + return &out +} + +// CloneRefOfCaseExpr creates a deep clone of the input. +func CloneRefOfCaseExpr(n *CaseExpr) *CaseExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + out.Whens = CloneSliceOfRefOfWhen(n.Whens) + out.Else = CloneExpr(n.Else) + return &out +} + +// CloneRefOfChangeColumn creates a deep clone of the input. 
+func CloneRefOfChangeColumn(n *ChangeColumn) *ChangeColumn { + if n == nil { + return nil + } + out := *n + out.OldColumn = CloneRefOfColName(n.OldColumn) + out.NewColDefinition = CloneRefOfColumnDefinition(n.NewColDefinition) + out.First = CloneRefOfColName(n.First) + out.After = CloneRefOfColName(n.After) + return &out +} + +// CloneRefOfCheckConstraintDefinition creates a deep clone of the input. +func CloneRefOfCheckConstraintDefinition(n *CheckConstraintDefinition) *CheckConstraintDefinition { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneColIdent creates a deep clone of the input. +func CloneColIdent(n ColIdent) ColIdent { + return *CloneRefOfColIdent(&n) +} + +// CloneRefOfColName creates a deep clone of the input. +func CloneRefOfColName(n *ColName) *ColName { + return n +} + +// CloneRefOfCollateExpr creates a deep clone of the input. +func CloneRefOfCollateExpr(n *CollateExpr) *CollateExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfColumnDefinition creates a deep clone of the input. +func CloneRefOfColumnDefinition(n *ColumnDefinition) *ColumnDefinition { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Type = CloneColumnType(n.Type) + return &out +} + +// CloneRefOfColumnType creates a deep clone of the input. +func CloneRefOfColumnType(n *ColumnType) *ColumnType { + if n == nil { + return nil + } + out := *n + out.Options = CloneRefOfColumnTypeOptions(n.Options) + out.Length = CloneRefOfLiteral(n.Length) + out.Scale = CloneRefOfLiteral(n.Scale) + out.EnumValues = CloneSliceOfString(n.EnumValues) + return &out +} + +// CloneColumns creates a deep clone of the input. +func CloneColumns(n Columns) Columns { + res := make(Columns, 0, len(n)) + for _, x := range n { + res = append(res, CloneColIdent(x)) + } + return res +} + +// CloneComments creates a deep clone of the input. 
+func CloneComments(n Comments) Comments { + res := make(Comments, 0, len(n)) + copy(res, n) + return res +} + +// CloneRefOfCommit creates a deep clone of the input. +func CloneRefOfCommit(n *Commit) *Commit { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfComparisonExpr creates a deep clone of the input. +func CloneRefOfComparisonExpr(n *ComparisonExpr) *ComparisonExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + out.Escape = CloneExpr(n.Escape) + return &out +} + +// CloneRefOfConstraintDefinition creates a deep clone of the input. +func CloneRefOfConstraintDefinition(n *ConstraintDefinition) *ConstraintDefinition { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Details = CloneConstraintInfo(n.Details) + return &out +} + +// CloneRefOfConvertExpr creates a deep clone of the input. +func CloneRefOfConvertExpr(n *ConvertExpr) *ConvertExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + out.Type = CloneRefOfConvertType(n.Type) + return &out +} + +// CloneRefOfConvertType creates a deep clone of the input. +func CloneRefOfConvertType(n *ConvertType) *ConvertType { + if n == nil { + return nil + } + out := *n + out.Length = CloneRefOfLiteral(n.Length) + out.Scale = CloneRefOfLiteral(n.Scale) + return &out +} + +// CloneRefOfConvertUsingExpr creates a deep clone of the input. +func CloneRefOfConvertUsingExpr(n *ConvertUsingExpr) *ConvertUsingExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfCreateDatabase creates a deep clone of the input. 
+func CloneRefOfCreateDatabase(n *CreateDatabase) *CreateDatabase { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.DBName = CloneTableIdent(n.DBName) + out.CreateOptions = CloneSliceOfCollateAndCharset(n.CreateOptions) + return &out +} + +// CloneRefOfCreateTable creates a deep clone of the input. +func CloneRefOfCreateTable(n *CreateTable) *CreateTable { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + out.TableSpec = CloneRefOfTableSpec(n.TableSpec) + out.OptLike = CloneRefOfOptLike(n.OptLike) + return &out +} + +// CloneRefOfCreateView creates a deep clone of the input. +func CloneRefOfCreateView(n *CreateView) *CreateView { + if n == nil { + return nil + } + out := *n + out.ViewName = CloneTableName(n.ViewName) + out.Columns = CloneColumns(n.Columns) + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfCurTimeFuncExpr creates a deep clone of the input. +func CloneRefOfCurTimeFuncExpr(n *CurTimeFuncExpr) *CurTimeFuncExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Fsp = CloneExpr(n.Fsp) + return &out +} + +// CloneRefOfDefault creates a deep clone of the input. +func CloneRefOfDefault(n *Default) *Default { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfDelete creates a deep clone of the input. +func CloneRefOfDelete(n *Delete) *Delete { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.Targets = CloneTableNames(n.Targets) + out.TableExprs = CloneTableExprs(n.TableExprs) + out.Partitions = ClonePartitions(n.Partitions) + out.Where = CloneRefOfWhere(n.Where) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneRefOfDerivedTable creates a deep clone of the input. 
+func CloneRefOfDerivedTable(n *DerivedTable) *DerivedTable { + if n == nil { + return nil + } + out := *n + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfDropColumn creates a deep clone of the input. +func CloneRefOfDropColumn(n *DropColumn) *DropColumn { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + return &out +} + +// CloneRefOfDropDatabase creates a deep clone of the input. +func CloneRefOfDropDatabase(n *DropDatabase) *DropDatabase { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.DBName = CloneTableIdent(n.DBName) + return &out +} + +// CloneRefOfDropKey creates a deep clone of the input. +func CloneRefOfDropKey(n *DropKey) *DropKey { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + return &out +} + +// CloneRefOfDropTable creates a deep clone of the input. +func CloneRefOfDropTable(n *DropTable) *DropTable { + if n == nil { + return nil + } + out := *n + out.FromTables = CloneTableNames(n.FromTables) + return &out +} + +// CloneRefOfDropView creates a deep clone of the input. +func CloneRefOfDropView(n *DropView) *DropView { + if n == nil { + return nil + } + out := *n + out.FromTables = CloneTableNames(n.FromTables) + return &out +} + +// CloneRefOfExistsExpr creates a deep clone of the input. +func CloneRefOfExistsExpr(n *ExistsExpr) *ExistsExpr { + if n == nil { + return nil + } + out := *n + out.Subquery = CloneRefOfSubquery(n.Subquery) + return &out +} + +// CloneRefOfExplainStmt creates a deep clone of the input. +func CloneRefOfExplainStmt(n *ExplainStmt) *ExplainStmt { + if n == nil { + return nil + } + out := *n + out.Statement = CloneStatement(n.Statement) + return &out +} + +// CloneRefOfExplainTab creates a deep clone of the input. 
+func CloneRefOfExplainTab(n *ExplainTab) *ExplainTab { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneExprs creates a deep clone of the input. +func CloneExprs(n Exprs) Exprs { + res := make(Exprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneExpr(x)) + } + return res +} + +// CloneRefOfFlush creates a deep clone of the input. +func CloneRefOfFlush(n *Flush) *Flush { + if n == nil { + return nil + } + out := *n + out.FlushOptions = CloneSliceOfString(n.FlushOptions) + out.TableNames = CloneTableNames(n.TableNames) + return &out +} + +// CloneRefOfForce creates a deep clone of the input. +func CloneRefOfForce(n *Force) *Force { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfForeignKeyDefinition creates a deep clone of the input. +func CloneRefOfForeignKeyDefinition(n *ForeignKeyDefinition) *ForeignKeyDefinition { + if n == nil { + return nil + } + out := *n + out.Source = CloneColumns(n.Source) + out.ReferencedTable = CloneTableName(n.ReferencedTable) + out.ReferencedColumns = CloneColumns(n.ReferencedColumns) + return &out +} + +// CloneRefOfFuncExpr creates a deep clone of the input. +func CloneRefOfFuncExpr(n *FuncExpr) *FuncExpr { + if n == nil { + return nil + } + out := *n + out.Qualifier = CloneTableIdent(n.Qualifier) + out.Name = CloneColIdent(n.Name) + out.Exprs = CloneSelectExprs(n.Exprs) + return &out +} + +// CloneGroupBy creates a deep clone of the input. +func CloneGroupBy(n GroupBy) GroupBy { + res := make(GroupBy, 0, len(n)) + for _, x := range n { + res = append(res, CloneExpr(x)) + } + return res +} + +// CloneRefOfGroupConcatExpr creates a deep clone of the input. 
+func CloneRefOfGroupConcatExpr(n *GroupConcatExpr) *GroupConcatExpr { + if n == nil { + return nil + } + out := *n + out.Exprs = CloneSelectExprs(n.Exprs) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneRefOfIndexDefinition creates a deep clone of the input. +func CloneRefOfIndexDefinition(n *IndexDefinition) *IndexDefinition { + if n == nil { + return nil + } + out := *n + out.Info = CloneRefOfIndexInfo(n.Info) + out.Columns = CloneSliceOfRefOfIndexColumn(n.Columns) + out.Options = CloneSliceOfRefOfIndexOption(n.Options) + return &out +} + +// CloneRefOfIndexHints creates a deep clone of the input. +func CloneRefOfIndexHints(n *IndexHints) *IndexHints { + if n == nil { + return nil + } + out := *n + out.Indexes = CloneSliceOfColIdent(n.Indexes) + return &out +} + +// CloneRefOfIndexInfo creates a deep clone of the input. +func CloneRefOfIndexInfo(n *IndexInfo) *IndexInfo { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.ConstraintName = CloneColIdent(n.ConstraintName) + return &out +} + +// CloneRefOfInsert creates a deep clone of the input. +func CloneRefOfInsert(n *Insert) *Insert { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.Table = CloneTableName(n.Table) + out.Partitions = ClonePartitions(n.Partitions) + out.Columns = CloneColumns(n.Columns) + out.Rows = CloneInsertRows(n.Rows) + out.OnDup = CloneOnDup(n.OnDup) + return &out +} + +// CloneRefOfIntervalExpr creates a deep clone of the input. +func CloneRefOfIntervalExpr(n *IntervalExpr) *IntervalExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfIsExpr creates a deep clone of the input. +func CloneRefOfIsExpr(n *IsExpr) *IsExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneJoinCondition creates a deep clone of the input. 
+func CloneJoinCondition(n JoinCondition) JoinCondition { + return *CloneRefOfJoinCondition(&n) +} + +// CloneRefOfJoinTableExpr creates a deep clone of the input. +func CloneRefOfJoinTableExpr(n *JoinTableExpr) *JoinTableExpr { + if n == nil { + return nil + } + out := *n + out.LeftExpr = CloneTableExpr(n.LeftExpr) + out.RightExpr = CloneTableExpr(n.RightExpr) + out.Condition = CloneJoinCondition(n.Condition) + return &out +} + +// CloneRefOfKeyState creates a deep clone of the input. +func CloneRefOfKeyState(n *KeyState) *KeyState { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfLimit creates a deep clone of the input. +func CloneRefOfLimit(n *Limit) *Limit { + if n == nil { + return nil + } + out := *n + out.Offset = CloneExpr(n.Offset) + out.Rowcount = CloneExpr(n.Rowcount) + return &out +} + +// CloneListArg creates a deep clone of the input. +func CloneListArg(n ListArg) ListArg { + res := make(ListArg, 0, len(n)) + copy(res, n) + return res +} + +// CloneRefOfLiteral creates a deep clone of the input. +func CloneRefOfLiteral(n *Literal) *Literal { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfLoad creates a deep clone of the input. +func CloneRefOfLoad(n *Load) *Load { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfLockOption creates a deep clone of the input. +func CloneRefOfLockOption(n *LockOption) *LockOption { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfLockTables creates a deep clone of the input. +func CloneRefOfLockTables(n *LockTables) *LockTables { + if n == nil { + return nil + } + out := *n + out.Tables = CloneTableAndLockTypes(n.Tables) + return &out +} + +// CloneRefOfMatchExpr creates a deep clone of the input. 
+func CloneRefOfMatchExpr(n *MatchExpr) *MatchExpr { + if n == nil { + return nil + } + out := *n + out.Columns = CloneSelectExprs(n.Columns) + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfModifyColumn creates a deep clone of the input. +func CloneRefOfModifyColumn(n *ModifyColumn) *ModifyColumn { + if n == nil { + return nil + } + out := *n + out.NewColDefinition = CloneRefOfColumnDefinition(n.NewColDefinition) + out.First = CloneRefOfColName(n.First) + out.After = CloneRefOfColName(n.After) + return &out +} + +// CloneRefOfNextval creates a deep clone of the input. +func CloneRefOfNextval(n *Nextval) *Nextval { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfNotExpr creates a deep clone of the input. +func CloneRefOfNotExpr(n *NotExpr) *NotExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfNullVal creates a deep clone of the input. +func CloneRefOfNullVal(n *NullVal) *NullVal { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneOnDup creates a deep clone of the input. +func CloneOnDup(n OnDup) OnDup { + res := make(OnDup, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfUpdateExpr(x)) + } + return res +} + +// CloneRefOfOptLike creates a deep clone of the input. +func CloneRefOfOptLike(n *OptLike) *OptLike { + if n == nil { + return nil + } + out := *n + out.LikeTable = CloneTableName(n.LikeTable) + return &out +} + +// CloneRefOfOrExpr creates a deep clone of the input. +func CloneRefOfOrExpr(n *OrExpr) *OrExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneRefOfOrder creates a deep clone of the input. +func CloneRefOfOrder(n *Order) *Order { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneOrderBy creates a deep clone of the input. 
+func CloneOrderBy(n OrderBy) OrderBy { + res := make(OrderBy, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfOrder(x)) + } + return res +} + +// CloneRefOfOrderByOption creates a deep clone of the input. +func CloneRefOfOrderByOption(n *OrderByOption) *OrderByOption { + if n == nil { + return nil + } + out := *n + out.Cols = CloneColumns(n.Cols) + return &out +} + +// CloneRefOfOtherAdmin creates a deep clone of the input. +func CloneRefOfOtherAdmin(n *OtherAdmin) *OtherAdmin { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfOtherRead creates a deep clone of the input. +func CloneRefOfOtherRead(n *OtherRead) *OtherRead { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfParenSelect creates a deep clone of the input. +func CloneRefOfParenSelect(n *ParenSelect) *ParenSelect { + if n == nil { + return nil + } + out := *n + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfParenTableExpr creates a deep clone of the input. +func CloneRefOfParenTableExpr(n *ParenTableExpr) *ParenTableExpr { + if n == nil { + return nil + } + out := *n + out.Exprs = CloneTableExprs(n.Exprs) + return &out +} + +// CloneRefOfPartitionDefinition creates a deep clone of the input. +func CloneRefOfPartitionDefinition(n *PartitionDefinition) *PartitionDefinition { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Limit = CloneExpr(n.Limit) + return &out +} + +// CloneRefOfPartitionSpec creates a deep clone of the input. +func CloneRefOfPartitionSpec(n *PartitionSpec) *PartitionSpec { + if n == nil { + return nil + } + out := *n + out.Names = ClonePartitions(n.Names) + out.Number = CloneRefOfLiteral(n.Number) + out.TableName = CloneTableName(n.TableName) + out.Definitions = CloneSliceOfRefOfPartitionDefinition(n.Definitions) + return &out +} + +// ClonePartitions creates a deep clone of the input. 
+func ClonePartitions(n Partitions) Partitions { + res := make(Partitions, 0, len(n)) + for _, x := range n { + res = append(res, CloneColIdent(x)) + } + return res +} + +// CloneRefOfRangeCond creates a deep clone of the input. +func CloneRefOfRangeCond(n *RangeCond) *RangeCond { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.From = CloneExpr(n.From) + out.To = CloneExpr(n.To) + return &out +} + +// CloneRefOfRelease creates a deep clone of the input. +func CloneRefOfRelease(n *Release) *Release { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + return &out +} + +// CloneRefOfRenameIndex creates a deep clone of the input. +func CloneRefOfRenameIndex(n *RenameIndex) *RenameIndex { + if n == nil { + return nil + } + out := *n + out.OldName = CloneColIdent(n.OldName) + out.NewName = CloneColIdent(n.NewName) + return &out +} + +// CloneRefOfRenameTable creates a deep clone of the input. +func CloneRefOfRenameTable(n *RenameTable) *RenameTable { + if n == nil { + return nil + } + out := *n + out.TablePairs = CloneSliceOfRefOfRenameTablePair(n.TablePairs) + return &out +} + +// CloneRefOfRenameTableName creates a deep clone of the input. +func CloneRefOfRenameTableName(n *RenameTableName) *RenameTableName { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneRefOfRevertMigration creates a deep clone of the input. +func CloneRefOfRevertMigration(n *RevertMigration) *RevertMigration { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfRollback creates a deep clone of the input. +func CloneRefOfRollback(n *Rollback) *Rollback { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfSRollback creates a deep clone of the input. 
+func CloneRefOfSRollback(n *SRollback) *SRollback { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + return &out +} + +// CloneRefOfSavepoint creates a deep clone of the input. +func CloneRefOfSavepoint(n *Savepoint) *Savepoint { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + return &out +} + +// CloneRefOfSelect creates a deep clone of the input. +func CloneRefOfSelect(n *Select) *Select { + if n == nil { + return nil + } + out := *n + out.Cache = CloneRefOfBool(n.Cache) + out.Comments = CloneComments(n.Comments) + out.SelectExprs = CloneSelectExprs(n.SelectExprs) + out.From = CloneTableExprs(n.From) + out.Where = CloneRefOfWhere(n.Where) + out.GroupBy = CloneGroupBy(n.GroupBy) + out.Having = CloneRefOfWhere(n.Having) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + out.Into = CloneRefOfSelectInto(n.Into) + return &out +} + +// CloneSelectExprs creates a deep clone of the input. +func CloneSelectExprs(n SelectExprs) SelectExprs { + res := make(SelectExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneSelectExpr(x)) + } + return res +} + +// CloneRefOfSelectInto creates a deep clone of the input. +func CloneRefOfSelectInto(n *SelectInto) *SelectInto { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfSet creates a deep clone of the input. +func CloneRefOfSet(n *Set) *Set { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.Exprs = CloneSetExprs(n.Exprs) + return &out +} + +// CloneRefOfSetExpr creates a deep clone of the input. +func CloneRefOfSetExpr(n *SetExpr) *SetExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneSetExprs creates a deep clone of the input. 
+func CloneSetExprs(n SetExprs) SetExprs { + res := make(SetExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfSetExpr(x)) + } + return res +} + +// CloneRefOfSetTransaction creates a deep clone of the input. +func CloneRefOfSetTransaction(n *SetTransaction) *SetTransaction { + if n == nil { + return nil + } + out := *n + out.SQLNode = CloneSQLNode(n.SQLNode) + out.Comments = CloneComments(n.Comments) + out.Characteristics = CloneSliceOfCharacteristic(n.Characteristics) + return &out +} + +// CloneRefOfShow creates a deep clone of the input. +func CloneRefOfShow(n *Show) *Show { + if n == nil { + return nil + } + out := *n + out.Internal = CloneShowInternal(n.Internal) + return &out +} + +// CloneRefOfShowBasic creates a deep clone of the input. +func CloneRefOfShowBasic(n *ShowBasic) *ShowBasic { + if n == nil { + return nil + } + out := *n + out.Tbl = CloneTableName(n.Tbl) + out.DbName = CloneTableIdent(n.DbName) + out.Filter = CloneRefOfShowFilter(n.Filter) + return &out +} + +// CloneRefOfShowCreate creates a deep clone of the input. +func CloneRefOfShowCreate(n *ShowCreate) *ShowCreate { + if n == nil { + return nil + } + out := *n + out.Op = CloneTableName(n.Op) + return &out +} + +// CloneRefOfShowFilter creates a deep clone of the input. +func CloneRefOfShowFilter(n *ShowFilter) *ShowFilter { + if n == nil { + return nil + } + out := *n + out.Filter = CloneExpr(n.Filter) + return &out +} + +// CloneRefOfShowLegacy creates a deep clone of the input. +func CloneRefOfShowLegacy(n *ShowLegacy) *ShowLegacy { + if n == nil { + return nil + } + out := *n + out.OnTable = CloneTableName(n.OnTable) + out.Table = CloneTableName(n.Table) + out.ShowTablesOpt = CloneRefOfShowTablesOpt(n.ShowTablesOpt) + out.ShowCollationFilterOpt = CloneExpr(n.ShowCollationFilterOpt) + return &out +} + +// CloneRefOfStarExpr creates a deep clone of the input. 
+func CloneRefOfStarExpr(n *StarExpr) *StarExpr { + if n == nil { + return nil + } + out := *n + out.TableName = CloneTableName(n.TableName) + return &out +} + +// CloneRefOfStream creates a deep clone of the input. +func CloneRefOfStream(n *Stream) *Stream { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.SelectExpr = CloneSelectExpr(n.SelectExpr) + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneRefOfSubquery creates a deep clone of the input. +func CloneRefOfSubquery(n *Subquery) *Subquery { + if n == nil { + return nil + } + out := *n + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfSubstrExpr creates a deep clone of the input. +func CloneRefOfSubstrExpr(n *SubstrExpr) *SubstrExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + out.StrVal = CloneRefOfLiteral(n.StrVal) + out.From = CloneExpr(n.From) + out.To = CloneExpr(n.To) + return &out +} + +// CloneTableExprs creates a deep clone of the input. +func CloneTableExprs(n TableExprs) TableExprs { + res := make(TableExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneTableExpr(x)) + } + return res +} + +// CloneTableIdent creates a deep clone of the input. +func CloneTableIdent(n TableIdent) TableIdent { + return *CloneRefOfTableIdent(&n) +} + +// CloneTableName creates a deep clone of the input. +func CloneTableName(n TableName) TableName { + return *CloneRefOfTableName(&n) +} + +// CloneTableNames creates a deep clone of the input. +func CloneTableNames(n TableNames) TableNames { + res := make(TableNames, 0, len(n)) + for _, x := range n { + res = append(res, CloneTableName(x)) + } + return res +} + +// CloneTableOptions creates a deep clone of the input. 
+func CloneTableOptions(n TableOptions) TableOptions { + res := make(TableOptions, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfTableOption(x)) + } + return res +} + +// CloneRefOfTableSpec creates a deep clone of the input. +func CloneRefOfTableSpec(n *TableSpec) *TableSpec { + if n == nil { + return nil + } + out := *n + out.Columns = CloneSliceOfRefOfColumnDefinition(n.Columns) + out.Indexes = CloneSliceOfRefOfIndexDefinition(n.Indexes) + out.Constraints = CloneSliceOfRefOfConstraintDefinition(n.Constraints) + out.Options = CloneTableOptions(n.Options) + return &out +} + +// CloneRefOfTablespaceOperation creates a deep clone of the input. +func CloneRefOfTablespaceOperation(n *TablespaceOperation) *TablespaceOperation { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfTimestampFuncExpr creates a deep clone of the input. +func CloneRefOfTimestampFuncExpr(n *TimestampFuncExpr) *TimestampFuncExpr { + if n == nil { + return nil + } + out := *n + out.Expr1 = CloneExpr(n.Expr1) + out.Expr2 = CloneExpr(n.Expr2) + return &out +} + +// CloneRefOfTruncateTable creates a deep clone of the input. +func CloneRefOfTruncateTable(n *TruncateTable) *TruncateTable { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneRefOfUnaryExpr creates a deep clone of the input. +func CloneRefOfUnaryExpr(n *UnaryExpr) *UnaryExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfUnion creates a deep clone of the input. +func CloneRefOfUnion(n *Union) *Union { + if n == nil { + return nil + } + out := *n + out.FirstStatement = CloneSelectStatement(n.FirstStatement) + out.UnionSelects = CloneSliceOfRefOfUnionSelect(n.UnionSelects) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneRefOfUnionSelect creates a deep clone of the input. 
+func CloneRefOfUnionSelect(n *UnionSelect) *UnionSelect { + if n == nil { + return nil + } + out := *n + out.Statement = CloneSelectStatement(n.Statement) + return &out +} + +// CloneRefOfUnlockTables creates a deep clone of the input. +func CloneRefOfUnlockTables(n *UnlockTables) *UnlockTables { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfUpdate creates a deep clone of the input. +func CloneRefOfUpdate(n *Update) *Update { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.TableExprs = CloneTableExprs(n.TableExprs) + out.Exprs = CloneUpdateExprs(n.Exprs) + out.Where = CloneRefOfWhere(n.Where) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneRefOfUpdateExpr creates a deep clone of the input. +func CloneRefOfUpdateExpr(n *UpdateExpr) *UpdateExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneUpdateExprs creates a deep clone of the input. +func CloneUpdateExprs(n UpdateExprs) UpdateExprs { + res := make(UpdateExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfUpdateExpr(x)) + } + return res +} + +// CloneRefOfUse creates a deep clone of the input. +func CloneRefOfUse(n *Use) *Use { + if n == nil { + return nil + } + out := *n + out.DBName = CloneTableIdent(n.DBName) + return &out +} + +// CloneRefOfVStream creates a deep clone of the input. +func CloneRefOfVStream(n *VStream) *VStream { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.SelectExpr = CloneSelectExpr(n.SelectExpr) + out.Table = CloneTableName(n.Table) + out.Where = CloneRefOfWhere(n.Where) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneValTuple creates a deep clone of the input. 
+func CloneValTuple(n ValTuple) ValTuple { + res := make(ValTuple, 0, len(n)) + for _, x := range n { + res = append(res, CloneExpr(x)) + } + return res +} + +// CloneRefOfValidation creates a deep clone of the input. +func CloneRefOfValidation(n *Validation) *Validation { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneValues creates a deep clone of the input. +func CloneValues(n Values) Values { + res := make(Values, 0, len(n)) + for _, x := range n { + res = append(res, CloneValTuple(x)) + } + return res +} + +// CloneRefOfValuesFuncExpr creates a deep clone of the input. +func CloneRefOfValuesFuncExpr(n *ValuesFuncExpr) *ValuesFuncExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + return &out +} + +// CloneVindexParam creates a deep clone of the input. +func CloneVindexParam(n VindexParam) VindexParam { + return *CloneRefOfVindexParam(&n) +} + +// CloneRefOfVindexSpec creates a deep clone of the input. +func CloneRefOfVindexSpec(n *VindexSpec) *VindexSpec { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Type = CloneColIdent(n.Type) + out.Params = CloneSliceOfVindexParam(n.Params) + return &out +} + +// CloneRefOfWhen creates a deep clone of the input. +func CloneRefOfWhen(n *When) *When { + if n == nil { + return nil + } + out := *n + out.Cond = CloneExpr(n.Cond) + out.Val = CloneExpr(n.Val) + return &out +} + +// CloneRefOfWhere creates a deep clone of the input. +func CloneRefOfWhere(n *Where) *Where { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfXorExpr creates a deep clone of the input. +func CloneRefOfXorExpr(n *XorExpr) *XorExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneAlterOption creates a deep clone of the input. 
+func CloneAlterOption(in AlterOption) AlterOption { + if in == nil { + return nil + } + switch in := in.(type) { + case *AddColumns: + return CloneRefOfAddColumns(in) + case *AddConstraintDefinition: + return CloneRefOfAddConstraintDefinition(in) + case *AddIndexDefinition: + return CloneRefOfAddIndexDefinition(in) + case AlgorithmValue: + return in + case *AlterCharset: + return CloneRefOfAlterCharset(in) + case *AlterColumn: + return CloneRefOfAlterColumn(in) + case *ChangeColumn: + return CloneRefOfChangeColumn(in) + case *DropColumn: + return CloneRefOfDropColumn(in) + case *DropKey: + return CloneRefOfDropKey(in) + case *Force: + return CloneRefOfForce(in) + case *KeyState: + return CloneRefOfKeyState(in) + case *LockOption: + return CloneRefOfLockOption(in) + case *ModifyColumn: + return CloneRefOfModifyColumn(in) + case *OrderByOption: + return CloneRefOfOrderByOption(in) + case *RenameIndex: + return CloneRefOfRenameIndex(in) + case *RenameTableName: + return CloneRefOfRenameTableName(in) + case TableOptions: + return CloneTableOptions(in) + case *TablespaceOperation: + return CloneRefOfTablespaceOperation(in) + case *Validation: + return CloneRefOfValidation(in) + default: + // this should never happen + return nil + } +} + +// CloneCharacteristic creates a deep clone of the input. +func CloneCharacteristic(in Characteristic) Characteristic { + if in == nil { + return nil + } + switch in := in.(type) { + case AccessMode: + return in + case IsolationLevel: + return in + default: + // this should never happen + return nil + } +} + +// CloneColTuple creates a deep clone of the input. +func CloneColTuple(in ColTuple) ColTuple { + if in == nil { + return nil + } + switch in := in.(type) { + case ListArg: + return CloneListArg(in) + case *Subquery: + return CloneRefOfSubquery(in) + case ValTuple: + return CloneValTuple(in) + default: + // this should never happen + return nil + } +} + +// CloneConstraintInfo creates a deep clone of the input. 
+func CloneConstraintInfo(in ConstraintInfo) ConstraintInfo { + if in == nil { + return nil + } + switch in := in.(type) { + case *CheckConstraintDefinition: + return CloneRefOfCheckConstraintDefinition(in) + case *ForeignKeyDefinition: + return CloneRefOfForeignKeyDefinition(in) + default: + // this should never happen + return nil + } +} + +// CloneDBDDLStatement creates a deep clone of the input. +func CloneDBDDLStatement(in DBDDLStatement) DBDDLStatement { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterDatabase: + return CloneRefOfAlterDatabase(in) + case *CreateDatabase: + return CloneRefOfCreateDatabase(in) + case *DropDatabase: + return CloneRefOfDropDatabase(in) + default: + // this should never happen + return nil + } +} + +// CloneDDLStatement creates a deep clone of the input. +func CloneDDLStatement(in DDLStatement) DDLStatement { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterTable: + return CloneRefOfAlterTable(in) + case *AlterView: + return CloneRefOfAlterView(in) + case *CreateTable: + return CloneRefOfCreateTable(in) + case *CreateView: + return CloneRefOfCreateView(in) + case *DropTable: + return CloneRefOfDropTable(in) + case *DropView: + return CloneRefOfDropView(in) + case *RenameTable: + return CloneRefOfRenameTable(in) + case *TruncateTable: + return CloneRefOfTruncateTable(in) + default: + // this should never happen + return nil + } +} + +// CloneExplain creates a deep clone of the input. +func CloneExplain(in Explain) Explain { + if in == nil { + return nil + } + switch in := in.(type) { + case *ExplainStmt: + return CloneRefOfExplainStmt(in) + case *ExplainTab: + return CloneRefOfExplainTab(in) + default: + // this should never happen + return nil + } +} + +// CloneExpr creates a deep clone of the input. 
+func CloneExpr(in Expr) Expr { + if in == nil { + return nil + } + switch in := in.(type) { + case *AndExpr: + return CloneRefOfAndExpr(in) + case Argument: + return in + case *BinaryExpr: + return CloneRefOfBinaryExpr(in) + case BoolVal: + return in + case *CaseExpr: + return CloneRefOfCaseExpr(in) + case *ColName: + return CloneRefOfColName(in) + case *CollateExpr: + return CloneRefOfCollateExpr(in) + case *ComparisonExpr: + return CloneRefOfComparisonExpr(in) + case *ConvertExpr: + return CloneRefOfConvertExpr(in) + case *ConvertUsingExpr: + return CloneRefOfConvertUsingExpr(in) + case *CurTimeFuncExpr: + return CloneRefOfCurTimeFuncExpr(in) + case *Default: + return CloneRefOfDefault(in) + case *ExistsExpr: + return CloneRefOfExistsExpr(in) + case *FuncExpr: + return CloneRefOfFuncExpr(in) + case *GroupConcatExpr: + return CloneRefOfGroupConcatExpr(in) + case *IntervalExpr: + return CloneRefOfIntervalExpr(in) + case *IsExpr: + return CloneRefOfIsExpr(in) + case ListArg: + return CloneListArg(in) + case *Literal: + return CloneRefOfLiteral(in) + case *MatchExpr: + return CloneRefOfMatchExpr(in) + case *NotExpr: + return CloneRefOfNotExpr(in) + case *NullVal: + return CloneRefOfNullVal(in) + case *OrExpr: + return CloneRefOfOrExpr(in) + case *RangeCond: + return CloneRefOfRangeCond(in) + case *Subquery: + return CloneRefOfSubquery(in) + case *SubstrExpr: + return CloneRefOfSubstrExpr(in) + case *TimestampFuncExpr: + return CloneRefOfTimestampFuncExpr(in) + case *UnaryExpr: + return CloneRefOfUnaryExpr(in) + case ValTuple: + return CloneValTuple(in) + case *ValuesFuncExpr: + return CloneRefOfValuesFuncExpr(in) + case *XorExpr: + return CloneRefOfXorExpr(in) + default: + // this should never happen + return nil + } +} + +// CloneInsertRows creates a deep clone of the input. 
+func CloneInsertRows(in InsertRows) InsertRows { + if in == nil { + return nil + } + switch in := in.(type) { + case *ParenSelect: + return CloneRefOfParenSelect(in) + case *Select: + return CloneRefOfSelect(in) + case *Union: + return CloneRefOfUnion(in) + case Values: + return CloneValues(in) + default: + // this should never happen + return nil + } +} + +// CloneSelectExpr creates a deep clone of the input. +func CloneSelectExpr(in SelectExpr) SelectExpr { + if in == nil { + return nil + } + switch in := in.(type) { + case *AliasedExpr: + return CloneRefOfAliasedExpr(in) + case *Nextval: + return CloneRefOfNextval(in) + case *StarExpr: + return CloneRefOfStarExpr(in) + default: + // this should never happen + return nil + } +} + +// CloneSelectStatement creates a deep clone of the input. +func CloneSelectStatement(in SelectStatement) SelectStatement { + if in == nil { + return nil + } + switch in := in.(type) { + case *ParenSelect: + return CloneRefOfParenSelect(in) + case *Select: + return CloneRefOfSelect(in) + case *Union: + return CloneRefOfUnion(in) + default: + // this should never happen + return nil + } +} + +// CloneShowInternal creates a deep clone of the input. +func CloneShowInternal(in ShowInternal) ShowInternal { + if in == nil { + return nil + } + switch in := in.(type) { + case *ShowBasic: + return CloneRefOfShowBasic(in) + case *ShowCreate: + return CloneRefOfShowCreate(in) + case *ShowLegacy: + return CloneRefOfShowLegacy(in) + default: + // this should never happen + return nil + } +} + +// CloneSimpleTableExpr creates a deep clone of the input. +func CloneSimpleTableExpr(in SimpleTableExpr) SimpleTableExpr { + if in == nil { + return nil + } + switch in := in.(type) { + case *DerivedTable: + return CloneRefOfDerivedTable(in) + case TableName: + return CloneTableName(in) + default: + // this should never happen + return nil + } +} + +// CloneStatement creates a deep clone of the input. 
// Dispatches on every concrete Statement type; generated by ASTHelperGen,
// so a newly added Statement implementation must be regenerated in here
// (an unknown type silently clones to nil via the default arm).
func CloneStatement(in Statement) Statement {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case *AlterDatabase:
		return CloneRefOfAlterDatabase(in)
	case *AlterMigration:
		return CloneRefOfAlterMigration(in)
	case *AlterTable:
		return CloneRefOfAlterTable(in)
	case *AlterView:
		return CloneRefOfAlterView(in)
	case *AlterVschema:
		return CloneRefOfAlterVschema(in)
	case *Begin:
		return CloneRefOfBegin(in)
	case *CallProc:
		return CloneRefOfCallProc(in)
	case *Commit:
		return CloneRefOfCommit(in)
	case *CreateDatabase:
		return CloneRefOfCreateDatabase(in)
	case *CreateTable:
		return CloneRefOfCreateTable(in)
	case *CreateView:
		return CloneRefOfCreateView(in)
	case *Delete:
		return CloneRefOfDelete(in)
	case *DropDatabase:
		return CloneRefOfDropDatabase(in)
	case *DropTable:
		return CloneRefOfDropTable(in)
	case *DropView:
		return CloneRefOfDropView(in)
	case *ExplainStmt:
		return CloneRefOfExplainStmt(in)
	case *ExplainTab:
		return CloneRefOfExplainTab(in)
	case *Flush:
		return CloneRefOfFlush(in)
	case *Insert:
		return CloneRefOfInsert(in)
	case *Load:
		return CloneRefOfLoad(in)
	case *LockTables:
		return CloneRefOfLockTables(in)
	case *OtherAdmin:
		return CloneRefOfOtherAdmin(in)
	case *OtherRead:
		return CloneRefOfOtherRead(in)
	case *ParenSelect:
		return CloneRefOfParenSelect(in)
	case *Release:
		return CloneRefOfRelease(in)
	case *RenameTable:
		return CloneRefOfRenameTable(in)
	case *RevertMigration:
		return CloneRefOfRevertMigration(in)
	case *Rollback:
		return CloneRefOfRollback(in)
	case *SRollback:
		return CloneRefOfSRollback(in)
	case *Savepoint:
		return CloneRefOfSavepoint(in)
	case *Select:
		return CloneRefOfSelect(in)
	case *Set:
		return CloneRefOfSet(in)
	case *SetTransaction:
		return CloneRefOfSetTransaction(in)
	case *Show:
		return CloneRefOfShow(in)
	case *Stream:
		return CloneRefOfStream(in)
	case *TruncateTable:
		return CloneRefOfTruncateTable(in)
	case *Union:
		return CloneRefOfUnion(in)
	case *UnlockTables:
		return CloneRefOfUnlockTables(in)
	case *Update:
		return CloneRefOfUpdate(in)
	case *Use:
		return CloneRefOfUse(in)
	case *VStream:
		return CloneRefOfVStream(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneTableExpr creates a deep clone of the input.
func CloneTableExpr(in TableExpr) TableExpr {
	if in == nil {
		return nil
	}
	switch in := in.(type) {
	case *AliasedTableExpr:
		return CloneRefOfAliasedTableExpr(in)
	case *JoinTableExpr:
		return CloneRefOfJoinTableExpr(in)
	case *ParenTableExpr:
		return CloneRefOfParenTableExpr(in)
	default:
		// this should never happen
		return nil
	}
}

// CloneSliceOfRefOfColumnDefinition creates a deep clone of the input.
// Note: a nil slice clones to a non-nil empty slice (make with len 0).
func CloneSliceOfRefOfColumnDefinition(n []*ColumnDefinition) []*ColumnDefinition {
	res := make([]*ColumnDefinition, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfColumnDefinition(x))
	}
	return res
}

// CloneSliceOfCollateAndCharset creates a deep clone of the input.
func CloneSliceOfCollateAndCharset(n []CollateAndCharset) []CollateAndCharset {
	res := make([]CollateAndCharset, 0, len(n))
	for _, x := range n {
		res = append(res, CloneCollateAndCharset(x))
	}
	return res
}

// CloneSliceOfAlterOption creates a deep clone of the input.
func CloneSliceOfAlterOption(n []AlterOption) []AlterOption {
	res := make([]AlterOption, 0, len(n))
	for _, x := range n {
		res = append(res, CloneAlterOption(x))
	}
	return res
}

// CloneSliceOfColIdent creates a deep clone of the input.
func CloneSliceOfColIdent(n []ColIdent) []ColIdent {
	res := make([]ColIdent, 0, len(n))
	for _, x := range n {
		res = append(res, CloneColIdent(x))
	}
	return res
}

// CloneSliceOfRefOfWhen creates a deep clone of the input.
+func CloneSliceOfRefOfWhen(n []*When) []*When { + res := make([]*When, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfWhen(x)) + } + return res +} + +// CloneRefOfColIdent creates a deep clone of the input. +func CloneRefOfColIdent(n *ColIdent) *ColIdent { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneColumnType creates a deep clone of the input. +func CloneColumnType(n ColumnType) ColumnType { + return *CloneRefOfColumnType(&n) +} + +// CloneRefOfColumnTypeOptions creates a deep clone of the input. +func CloneRefOfColumnTypeOptions(n *ColumnTypeOptions) *ColumnTypeOptions { + if n == nil { + return nil + } + out := *n + out.Null = CloneRefOfBool(n.Null) + out.Default = CloneExpr(n.Default) + out.OnUpdate = CloneExpr(n.OnUpdate) + out.Comment = CloneRefOfLiteral(n.Comment) + return &out +} + +// CloneSliceOfString creates a deep clone of the input. +func CloneSliceOfString(n []string) []string { + res := make([]string, 0, len(n)) + copy(res, n) + return res +} + +// CloneSliceOfRefOfIndexColumn creates a deep clone of the input. +func CloneSliceOfRefOfIndexColumn(n []*IndexColumn) []*IndexColumn { + res := make([]*IndexColumn, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfIndexColumn(x)) + } + return res +} + +// CloneSliceOfRefOfIndexOption creates a deep clone of the input. +func CloneSliceOfRefOfIndexOption(n []*IndexOption) []*IndexOption { + res := make([]*IndexOption, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfIndexOption(x)) + } + return res +} + +// CloneRefOfJoinCondition creates a deep clone of the input. +func CloneRefOfJoinCondition(n *JoinCondition) *JoinCondition { + if n == nil { + return nil + } + out := *n + out.On = CloneExpr(n.On) + out.Using = CloneColumns(n.Using) + return &out +} + +// CloneTableAndLockTypes creates a deep clone of the input. 
// Element-wise deep clone of the named slice type TableAndLockTypes.
func CloneTableAndLockTypes(n TableAndLockTypes) TableAndLockTypes {
	res := make(TableAndLockTypes, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfTableAndLockType(x))
	}
	return res
}

// CloneSliceOfRefOfPartitionDefinition creates a deep clone of the input.
func CloneSliceOfRefOfPartitionDefinition(n []*PartitionDefinition) []*PartitionDefinition {
	res := make([]*PartitionDefinition, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfPartitionDefinition(x))
	}
	return res
}

// CloneSliceOfRefOfRenameTablePair creates a deep clone of the input.
func CloneSliceOfRefOfRenameTablePair(n []*RenameTablePair) []*RenameTablePair {
	res := make([]*RenameTablePair, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfRenameTablePair(x))
	}
	return res
}

// CloneRefOfBool creates a deep clone of the input.
func CloneRefOfBool(n *bool) *bool {
	if n == nil {
		return nil
	}
	out := *n
	return &out
}

// CloneSliceOfCharacteristic creates a deep clone of the input.
func CloneSliceOfCharacteristic(n []Characteristic) []Characteristic {
	res := make([]Characteristic, 0, len(n))
	for _, x := range n {
		res = append(res, CloneCharacteristic(x))
	}
	return res
}

// CloneRefOfShowTablesOpt creates a deep clone of the input.
func CloneRefOfShowTablesOpt(n *ShowTablesOpt) *ShowTablesOpt {
	if n == nil {
		return nil
	}
	// Shallow copy, then deep-clone the only pointer field that is
	// recursed into here (Filter).
	out := *n
	out.Filter = CloneRefOfShowFilter(n.Filter)
	return &out
}

// CloneRefOfTableIdent creates a deep clone of the input.
func CloneRefOfTableIdent(n *TableIdent) *TableIdent {
	if n == nil {
		return nil
	}
	out := *n
	return &out
}

// CloneRefOfTableName creates a deep clone of the input.
func CloneRefOfTableName(n *TableName) *TableName {
	if n == nil {
		return nil
	}
	out := *n
	out.Name = CloneTableIdent(n.Name)
	out.Qualifier = CloneTableIdent(n.Qualifier)
	return &out
}

// CloneRefOfTableOption creates a deep clone of the input.
// Shallow copy plus deep clones of the pointer/slice fields.
func CloneRefOfTableOption(n *TableOption) *TableOption {
	if n == nil {
		return nil
	}
	out := *n
	out.Value = CloneRefOfLiteral(n.Value)
	out.Tables = CloneTableNames(n.Tables)
	return &out
}

// CloneSliceOfRefOfIndexDefinition creates a deep clone of the input.
func CloneSliceOfRefOfIndexDefinition(n []*IndexDefinition) []*IndexDefinition {
	res := make([]*IndexDefinition, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfIndexDefinition(x))
	}
	return res
}

// CloneSliceOfRefOfConstraintDefinition creates a deep clone of the input.
func CloneSliceOfRefOfConstraintDefinition(n []*ConstraintDefinition) []*ConstraintDefinition {
	res := make([]*ConstraintDefinition, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfConstraintDefinition(x))
	}
	return res
}

// CloneSliceOfRefOfUnionSelect creates a deep clone of the input.
func CloneSliceOfRefOfUnionSelect(n []*UnionSelect) []*UnionSelect {
	res := make([]*UnionSelect, 0, len(n))
	for _, x := range n {
		res = append(res, CloneRefOfUnionSelect(x))
	}
	return res
}

// CloneRefOfVindexParam creates a deep clone of the input.
func CloneRefOfVindexParam(n *VindexParam) *VindexParam {
	if n == nil {
		return nil
	}
	out := *n
	out.Key = CloneColIdent(n.Key)
	return &out
}

// CloneSliceOfVindexParam creates a deep clone of the input.
func CloneSliceOfVindexParam(n []VindexParam) []VindexParam {
	res := make([]VindexParam, 0, len(n))
	for _, x := range n {
		res = append(res, CloneVindexParam(x))
	}
	return res
}

// CloneCollateAndCharset creates a deep clone of the input.
// Value wrapper around the pointer-based clone of the same struct.
func CloneCollateAndCharset(n CollateAndCharset) CollateAndCharset {
	return *CloneRefOfCollateAndCharset(&n)
}

// CloneRefOfIndexColumn creates a deep clone of the input.
// Shallow copy plus deep clones of the nested fields.
func CloneRefOfIndexColumn(n *IndexColumn) *IndexColumn {
	if n == nil {
		return nil
	}
	out := *n
	out.Column = CloneColIdent(n.Column)
	out.Length = CloneRefOfLiteral(n.Length)
	return &out
}

// CloneRefOfIndexOption creates a deep clone of the input.
func CloneRefOfIndexOption(n *IndexOption) *IndexOption {
	if n == nil {
		return nil
	}
	out := *n
	out.Value = CloneRefOfLiteral(n.Value)
	return &out
}

// CloneRefOfTableAndLockType creates a deep clone of the input.
func CloneRefOfTableAndLockType(n *TableAndLockType) *TableAndLockType {
	if n == nil {
		return nil
	}
	out := *n
	out.Table = CloneTableExpr(n.Table)
	return &out
}

// CloneRefOfRenameTablePair creates a deep clone of the input.
func CloneRefOfRenameTablePair(n *RenameTablePair) *RenameTablePair {
	if n == nil {
		return nil
	}
	out := *n
	out.FromTable = CloneTableName(n.FromTable)
	out.ToTable = CloneTableName(n.ToTable)
	return &out
}

// CloneRefOfCollateAndCharset creates a deep clone of the input.
// Shallow copy only — assumes CollateAndCharset has no pointer fields
// needing recursion; confirm against the struct definition.
func CloneRefOfCollateAndCharset(n *CollateAndCharset) *CollateAndCharset {
	if n == nil {
		return nil
	}
	out := *n
	return &out
}
diff --git a/go/vt/sqlparser/ast_equals.go b/go/vt/sqlparser/ast_equals.go
new file mode 100644
index 00000000000..c19138584ba
--- /dev/null
+++ b/go/vt/sqlparser/ast_equals.go
@@ -0,0 +1,4076 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by ASTHelperGen. DO NOT EDIT.
+ +package sqlparser + +// EqualsSQLNode does deep equals between the two objects. +func EqualsSQLNode(inA, inB SQLNode) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case AccessMode: + b, ok := inB.(AccessMode) + if !ok { + return false + } + return a == b + case *AddColumns: + b, ok := inB.(*AddColumns) + if !ok { + return false + } + return EqualsRefOfAddColumns(a, b) + case *AddConstraintDefinition: + b, ok := inB.(*AddConstraintDefinition) + if !ok { + return false + } + return EqualsRefOfAddConstraintDefinition(a, b) + case *AddIndexDefinition: + b, ok := inB.(*AddIndexDefinition) + if !ok { + return false + } + return EqualsRefOfAddIndexDefinition(a, b) + case AlgorithmValue: + b, ok := inB.(AlgorithmValue) + if !ok { + return false + } + return a == b + case *AliasedExpr: + b, ok := inB.(*AliasedExpr) + if !ok { + return false + } + return EqualsRefOfAliasedExpr(a, b) + case *AliasedTableExpr: + b, ok := inB.(*AliasedTableExpr) + if !ok { + return false + } + return EqualsRefOfAliasedTableExpr(a, b) + case *AlterCharset: + b, ok := inB.(*AlterCharset) + if !ok { + return false + } + return EqualsRefOfAlterCharset(a, b) + case *AlterColumn: + b, ok := inB.(*AlterColumn) + if !ok { + return false + } + return EqualsRefOfAlterColumn(a, b) + case *AlterDatabase: + b, ok := inB.(*AlterDatabase) + if !ok { + return false + } + return EqualsRefOfAlterDatabase(a, b) + case *AlterMigration: + b, ok := inB.(*AlterMigration) + if !ok { + return false + } + return EqualsRefOfAlterMigration(a, b) + case *AlterTable: + b, ok := inB.(*AlterTable) + if !ok { + return false + } + return EqualsRefOfAlterTable(a, b) + case *AlterView: + b, ok := inB.(*AlterView) + if !ok { + return false + } + return EqualsRefOfAlterView(a, b) + case *AlterVschema: + b, ok := inB.(*AlterVschema) + if !ok { + return false + } + return EqualsRefOfAlterVschema(a, b) + case *AndExpr: + b, ok := 
inB.(*AndExpr) + if !ok { + return false + } + return EqualsRefOfAndExpr(a, b) + case Argument: + b, ok := inB.(Argument) + if !ok { + return false + } + return a == b + case *AutoIncSpec: + b, ok := inB.(*AutoIncSpec) + if !ok { + return false + } + return EqualsRefOfAutoIncSpec(a, b) + case *Begin: + b, ok := inB.(*Begin) + if !ok { + return false + } + return EqualsRefOfBegin(a, b) + case *BinaryExpr: + b, ok := inB.(*BinaryExpr) + if !ok { + return false + } + return EqualsRefOfBinaryExpr(a, b) + case BoolVal: + b, ok := inB.(BoolVal) + if !ok { + return false + } + return a == b + case *CallProc: + b, ok := inB.(*CallProc) + if !ok { + return false + } + return EqualsRefOfCallProc(a, b) + case *CaseExpr: + b, ok := inB.(*CaseExpr) + if !ok { + return false + } + return EqualsRefOfCaseExpr(a, b) + case *ChangeColumn: + b, ok := inB.(*ChangeColumn) + if !ok { + return false + } + return EqualsRefOfChangeColumn(a, b) + case *CheckConstraintDefinition: + b, ok := inB.(*CheckConstraintDefinition) + if !ok { + return false + } + return EqualsRefOfCheckConstraintDefinition(a, b) + case ColIdent: + b, ok := inB.(ColIdent) + if !ok { + return false + } + return EqualsColIdent(a, b) + case *ColName: + b, ok := inB.(*ColName) + if !ok { + return false + } + return EqualsRefOfColName(a, b) + case *CollateExpr: + b, ok := inB.(*CollateExpr) + if !ok { + return false + } + return EqualsRefOfCollateExpr(a, b) + case *ColumnDefinition: + b, ok := inB.(*ColumnDefinition) + if !ok { + return false + } + return EqualsRefOfColumnDefinition(a, b) + case *ColumnType: + b, ok := inB.(*ColumnType) + if !ok { + return false + } + return EqualsRefOfColumnType(a, b) + case Columns: + b, ok := inB.(Columns) + if !ok { + return false + } + return EqualsColumns(a, b) + case Comments: + b, ok := inB.(Comments) + if !ok { + return false + } + return EqualsComments(a, b) + case *Commit: + b, ok := inB.(*Commit) + if !ok { + return false + } + return EqualsRefOfCommit(a, b) + case 
*ComparisonExpr: + b, ok := inB.(*ComparisonExpr) + if !ok { + return false + } + return EqualsRefOfComparisonExpr(a, b) + case *ConstraintDefinition: + b, ok := inB.(*ConstraintDefinition) + if !ok { + return false + } + return EqualsRefOfConstraintDefinition(a, b) + case *ConvertExpr: + b, ok := inB.(*ConvertExpr) + if !ok { + return false + } + return EqualsRefOfConvertExpr(a, b) + case *ConvertType: + b, ok := inB.(*ConvertType) + if !ok { + return false + } + return EqualsRefOfConvertType(a, b) + case *ConvertUsingExpr: + b, ok := inB.(*ConvertUsingExpr) + if !ok { + return false + } + return EqualsRefOfConvertUsingExpr(a, b) + case *CreateDatabase: + b, ok := inB.(*CreateDatabase) + if !ok { + return false + } + return EqualsRefOfCreateDatabase(a, b) + case *CreateTable: + b, ok := inB.(*CreateTable) + if !ok { + return false + } + return EqualsRefOfCreateTable(a, b) + case *CreateView: + b, ok := inB.(*CreateView) + if !ok { + return false + } + return EqualsRefOfCreateView(a, b) + case *CurTimeFuncExpr: + b, ok := inB.(*CurTimeFuncExpr) + if !ok { + return false + } + return EqualsRefOfCurTimeFuncExpr(a, b) + case *Default: + b, ok := inB.(*Default) + if !ok { + return false + } + return EqualsRefOfDefault(a, b) + case *Delete: + b, ok := inB.(*Delete) + if !ok { + return false + } + return EqualsRefOfDelete(a, b) + case *DerivedTable: + b, ok := inB.(*DerivedTable) + if !ok { + return false + } + return EqualsRefOfDerivedTable(a, b) + case *DropColumn: + b, ok := inB.(*DropColumn) + if !ok { + return false + } + return EqualsRefOfDropColumn(a, b) + case *DropDatabase: + b, ok := inB.(*DropDatabase) + if !ok { + return false + } + return EqualsRefOfDropDatabase(a, b) + case *DropKey: + b, ok := inB.(*DropKey) + if !ok { + return false + } + return EqualsRefOfDropKey(a, b) + case *DropTable: + b, ok := inB.(*DropTable) + if !ok { + return false + } + return EqualsRefOfDropTable(a, b) + case *DropView: + b, ok := inB.(*DropView) + if !ok { + return false + } 
+ return EqualsRefOfDropView(a, b) + case *ExistsExpr: + b, ok := inB.(*ExistsExpr) + if !ok { + return false + } + return EqualsRefOfExistsExpr(a, b) + case *ExplainStmt: + b, ok := inB.(*ExplainStmt) + if !ok { + return false + } + return EqualsRefOfExplainStmt(a, b) + case *ExplainTab: + b, ok := inB.(*ExplainTab) + if !ok { + return false + } + return EqualsRefOfExplainTab(a, b) + case Exprs: + b, ok := inB.(Exprs) + if !ok { + return false + } + return EqualsExprs(a, b) + case *Flush: + b, ok := inB.(*Flush) + if !ok { + return false + } + return EqualsRefOfFlush(a, b) + case *Force: + b, ok := inB.(*Force) + if !ok { + return false + } + return EqualsRefOfForce(a, b) + case *ForeignKeyDefinition: + b, ok := inB.(*ForeignKeyDefinition) + if !ok { + return false + } + return EqualsRefOfForeignKeyDefinition(a, b) + case *FuncExpr: + b, ok := inB.(*FuncExpr) + if !ok { + return false + } + return EqualsRefOfFuncExpr(a, b) + case GroupBy: + b, ok := inB.(GroupBy) + if !ok { + return false + } + return EqualsGroupBy(a, b) + case *GroupConcatExpr: + b, ok := inB.(*GroupConcatExpr) + if !ok { + return false + } + return EqualsRefOfGroupConcatExpr(a, b) + case *IndexDefinition: + b, ok := inB.(*IndexDefinition) + if !ok { + return false + } + return EqualsRefOfIndexDefinition(a, b) + case *IndexHints: + b, ok := inB.(*IndexHints) + if !ok { + return false + } + return EqualsRefOfIndexHints(a, b) + case *IndexInfo: + b, ok := inB.(*IndexInfo) + if !ok { + return false + } + return EqualsRefOfIndexInfo(a, b) + case *Insert: + b, ok := inB.(*Insert) + if !ok { + return false + } + return EqualsRefOfInsert(a, b) + case *IntervalExpr: + b, ok := inB.(*IntervalExpr) + if !ok { + return false + } + return EqualsRefOfIntervalExpr(a, b) + case *IsExpr: + b, ok := inB.(*IsExpr) + if !ok { + return false + } + return EqualsRefOfIsExpr(a, b) + case IsolationLevel: + b, ok := inB.(IsolationLevel) + if !ok { + return false + } + return a == b + case JoinCondition: + b, ok := 
inB.(JoinCondition) + if !ok { + return false + } + return EqualsJoinCondition(a, b) + case *JoinTableExpr: + b, ok := inB.(*JoinTableExpr) + if !ok { + return false + } + return EqualsRefOfJoinTableExpr(a, b) + case *KeyState: + b, ok := inB.(*KeyState) + if !ok { + return false + } + return EqualsRefOfKeyState(a, b) + case *Limit: + b, ok := inB.(*Limit) + if !ok { + return false + } + return EqualsRefOfLimit(a, b) + case ListArg: + b, ok := inB.(ListArg) + if !ok { + return false + } + return EqualsListArg(a, b) + case *Literal: + b, ok := inB.(*Literal) + if !ok { + return false + } + return EqualsRefOfLiteral(a, b) + case *Load: + b, ok := inB.(*Load) + if !ok { + return false + } + return EqualsRefOfLoad(a, b) + case *LockOption: + b, ok := inB.(*LockOption) + if !ok { + return false + } + return EqualsRefOfLockOption(a, b) + case *LockTables: + b, ok := inB.(*LockTables) + if !ok { + return false + } + return EqualsRefOfLockTables(a, b) + case *MatchExpr: + b, ok := inB.(*MatchExpr) + if !ok { + return false + } + return EqualsRefOfMatchExpr(a, b) + case *ModifyColumn: + b, ok := inB.(*ModifyColumn) + if !ok { + return false + } + return EqualsRefOfModifyColumn(a, b) + case *Nextval: + b, ok := inB.(*Nextval) + if !ok { + return false + } + return EqualsRefOfNextval(a, b) + case *NotExpr: + b, ok := inB.(*NotExpr) + if !ok { + return false + } + return EqualsRefOfNotExpr(a, b) + case *NullVal: + b, ok := inB.(*NullVal) + if !ok { + return false + } + return EqualsRefOfNullVal(a, b) + case OnDup: + b, ok := inB.(OnDup) + if !ok { + return false + } + return EqualsOnDup(a, b) + case *OptLike: + b, ok := inB.(*OptLike) + if !ok { + return false + } + return EqualsRefOfOptLike(a, b) + case *OrExpr: + b, ok := inB.(*OrExpr) + if !ok { + return false + } + return EqualsRefOfOrExpr(a, b) + case *Order: + b, ok := inB.(*Order) + if !ok { + return false + } + return EqualsRefOfOrder(a, b) + case OrderBy: + b, ok := inB.(OrderBy) + if !ok { + return false + } + return 
EqualsOrderBy(a, b) + case *OrderByOption: + b, ok := inB.(*OrderByOption) + if !ok { + return false + } + return EqualsRefOfOrderByOption(a, b) + case *OtherAdmin: + b, ok := inB.(*OtherAdmin) + if !ok { + return false + } + return EqualsRefOfOtherAdmin(a, b) + case *OtherRead: + b, ok := inB.(*OtherRead) + if !ok { + return false + } + return EqualsRefOfOtherRead(a, b) + case *ParenSelect: + b, ok := inB.(*ParenSelect) + if !ok { + return false + } + return EqualsRefOfParenSelect(a, b) + case *ParenTableExpr: + b, ok := inB.(*ParenTableExpr) + if !ok { + return false + } + return EqualsRefOfParenTableExpr(a, b) + case *PartitionDefinition: + b, ok := inB.(*PartitionDefinition) + if !ok { + return false + } + return EqualsRefOfPartitionDefinition(a, b) + case *PartitionSpec: + b, ok := inB.(*PartitionSpec) + if !ok { + return false + } + return EqualsRefOfPartitionSpec(a, b) + case Partitions: + b, ok := inB.(Partitions) + if !ok { + return false + } + return EqualsPartitions(a, b) + case *RangeCond: + b, ok := inB.(*RangeCond) + if !ok { + return false + } + return EqualsRefOfRangeCond(a, b) + case ReferenceAction: + b, ok := inB.(ReferenceAction) + if !ok { + return false + } + return a == b + case *Release: + b, ok := inB.(*Release) + if !ok { + return false + } + return EqualsRefOfRelease(a, b) + case *RenameIndex: + b, ok := inB.(*RenameIndex) + if !ok { + return false + } + return EqualsRefOfRenameIndex(a, b) + case *RenameTable: + b, ok := inB.(*RenameTable) + if !ok { + return false + } + return EqualsRefOfRenameTable(a, b) + case *RenameTableName: + b, ok := inB.(*RenameTableName) + if !ok { + return false + } + return EqualsRefOfRenameTableName(a, b) + case *RevertMigration: + b, ok := inB.(*RevertMigration) + if !ok { + return false + } + return EqualsRefOfRevertMigration(a, b) + case *Rollback: + b, ok := inB.(*Rollback) + if !ok { + return false + } + return EqualsRefOfRollback(a, b) + case *SRollback: + b, ok := inB.(*SRollback) + if !ok { + return 
false + } + return EqualsRefOfSRollback(a, b) + case *Savepoint: + b, ok := inB.(*Savepoint) + if !ok { + return false + } + return EqualsRefOfSavepoint(a, b) + case *Select: + b, ok := inB.(*Select) + if !ok { + return false + } + return EqualsRefOfSelect(a, b) + case SelectExprs: + b, ok := inB.(SelectExprs) + if !ok { + return false + } + return EqualsSelectExprs(a, b) + case *SelectInto: + b, ok := inB.(*SelectInto) + if !ok { + return false + } + return EqualsRefOfSelectInto(a, b) + case *Set: + b, ok := inB.(*Set) + if !ok { + return false + } + return EqualsRefOfSet(a, b) + case *SetExpr: + b, ok := inB.(*SetExpr) + if !ok { + return false + } + return EqualsRefOfSetExpr(a, b) + case SetExprs: + b, ok := inB.(SetExprs) + if !ok { + return false + } + return EqualsSetExprs(a, b) + case *SetTransaction: + b, ok := inB.(*SetTransaction) + if !ok { + return false + } + return EqualsRefOfSetTransaction(a, b) + case *Show: + b, ok := inB.(*Show) + if !ok { + return false + } + return EqualsRefOfShow(a, b) + case *ShowBasic: + b, ok := inB.(*ShowBasic) + if !ok { + return false + } + return EqualsRefOfShowBasic(a, b) + case *ShowCreate: + b, ok := inB.(*ShowCreate) + if !ok { + return false + } + return EqualsRefOfShowCreate(a, b) + case *ShowFilter: + b, ok := inB.(*ShowFilter) + if !ok { + return false + } + return EqualsRefOfShowFilter(a, b) + case *ShowLegacy: + b, ok := inB.(*ShowLegacy) + if !ok { + return false + } + return EqualsRefOfShowLegacy(a, b) + case *StarExpr: + b, ok := inB.(*StarExpr) + if !ok { + return false + } + return EqualsRefOfStarExpr(a, b) + case *Stream: + b, ok := inB.(*Stream) + if !ok { + return false + } + return EqualsRefOfStream(a, b) + case *Subquery: + b, ok := inB.(*Subquery) + if !ok { + return false + } + return EqualsRefOfSubquery(a, b) + case *SubstrExpr: + b, ok := inB.(*SubstrExpr) + if !ok { + return false + } + return EqualsRefOfSubstrExpr(a, b) + case TableExprs: + b, ok := inB.(TableExprs) + if !ok { + return false + } 
+ return EqualsTableExprs(a, b) + case TableIdent: + b, ok := inB.(TableIdent) + if !ok { + return false + } + return EqualsTableIdent(a, b) + case TableName: + b, ok := inB.(TableName) + if !ok { + return false + } + return EqualsTableName(a, b) + case TableNames: + b, ok := inB.(TableNames) + if !ok { + return false + } + return EqualsTableNames(a, b) + case TableOptions: + b, ok := inB.(TableOptions) + if !ok { + return false + } + return EqualsTableOptions(a, b) + case *TableSpec: + b, ok := inB.(*TableSpec) + if !ok { + return false + } + return EqualsRefOfTableSpec(a, b) + case *TablespaceOperation: + b, ok := inB.(*TablespaceOperation) + if !ok { + return false + } + return EqualsRefOfTablespaceOperation(a, b) + case *TimestampFuncExpr: + b, ok := inB.(*TimestampFuncExpr) + if !ok { + return false + } + return EqualsRefOfTimestampFuncExpr(a, b) + case *TruncateTable: + b, ok := inB.(*TruncateTable) + if !ok { + return false + } + return EqualsRefOfTruncateTable(a, b) + case *UnaryExpr: + b, ok := inB.(*UnaryExpr) + if !ok { + return false + } + return EqualsRefOfUnaryExpr(a, b) + case *Union: + b, ok := inB.(*Union) + if !ok { + return false + } + return EqualsRefOfUnion(a, b) + case *UnionSelect: + b, ok := inB.(*UnionSelect) + if !ok { + return false + } + return EqualsRefOfUnionSelect(a, b) + case *UnlockTables: + b, ok := inB.(*UnlockTables) + if !ok { + return false + } + return EqualsRefOfUnlockTables(a, b) + case *Update: + b, ok := inB.(*Update) + if !ok { + return false + } + return EqualsRefOfUpdate(a, b) + case *UpdateExpr: + b, ok := inB.(*UpdateExpr) + if !ok { + return false + } + return EqualsRefOfUpdateExpr(a, b) + case UpdateExprs: + b, ok := inB.(UpdateExprs) + if !ok { + return false + } + return EqualsUpdateExprs(a, b) + case *Use: + b, ok := inB.(*Use) + if !ok { + return false + } + return EqualsRefOfUse(a, b) + case *VStream: + b, ok := inB.(*VStream) + if !ok { + return false + } + return EqualsRefOfVStream(a, b) + case ValTuple: + b, 
ok := inB.(ValTuple) + if !ok { + return false + } + return EqualsValTuple(a, b) + case *Validation: + b, ok := inB.(*Validation) + if !ok { + return false + } + return EqualsRefOfValidation(a, b) + case Values: + b, ok := inB.(Values) + if !ok { + return false + } + return EqualsValues(a, b) + case *ValuesFuncExpr: + b, ok := inB.(*ValuesFuncExpr) + if !ok { + return false + } + return EqualsRefOfValuesFuncExpr(a, b) + case VindexParam: + b, ok := inB.(VindexParam) + if !ok { + return false + } + return EqualsVindexParam(a, b) + case *VindexSpec: + b, ok := inB.(*VindexSpec) + if !ok { + return false + } + return EqualsRefOfVindexSpec(a, b) + case *When: + b, ok := inB.(*When) + if !ok { + return false + } + return EqualsRefOfWhen(a, b) + case *Where: + b, ok := inB.(*Where) + if !ok { + return false + } + return EqualsRefOfWhere(a, b) + case *XorExpr: + b, ok := inB.(*XorExpr) + if !ok { + return false + } + return EqualsRefOfXorExpr(a, b) + default: + // this should never happen + return false + } +} + +// EqualsRefOfAddColumns does deep equals between the two objects. +func EqualsRefOfAddColumns(a, b *AddColumns) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSliceOfRefOfColumnDefinition(a.Columns, b.Columns) && + EqualsRefOfColName(a.First, b.First) && + EqualsRefOfColName(a.After, b.After) +} + +// EqualsRefOfAddConstraintDefinition does deep equals between the two objects. +func EqualsRefOfAddConstraintDefinition(a, b *AddConstraintDefinition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfConstraintDefinition(a.ConstraintDefinition, b.ConstraintDefinition) +} + +// EqualsRefOfAddIndexDefinition does deep equals between the two objects. 
// Pointer fast path (a == b covers both-nil and identical pointers),
// then structural comparison field by field.
func EqualsRefOfAddIndexDefinition(a, b *AddIndexDefinition) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsRefOfIndexDefinition(a.IndexDefinition, b.IndexDefinition)
}

// EqualsRefOfAliasedExpr does deep equals between the two objects.
func EqualsRefOfAliasedExpr(a, b *AliasedExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsExpr(a.Expr, b.Expr) &&
		EqualsColIdent(a.As, b.As)
}

// EqualsRefOfAliasedTableExpr does deep equals between the two objects.
func EqualsRefOfAliasedTableExpr(a, b *AliasedTableExpr) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return EqualsSimpleTableExpr(a.Expr, b.Expr) &&
		EqualsPartitions(a.Partitions, b.Partitions) &&
		EqualsTableIdent(a.As, b.As) &&
		EqualsRefOfIndexHints(a.Hints, b.Hints)
}

// EqualsRefOfAlterCharset does deep equals between the two objects.
func EqualsRefOfAlterCharset(a, b *AlterCharset) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.CharacterSet == b.CharacterSet &&
		a.Collate == b.Collate
}

// EqualsRefOfAlterColumn does deep equals between the two objects.
func EqualsRefOfAlterColumn(a, b *AlterColumn) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.DropDefault == b.DropDefault &&
		EqualsRefOfColName(a.Column, b.Column) &&
		EqualsExpr(a.DefaultVal, b.DefaultVal)
}

// EqualsRefOfAlterDatabase does deep equals between the two objects.
// Cheap scalar comparisons first, then the recursive field comparisons.
func EqualsRefOfAlterDatabase(a, b *AlterDatabase) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.UpdateDataDirectory == b.UpdateDataDirectory &&
		a.FullyParsed == b.FullyParsed &&
		EqualsTableIdent(a.DBName, b.DBName) &&
		EqualsSliceOfCollateAndCharset(a.AlterOptions, b.AlterOptions)
}

// EqualsRefOfAlterMigration does deep equals between the two objects.
func EqualsRefOfAlterMigration(a, b *AlterMigration) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.UUID == b.UUID &&
		a.Type == b.Type
}

// EqualsRefOfAlterTable does deep equals between the two objects.
func EqualsRefOfAlterTable(a, b *AlterTable) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.FullyParsed == b.FullyParsed &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsSliceOfAlterOption(a.AlterOptions, b.AlterOptions) &&
		EqualsRefOfPartitionSpec(a.PartitionSpec, b.PartitionSpec)
}

// EqualsRefOfAlterView does deep equals between the two objects.
func EqualsRefOfAlterView(a, b *AlterView) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Algorithm == b.Algorithm &&
		a.Definer == b.Definer &&
		a.Security == b.Security &&
		a.CheckOption == b.CheckOption &&
		EqualsTableName(a.ViewName, b.ViewName) &&
		EqualsColumns(a.Columns, b.Columns) &&
		EqualsSelectStatement(a.Select, b.Select)
}

// EqualsRefOfAlterVschema does deep equals between the two objects.
func EqualsRefOfAlterVschema(a, b *AlterVschema) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return a.Action == b.Action &&
		EqualsTableName(a.Table, b.Table) &&
		EqualsRefOfVindexSpec(a.VindexSpec, b.VindexSpec) &&
		EqualsSliceOfColIdent(a.VindexCols, b.VindexCols) &&
		EqualsRefOfAutoIncSpec(a.AutoIncSpec, b.AutoIncSpec)
}

// EqualsRefOfAndExpr does deep equals between the two objects.
+func EqualsRefOfAndExpr(a, b *AndExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Left, b.Left) && + EqualsExpr(a.Right, b.Right) +} + +// EqualsRefOfAutoIncSpec does deep equals between the two objects. +func EqualsRefOfAutoIncSpec(a, b *AutoIncSpec) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Column, b.Column) && + EqualsTableName(a.Sequence, b.Sequence) +} + +// EqualsRefOfBegin does deep equals between the two objects. +func EqualsRefOfBegin(a, b *Begin) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfBinaryExpr does deep equals between the two objects. +func EqualsRefOfBinaryExpr(a, b *BinaryExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Operator == b.Operator && + EqualsExpr(a.Left, b.Left) && + EqualsExpr(a.Right, b.Right) +} + +// EqualsRefOfCallProc does deep equals between the two objects. +func EqualsRefOfCallProc(a, b *CallProc) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableName(a.Name, b.Name) && + EqualsExprs(a.Params, b.Params) +} + +// EqualsRefOfCaseExpr does deep equals between the two objects. +func EqualsRefOfCaseExpr(a, b *CaseExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Expr, b.Expr) && + EqualsSliceOfRefOfWhen(a.Whens, b.Whens) && + EqualsExpr(a.Else, b.Else) +} + +// EqualsRefOfChangeColumn does deep equals between the two objects. 
+func EqualsRefOfChangeColumn(a, b *ChangeColumn) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfColName(a.OldColumn, b.OldColumn) && + EqualsRefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) && + EqualsRefOfColName(a.First, b.First) && + EqualsRefOfColName(a.After, b.After) +} + +// EqualsRefOfCheckConstraintDefinition does deep equals between the two objects. +func EqualsRefOfCheckConstraintDefinition(a, b *CheckConstraintDefinition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Enforced == b.Enforced && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsColIdent does deep equals between the two objects. +func EqualsColIdent(a, b ColIdent) bool { + return a.val == b.val && + a.lowered == b.lowered && + a.at == b.at +} + +// EqualsRefOfColName does deep equals between the two objects. +func EqualsRefOfColName(a, b *ColName) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) && + EqualsTableName(a.Qualifier, b.Qualifier) +} + +// EqualsRefOfCollateExpr does deep equals between the two objects. +func EqualsRefOfCollateExpr(a, b *CollateExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Charset == b.Charset && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsRefOfColumnDefinition does deep equals between the two objects. +func EqualsRefOfColumnDefinition(a, b *ColumnDefinition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) && + EqualsColumnType(a.Type, b.Type) +} + +// EqualsRefOfColumnType does deep equals between the two objects. 
+func EqualsRefOfColumnType(a, b *ColumnType) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + a.Unsigned == b.Unsigned && + a.Zerofill == b.Zerofill && + a.Charset == b.Charset && + a.Collate == b.Collate && + EqualsRefOfColumnTypeOptions(a.Options, b.Options) && + EqualsRefOfLiteral(a.Length, b.Length) && + EqualsRefOfLiteral(a.Scale, b.Scale) && + EqualsSliceOfString(a.EnumValues, b.EnumValues) +} + +// EqualsColumns does deep equals between the two objects. +func EqualsColumns(a, b Columns) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsColIdent(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsComments does deep equals between the two objects. +func EqualsComments(a, b Comments) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +// EqualsRefOfCommit does deep equals between the two objects. +func EqualsRefOfCommit(a, b *Commit) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfComparisonExpr does deep equals between the two objects. +func EqualsRefOfComparisonExpr(a, b *ComparisonExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Operator == b.Operator && + EqualsExpr(a.Left, b.Left) && + EqualsExpr(a.Right, b.Right) && + EqualsExpr(a.Escape, b.Escape) +} + +// EqualsRefOfConstraintDefinition does deep equals between the two objects. +func EqualsRefOfConstraintDefinition(a, b *ConstraintDefinition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) && + EqualsConstraintInfo(a.Details, b.Details) +} + +// EqualsRefOfConvertExpr does deep equals between the two objects. 
+func EqualsRefOfConvertExpr(a, b *ConvertExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Expr, b.Expr) && + EqualsRefOfConvertType(a.Type, b.Type) +} + +// EqualsRefOfConvertType does deep equals between the two objects. +func EqualsRefOfConvertType(a, b *ConvertType) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + a.Charset == b.Charset && + EqualsRefOfLiteral(a.Length, b.Length) && + EqualsRefOfLiteral(a.Scale, b.Scale) && + a.Operator == b.Operator +} + +// EqualsRefOfConvertUsingExpr does deep equals between the two objects. +func EqualsRefOfConvertUsingExpr(a, b *ConvertUsingExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsRefOfCreateDatabase does deep equals between the two objects. +func EqualsRefOfCreateDatabase(a, b *CreateDatabase) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IfNotExists == b.IfNotExists && + a.FullyParsed == b.FullyParsed && + EqualsComments(a.Comments, b.Comments) && + EqualsTableIdent(a.DBName, b.DBName) && + EqualsSliceOfCollateAndCharset(a.CreateOptions, b.CreateOptions) +} + +// EqualsRefOfCreateTable does deep equals between the two objects. +func EqualsRefOfCreateTable(a, b *CreateTable) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Temp == b.Temp && + a.IfNotExists == b.IfNotExists && + a.FullyParsed == b.FullyParsed && + EqualsTableName(a.Table, b.Table) && + EqualsRefOfTableSpec(a.TableSpec, b.TableSpec) && + EqualsRefOfOptLike(a.OptLike, b.OptLike) +} + +// EqualsRefOfCreateView does deep equals between the two objects. 
+func EqualsRefOfCreateView(a, b *CreateView) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Algorithm == b.Algorithm && + a.Definer == b.Definer && + a.Security == b.Security && + a.CheckOption == b.CheckOption && + a.IsReplace == b.IsReplace && + EqualsTableName(a.ViewName, b.ViewName) && + EqualsColumns(a.Columns, b.Columns) && + EqualsSelectStatement(a.Select, b.Select) +} + +// EqualsRefOfCurTimeFuncExpr does deep equals between the two objects. +func EqualsRefOfCurTimeFuncExpr(a, b *CurTimeFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) && + EqualsExpr(a.Fsp, b.Fsp) +} + +// EqualsRefOfDefault does deep equals between the two objects. +func EqualsRefOfDefault(a, b *Default) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.ColName == b.ColName +} + +// EqualsRefOfDelete does deep equals between the two objects. +func EqualsRefOfDelete(a, b *Delete) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Ignore == b.Ignore && + EqualsComments(a.Comments, b.Comments) && + EqualsTableNames(a.Targets, b.Targets) && + EqualsTableExprs(a.TableExprs, b.TableExprs) && + EqualsPartitions(a.Partitions, b.Partitions) && + EqualsRefOfWhere(a.Where, b.Where) && + EqualsOrderBy(a.OrderBy, b.OrderBy) && + EqualsRefOfLimit(a.Limit, b.Limit) +} + +// EqualsRefOfDerivedTable does deep equals between the two objects. +func EqualsRefOfDerivedTable(a, b *DerivedTable) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSelectStatement(a.Select, b.Select) +} + +// EqualsRefOfDropColumn does deep equals between the two objects. 
+func EqualsRefOfDropColumn(a, b *DropColumn) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfColName(a.Name, b.Name) +} + +// EqualsRefOfDropDatabase does deep equals between the two objects. +func EqualsRefOfDropDatabase(a, b *DropDatabase) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IfExists == b.IfExists && + EqualsComments(a.Comments, b.Comments) && + EqualsTableIdent(a.DBName, b.DBName) +} + +// EqualsRefOfDropKey does deep equals between the two objects. +func EqualsRefOfDropKey(a, b *DropKey) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + EqualsColIdent(a.Name, b.Name) +} + +// EqualsRefOfDropTable does deep equals between the two objects. +func EqualsRefOfDropTable(a, b *DropTable) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Temp == b.Temp && + a.IfExists == b.IfExists && + EqualsTableNames(a.FromTables, b.FromTables) +} + +// EqualsRefOfDropView does deep equals between the two objects. +func EqualsRefOfDropView(a, b *DropView) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IfExists == b.IfExists && + EqualsTableNames(a.FromTables, b.FromTables) +} + +// EqualsRefOfExistsExpr does deep equals between the two objects. +func EqualsRefOfExistsExpr(a, b *ExistsExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfSubquery(a.Subquery, b.Subquery) +} + +// EqualsRefOfExplainStmt does deep equals between the two objects. +func EqualsRefOfExplainStmt(a, b *ExplainStmt) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + EqualsStatement(a.Statement, b.Statement) +} + +// EqualsRefOfExplainTab does deep equals between the two objects. 
+func EqualsRefOfExplainTab(a, b *ExplainTab) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Wild == b.Wild && + EqualsTableName(a.Table, b.Table) +} + +// EqualsExprs does deep equals between the two objects. +func EqualsExprs(a, b Exprs) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfFlush does deep equals between the two objects. +func EqualsRefOfFlush(a, b *Flush) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IsLocal == b.IsLocal && + a.WithLock == b.WithLock && + a.ForExport == b.ForExport && + EqualsSliceOfString(a.FlushOptions, b.FlushOptions) && + EqualsTableNames(a.TableNames, b.TableNames) +} + +// EqualsRefOfForce does deep equals between the two objects. +func EqualsRefOfForce(a, b *Force) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfForeignKeyDefinition does deep equals between the two objects. +func EqualsRefOfForeignKeyDefinition(a, b *ForeignKeyDefinition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColumns(a.Source, b.Source) && + EqualsTableName(a.ReferencedTable, b.ReferencedTable) && + EqualsColumns(a.ReferencedColumns, b.ReferencedColumns) && + a.OnDelete == b.OnDelete && + a.OnUpdate == b.OnUpdate +} + +// EqualsRefOfFuncExpr does deep equals between the two objects. +func EqualsRefOfFuncExpr(a, b *FuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Distinct == b.Distinct && + EqualsTableIdent(a.Qualifier, b.Qualifier) && + EqualsColIdent(a.Name, b.Name) && + EqualsSelectExprs(a.Exprs, b.Exprs) +} + +// EqualsGroupBy does deep equals between the two objects. 
+func EqualsGroupBy(a, b GroupBy) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfGroupConcatExpr does deep equals between the two objects. +func EqualsRefOfGroupConcatExpr(a, b *GroupConcatExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Distinct == b.Distinct && + a.Separator == b.Separator && + EqualsSelectExprs(a.Exprs, b.Exprs) && + EqualsOrderBy(a.OrderBy, b.OrderBy) && + EqualsRefOfLimit(a.Limit, b.Limit) +} + +// EqualsRefOfIndexDefinition does deep equals between the two objects. +func EqualsRefOfIndexDefinition(a, b *IndexDefinition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfIndexInfo(a.Info, b.Info) && + EqualsSliceOfRefOfIndexColumn(a.Columns, b.Columns) && + EqualsSliceOfRefOfIndexOption(a.Options, b.Options) +} + +// EqualsRefOfIndexHints does deep equals between the two objects. +func EqualsRefOfIndexHints(a, b *IndexHints) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + EqualsSliceOfColIdent(a.Indexes, b.Indexes) +} + +// EqualsRefOfIndexInfo does deep equals between the two objects. +func EqualsRefOfIndexInfo(a, b *IndexInfo) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + a.Primary == b.Primary && + a.Spatial == b.Spatial && + a.Fulltext == b.Fulltext && + a.Unique == b.Unique && + EqualsColIdent(a.Name, b.Name) && + EqualsColIdent(a.ConstraintName, b.ConstraintName) +} + +// EqualsRefOfInsert does deep equals between the two objects. 
+func EqualsRefOfInsert(a, b *Insert) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Action == b.Action && + EqualsComments(a.Comments, b.Comments) && + a.Ignore == b.Ignore && + EqualsTableName(a.Table, b.Table) && + EqualsPartitions(a.Partitions, b.Partitions) && + EqualsColumns(a.Columns, b.Columns) && + EqualsInsertRows(a.Rows, b.Rows) && + EqualsOnDup(a.OnDup, b.OnDup) +} + +// EqualsRefOfIntervalExpr does deep equals between the two objects. +func EqualsRefOfIntervalExpr(a, b *IntervalExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Unit == b.Unit && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsRefOfIsExpr does deep equals between the two objects. +func EqualsRefOfIsExpr(a, b *IsExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Operator == b.Operator && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsJoinCondition does deep equals between the two objects. +func EqualsJoinCondition(a, b JoinCondition) bool { + return EqualsExpr(a.On, b.On) && + EqualsColumns(a.Using, b.Using) +} + +// EqualsRefOfJoinTableExpr does deep equals between the two objects. +func EqualsRefOfJoinTableExpr(a, b *JoinTableExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableExpr(a.LeftExpr, b.LeftExpr) && + a.Join == b.Join && + EqualsTableExpr(a.RightExpr, b.RightExpr) && + EqualsJoinCondition(a.Condition, b.Condition) +} + +// EqualsRefOfKeyState does deep equals between the two objects. +func EqualsRefOfKeyState(a, b *KeyState) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Enable == b.Enable +} + +// EqualsRefOfLimit does deep equals between the two objects. 
+func EqualsRefOfLimit(a, b *Limit) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Offset, b.Offset) && + EqualsExpr(a.Rowcount, b.Rowcount) +} + +// EqualsListArg does deep equals between the two objects. +func EqualsListArg(a, b ListArg) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +// EqualsRefOfLiteral does deep equals between the two objects. +func EqualsRefOfLiteral(a, b *Literal) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Val == b.Val && + a.Type == b.Type +} + +// EqualsRefOfLoad does deep equals between the two objects. +func EqualsRefOfLoad(a, b *Load) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfLockOption does deep equals between the two objects. +func EqualsRefOfLockOption(a, b *LockOption) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type +} + +// EqualsRefOfLockTables does deep equals between the two objects. +func EqualsRefOfLockTables(a, b *LockTables) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableAndLockTypes(a.Tables, b.Tables) +} + +// EqualsRefOfMatchExpr does deep equals between the two objects. +func EqualsRefOfMatchExpr(a, b *MatchExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSelectExprs(a.Columns, b.Columns) && + EqualsExpr(a.Expr, b.Expr) && + a.Option == b.Option +} + +// EqualsRefOfModifyColumn does deep equals between the two objects. 
+func EqualsRefOfModifyColumn(a, b *ModifyColumn) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) && + EqualsRefOfColName(a.First, b.First) && + EqualsRefOfColName(a.After, b.After) +} + +// EqualsRefOfNextval does deep equals between the two objects. +func EqualsRefOfNextval(a, b *Nextval) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Expr, b.Expr) +} + +// EqualsRefOfNotExpr does deep equals between the two objects. +func EqualsRefOfNotExpr(a, b *NotExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Expr, b.Expr) +} + +// EqualsRefOfNullVal does deep equals between the two objects. +func EqualsRefOfNullVal(a, b *NullVal) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsOnDup does deep equals between the two objects. +func EqualsOnDup(a, b OnDup) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfUpdateExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfOptLike does deep equals between the two objects. +func EqualsRefOfOptLike(a, b *OptLike) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableName(a.LikeTable, b.LikeTable) +} + +// EqualsRefOfOrExpr does deep equals between the two objects. +func EqualsRefOfOrExpr(a, b *OrExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Left, b.Left) && + EqualsExpr(a.Right, b.Right) +} + +// EqualsRefOfOrder does deep equals between the two objects. 
+func EqualsRefOfOrder(a, b *Order) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Expr, b.Expr) && + a.Direction == b.Direction +} + +// EqualsOrderBy does deep equals between the two objects. +func EqualsOrderBy(a, b OrderBy) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfOrder(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfOrderByOption does deep equals between the two objects. +func EqualsRefOfOrderByOption(a, b *OrderByOption) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColumns(a.Cols, b.Cols) +} + +// EqualsRefOfOtherAdmin does deep equals between the two objects. +func EqualsRefOfOtherAdmin(a, b *OtherAdmin) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfOtherRead does deep equals between the two objects. +func EqualsRefOfOtherRead(a, b *OtherRead) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfParenSelect does deep equals between the two objects. +func EqualsRefOfParenSelect(a, b *ParenSelect) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSelectStatement(a.Select, b.Select) +} + +// EqualsRefOfParenTableExpr does deep equals between the two objects. +func EqualsRefOfParenTableExpr(a, b *ParenTableExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableExprs(a.Exprs, b.Exprs) +} + +// EqualsRefOfPartitionDefinition does deep equals between the two objects. 
+func EqualsRefOfPartitionDefinition(a, b *PartitionDefinition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Maxvalue == b.Maxvalue && + EqualsColIdent(a.Name, b.Name) && + EqualsExpr(a.Limit, b.Limit) +} + +// EqualsRefOfPartitionSpec does deep equals between the two objects. +func EqualsRefOfPartitionSpec(a, b *PartitionSpec) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IsAll == b.IsAll && + a.WithoutValidation == b.WithoutValidation && + a.Action == b.Action && + EqualsPartitions(a.Names, b.Names) && + EqualsRefOfLiteral(a.Number, b.Number) && + EqualsTableName(a.TableName, b.TableName) && + EqualsSliceOfRefOfPartitionDefinition(a.Definitions, b.Definitions) +} + +// EqualsPartitions does deep equals between the two objects. +func EqualsPartitions(a, b Partitions) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsColIdent(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfRangeCond does deep equals between the two objects. +func EqualsRefOfRangeCond(a, b *RangeCond) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Operator == b.Operator && + EqualsExpr(a.Left, b.Left) && + EqualsExpr(a.From, b.From) && + EqualsExpr(a.To, b.To) +} + +// EqualsRefOfRelease does deep equals between the two objects. +func EqualsRefOfRelease(a, b *Release) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) +} + +// EqualsRefOfRenameIndex does deep equals between the two objects. +func EqualsRefOfRenameIndex(a, b *RenameIndex) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.OldName, b.OldName) && + EqualsColIdent(a.NewName, b.NewName) +} + +// EqualsRefOfRenameTable does deep equals between the two objects. 
+func EqualsRefOfRenameTable(a, b *RenameTable) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSliceOfRefOfRenameTablePair(a.TablePairs, b.TablePairs) +} + +// EqualsRefOfRenameTableName does deep equals between the two objects. +func EqualsRefOfRenameTableName(a, b *RenameTableName) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableName(a.Table, b.Table) +} + +// EqualsRefOfRevertMigration does deep equals between the two objects. +func EqualsRefOfRevertMigration(a, b *RevertMigration) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.UUID == b.UUID +} + +// EqualsRefOfRollback does deep equals between the two objects. +func EqualsRefOfRollback(a, b *Rollback) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfSRollback does deep equals between the two objects. +func EqualsRefOfSRollback(a, b *SRollback) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) +} + +// EqualsRefOfSavepoint does deep equals between the two objects. +func EqualsRefOfSavepoint(a, b *Savepoint) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) +} + +// EqualsRefOfSelect does deep equals between the two objects. 
+func EqualsRefOfSelect(a, b *Select) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Distinct == b.Distinct && + a.StraightJoinHint == b.StraightJoinHint && + a.SQLCalcFoundRows == b.SQLCalcFoundRows && + EqualsRefOfBool(a.Cache, b.Cache) && + EqualsComments(a.Comments, b.Comments) && + EqualsSelectExprs(a.SelectExprs, b.SelectExprs) && + EqualsTableExprs(a.From, b.From) && + EqualsRefOfWhere(a.Where, b.Where) && + EqualsGroupBy(a.GroupBy, b.GroupBy) && + EqualsRefOfWhere(a.Having, b.Having) && + EqualsOrderBy(a.OrderBy, b.OrderBy) && + EqualsRefOfLimit(a.Limit, b.Limit) && + a.Lock == b.Lock && + EqualsRefOfSelectInto(a.Into, b.Into) +} + +// EqualsSelectExprs does deep equals between the two objects. +func EqualsSelectExprs(a, b SelectExprs) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsSelectExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfSelectInto does deep equals between the two objects. +func EqualsRefOfSelectInto(a, b *SelectInto) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.FileName == b.FileName && + a.Charset == b.Charset && + a.FormatOption == b.FormatOption && + a.ExportOption == b.ExportOption && + a.Manifest == b.Manifest && + a.Overwrite == b.Overwrite && + a.Type == b.Type +} + +// EqualsRefOfSet does deep equals between the two objects. +func EqualsRefOfSet(a, b *Set) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsComments(a.Comments, b.Comments) && + EqualsSetExprs(a.Exprs, b.Exprs) +} + +// EqualsRefOfSetExpr does deep equals between the two objects. 
+func EqualsRefOfSetExpr(a, b *SetExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Scope == b.Scope && + EqualsColIdent(a.Name, b.Name) && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsSetExprs does deep equals between the two objects. +func EqualsSetExprs(a, b SetExprs) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfSetExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfSetTransaction does deep equals between the two objects. +func EqualsRefOfSetTransaction(a, b *SetTransaction) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSQLNode(a.SQLNode, b.SQLNode) && + EqualsComments(a.Comments, b.Comments) && + a.Scope == b.Scope && + EqualsSliceOfCharacteristic(a.Characteristics, b.Characteristics) +} + +// EqualsRefOfShow does deep equals between the two objects. +func EqualsRefOfShow(a, b *Show) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsShowInternal(a.Internal, b.Internal) +} + +// EqualsRefOfShowBasic does deep equals between the two objects. +func EqualsRefOfShowBasic(a, b *ShowBasic) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Full == b.Full && + a.Command == b.Command && + EqualsTableName(a.Tbl, b.Tbl) && + EqualsTableIdent(a.DbName, b.DbName) && + EqualsRefOfShowFilter(a.Filter, b.Filter) +} + +// EqualsRefOfShowCreate does deep equals between the two objects. +func EqualsRefOfShowCreate(a, b *ShowCreate) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Command == b.Command && + EqualsTableName(a.Op, b.Op) +} + +// EqualsRefOfShowFilter does deep equals between the two objects. 
+func EqualsRefOfShowFilter(a, b *ShowFilter) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Like == b.Like && + EqualsExpr(a.Filter, b.Filter) +} + +// EqualsRefOfShowLegacy does deep equals between the two objects. +func EqualsRefOfShowLegacy(a, b *ShowLegacy) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Extended == b.Extended && + a.Type == b.Type && + EqualsTableName(a.OnTable, b.OnTable) && + EqualsTableName(a.Table, b.Table) && + EqualsRefOfShowTablesOpt(a.ShowTablesOpt, b.ShowTablesOpt) && + a.Scope == b.Scope && + EqualsExpr(a.ShowCollationFilterOpt, b.ShowCollationFilterOpt) +} + +// EqualsRefOfStarExpr does deep equals between the two objects. +func EqualsRefOfStarExpr(a, b *StarExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableName(a.TableName, b.TableName) +} + +// EqualsRefOfStream does deep equals between the two objects. +func EqualsRefOfStream(a, b *Stream) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsComments(a.Comments, b.Comments) && + EqualsSelectExpr(a.SelectExpr, b.SelectExpr) && + EqualsTableName(a.Table, b.Table) +} + +// EqualsRefOfSubquery does deep equals between the two objects. +func EqualsRefOfSubquery(a, b *Subquery) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSelectStatement(a.Select, b.Select) +} + +// EqualsRefOfSubstrExpr does deep equals between the two objects. +func EqualsRefOfSubstrExpr(a, b *SubstrExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfColName(a.Name, b.Name) && + EqualsRefOfLiteral(a.StrVal, b.StrVal) && + EqualsExpr(a.From, b.From) && + EqualsExpr(a.To, b.To) +} + +// EqualsTableExprs does deep equals between the two objects. 
+func EqualsTableExprs(a, b TableExprs) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsTableExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsTableIdent does deep equals between the two objects. +func EqualsTableIdent(a, b TableIdent) bool { + return a.v == b.v +} + +// EqualsTableName does deep equals between the two objects. +func EqualsTableName(a, b TableName) bool { + return EqualsTableIdent(a.Name, b.Name) && + EqualsTableIdent(a.Qualifier, b.Qualifier) +} + +// EqualsTableNames does deep equals between the two objects. +func EqualsTableNames(a, b TableNames) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsTableName(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsTableOptions does deep equals between the two objects. +func EqualsTableOptions(a, b TableOptions) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfTableOption(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfTableSpec does deep equals between the two objects. +func EqualsRefOfTableSpec(a, b *TableSpec) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSliceOfRefOfColumnDefinition(a.Columns, b.Columns) && + EqualsSliceOfRefOfIndexDefinition(a.Indexes, b.Indexes) && + EqualsSliceOfRefOfConstraintDefinition(a.Constraints, b.Constraints) && + EqualsTableOptions(a.Options, b.Options) +} + +// EqualsRefOfTablespaceOperation does deep equals between the two objects. +func EqualsRefOfTablespaceOperation(a, b *TablespaceOperation) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Import == b.Import +} + +// EqualsRefOfTimestampFuncExpr does deep equals between the two objects. 
+func EqualsRefOfTimestampFuncExpr(a, b *TimestampFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Name == b.Name && + a.Unit == b.Unit && + EqualsExpr(a.Expr1, b.Expr1) && + EqualsExpr(a.Expr2, b.Expr2) +} + +// EqualsRefOfTruncateTable does deep equals between the two objects. +func EqualsRefOfTruncateTable(a, b *TruncateTable) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableName(a.Table, b.Table) +} + +// EqualsRefOfUnaryExpr does deep equals between the two objects. +func EqualsRefOfUnaryExpr(a, b *UnaryExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Operator == b.Operator && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsRefOfUnion does deep equals between the two objects. +func EqualsRefOfUnion(a, b *Union) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsSelectStatement(a.FirstStatement, b.FirstStatement) && + EqualsSliceOfRefOfUnionSelect(a.UnionSelects, b.UnionSelects) && + EqualsOrderBy(a.OrderBy, b.OrderBy) && + EqualsRefOfLimit(a.Limit, b.Limit) && + a.Lock == b.Lock +} + +// EqualsRefOfUnionSelect does deep equals between the two objects. +func EqualsRefOfUnionSelect(a, b *UnionSelect) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Distinct == b.Distinct && + EqualsSelectStatement(a.Statement, b.Statement) +} + +// EqualsRefOfUnlockTables does deep equals between the two objects. +func EqualsRefOfUnlockTables(a, b *UnlockTables) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return true +} + +// EqualsRefOfUpdate does deep equals between the two objects. 
+func EqualsRefOfUpdate(a, b *Update) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsComments(a.Comments, b.Comments) && + a.Ignore == b.Ignore && + EqualsTableExprs(a.TableExprs, b.TableExprs) && + EqualsUpdateExprs(a.Exprs, b.Exprs) && + EqualsRefOfWhere(a.Where, b.Where) && + EqualsOrderBy(a.OrderBy, b.OrderBy) && + EqualsRefOfLimit(a.Limit, b.Limit) +} + +// EqualsRefOfUpdateExpr does deep equals between the two objects. +func EqualsRefOfUpdateExpr(a, b *UpdateExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfColName(a.Name, b.Name) && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsUpdateExprs does deep equals between the two objects. +func EqualsUpdateExprs(a, b UpdateExprs) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfUpdateExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfUse does deep equals between the two objects. +func EqualsRefOfUse(a, b *Use) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableIdent(a.DBName, b.DBName) +} + +// EqualsRefOfVStream does deep equals between the two objects. +func EqualsRefOfVStream(a, b *VStream) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsComments(a.Comments, b.Comments) && + EqualsSelectExpr(a.SelectExpr, b.SelectExpr) && + EqualsTableName(a.Table, b.Table) && + EqualsRefOfWhere(a.Where, b.Where) && + EqualsRefOfLimit(a.Limit, b.Limit) +} + +// EqualsValTuple does deep equals between the two objects. +func EqualsValTuple(a, b ValTuple) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsExpr(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfValidation does deep equals between the two objects. 
+func EqualsRefOfValidation(a, b *Validation) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.With == b.With +} + +// EqualsValues does deep equals between the two objects. +func EqualsValues(a, b Values) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsValTuple(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfValuesFuncExpr does deep equals between the two objects. +func EqualsRefOfValuesFuncExpr(a, b *ValuesFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsRefOfColName(a.Name, b.Name) +} + +// EqualsVindexParam does deep equals between the two objects. +func EqualsVindexParam(a, b VindexParam) bool { + return a.Val == b.Val && + EqualsColIdent(a.Key, b.Key) +} + +// EqualsRefOfVindexSpec does deep equals between the two objects. +func EqualsRefOfVindexSpec(a, b *VindexSpec) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Name, b.Name) && + EqualsColIdent(a.Type, b.Type) && + EqualsSliceOfVindexParam(a.Params, b.Params) +} + +// EqualsRefOfWhen does deep equals between the two objects. +func EqualsRefOfWhen(a, b *When) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Cond, b.Cond) && + EqualsExpr(a.Val, b.Val) +} + +// EqualsRefOfWhere does deep equals between the two objects. +func EqualsRefOfWhere(a, b *Where) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + EqualsExpr(a.Expr, b.Expr) +} + +// EqualsRefOfXorExpr does deep equals between the two objects. 
+func EqualsRefOfXorExpr(a, b *XorExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.Left, b.Left) && + EqualsExpr(a.Right, b.Right) +} + +// EqualsAlterOption does deep equals between the two objects. +func EqualsAlterOption(inA, inB AlterOption) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *AddColumns: + b, ok := inB.(*AddColumns) + if !ok { + return false + } + return EqualsRefOfAddColumns(a, b) + case *AddConstraintDefinition: + b, ok := inB.(*AddConstraintDefinition) + if !ok { + return false + } + return EqualsRefOfAddConstraintDefinition(a, b) + case *AddIndexDefinition: + b, ok := inB.(*AddIndexDefinition) + if !ok { + return false + } + return EqualsRefOfAddIndexDefinition(a, b) + case AlgorithmValue: + b, ok := inB.(AlgorithmValue) + if !ok { + return false + } + return a == b + case *AlterCharset: + b, ok := inB.(*AlterCharset) + if !ok { + return false + } + return EqualsRefOfAlterCharset(a, b) + case *AlterColumn: + b, ok := inB.(*AlterColumn) + if !ok { + return false + } + return EqualsRefOfAlterColumn(a, b) + case *ChangeColumn: + b, ok := inB.(*ChangeColumn) + if !ok { + return false + } + return EqualsRefOfChangeColumn(a, b) + case *DropColumn: + b, ok := inB.(*DropColumn) + if !ok { + return false + } + return EqualsRefOfDropColumn(a, b) + case *DropKey: + b, ok := inB.(*DropKey) + if !ok { + return false + } + return EqualsRefOfDropKey(a, b) + case *Force: + b, ok := inB.(*Force) + if !ok { + return false + } + return EqualsRefOfForce(a, b) + case *KeyState: + b, ok := inB.(*KeyState) + if !ok { + return false + } + return EqualsRefOfKeyState(a, b) + case *LockOption: + b, ok := inB.(*LockOption) + if !ok { + return false + } + return EqualsRefOfLockOption(a, b) + case *ModifyColumn: + b, ok := inB.(*ModifyColumn) + if !ok { + return false + } + return EqualsRefOfModifyColumn(a, b) + case 
*OrderByOption: + b, ok := inB.(*OrderByOption) + if !ok { + return false + } + return EqualsRefOfOrderByOption(a, b) + case *RenameIndex: + b, ok := inB.(*RenameIndex) + if !ok { + return false + } + return EqualsRefOfRenameIndex(a, b) + case *RenameTableName: + b, ok := inB.(*RenameTableName) + if !ok { + return false + } + return EqualsRefOfRenameTableName(a, b) + case TableOptions: + b, ok := inB.(TableOptions) + if !ok { + return false + } + return EqualsTableOptions(a, b) + case *TablespaceOperation: + b, ok := inB.(*TablespaceOperation) + if !ok { + return false + } + return EqualsRefOfTablespaceOperation(a, b) + case *Validation: + b, ok := inB.(*Validation) + if !ok { + return false + } + return EqualsRefOfValidation(a, b) + default: + // this should never happen + return false + } +} + +// EqualsCharacteristic does deep equals between the two objects. +func EqualsCharacteristic(inA, inB Characteristic) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case AccessMode: + b, ok := inB.(AccessMode) + if !ok { + return false + } + return a == b + case IsolationLevel: + b, ok := inB.(IsolationLevel) + if !ok { + return false + } + return a == b + default: + // this should never happen + return false + } +} + +// EqualsColTuple does deep equals between the two objects. 
+func EqualsColTuple(inA, inB ColTuple) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case ListArg: + b, ok := inB.(ListArg) + if !ok { + return false + } + return EqualsListArg(a, b) + case *Subquery: + b, ok := inB.(*Subquery) + if !ok { + return false + } + return EqualsRefOfSubquery(a, b) + case ValTuple: + b, ok := inB.(ValTuple) + if !ok { + return false + } + return EqualsValTuple(a, b) + default: + // this should never happen + return false + } +} + +// EqualsConstraintInfo does deep equals between the two objects. +func EqualsConstraintInfo(inA, inB ConstraintInfo) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *CheckConstraintDefinition: + b, ok := inB.(*CheckConstraintDefinition) + if !ok { + return false + } + return EqualsRefOfCheckConstraintDefinition(a, b) + case *ForeignKeyDefinition: + b, ok := inB.(*ForeignKeyDefinition) + if !ok { + return false + } + return EqualsRefOfForeignKeyDefinition(a, b) + default: + // this should never happen + return false + } +} + +// EqualsDBDDLStatement does deep equals between the two objects. +func EqualsDBDDLStatement(inA, inB DBDDLStatement) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *AlterDatabase: + b, ok := inB.(*AlterDatabase) + if !ok { + return false + } + return EqualsRefOfAlterDatabase(a, b) + case *CreateDatabase: + b, ok := inB.(*CreateDatabase) + if !ok { + return false + } + return EqualsRefOfCreateDatabase(a, b) + case *DropDatabase: + b, ok := inB.(*DropDatabase) + if !ok { + return false + } + return EqualsRefOfDropDatabase(a, b) + default: + // this should never happen + return false + } +} + +// EqualsDDLStatement does deep equals between the two objects. 
+func EqualsDDLStatement(inA, inB DDLStatement) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *AlterTable: + b, ok := inB.(*AlterTable) + if !ok { + return false + } + return EqualsRefOfAlterTable(a, b) + case *AlterView: + b, ok := inB.(*AlterView) + if !ok { + return false + } + return EqualsRefOfAlterView(a, b) + case *CreateTable: + b, ok := inB.(*CreateTable) + if !ok { + return false + } + return EqualsRefOfCreateTable(a, b) + case *CreateView: + b, ok := inB.(*CreateView) + if !ok { + return false + } + return EqualsRefOfCreateView(a, b) + case *DropTable: + b, ok := inB.(*DropTable) + if !ok { + return false + } + return EqualsRefOfDropTable(a, b) + case *DropView: + b, ok := inB.(*DropView) + if !ok { + return false + } + return EqualsRefOfDropView(a, b) + case *RenameTable: + b, ok := inB.(*RenameTable) + if !ok { + return false + } + return EqualsRefOfRenameTable(a, b) + case *TruncateTable: + b, ok := inB.(*TruncateTable) + if !ok { + return false + } + return EqualsRefOfTruncateTable(a, b) + default: + // this should never happen + return false + } +} + +// EqualsExplain does deep equals between the two objects. +func EqualsExplain(inA, inB Explain) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *ExplainStmt: + b, ok := inB.(*ExplainStmt) + if !ok { + return false + } + return EqualsRefOfExplainStmt(a, b) + case *ExplainTab: + b, ok := inB.(*ExplainTab) + if !ok { + return false + } + return EqualsRefOfExplainTab(a, b) + default: + // this should never happen + return false + } +} + +// EqualsExpr does deep equals between the two objects. 
+func EqualsExpr(inA, inB Expr) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *AndExpr: + b, ok := inB.(*AndExpr) + if !ok { + return false + } + return EqualsRefOfAndExpr(a, b) + case Argument: + b, ok := inB.(Argument) + if !ok { + return false + } + return a == b + case *BinaryExpr: + b, ok := inB.(*BinaryExpr) + if !ok { + return false + } + return EqualsRefOfBinaryExpr(a, b) + case BoolVal: + b, ok := inB.(BoolVal) + if !ok { + return false + } + return a == b + case *CaseExpr: + b, ok := inB.(*CaseExpr) + if !ok { + return false + } + return EqualsRefOfCaseExpr(a, b) + case *ColName: + b, ok := inB.(*ColName) + if !ok { + return false + } + return EqualsRefOfColName(a, b) + case *CollateExpr: + b, ok := inB.(*CollateExpr) + if !ok { + return false + } + return EqualsRefOfCollateExpr(a, b) + case *ComparisonExpr: + b, ok := inB.(*ComparisonExpr) + if !ok { + return false + } + return EqualsRefOfComparisonExpr(a, b) + case *ConvertExpr: + b, ok := inB.(*ConvertExpr) + if !ok { + return false + } + return EqualsRefOfConvertExpr(a, b) + case *ConvertUsingExpr: + b, ok := inB.(*ConvertUsingExpr) + if !ok { + return false + } + return EqualsRefOfConvertUsingExpr(a, b) + case *CurTimeFuncExpr: + b, ok := inB.(*CurTimeFuncExpr) + if !ok { + return false + } + return EqualsRefOfCurTimeFuncExpr(a, b) + case *Default: + b, ok := inB.(*Default) + if !ok { + return false + } + return EqualsRefOfDefault(a, b) + case *ExistsExpr: + b, ok := inB.(*ExistsExpr) + if !ok { + return false + } + return EqualsRefOfExistsExpr(a, b) + case *FuncExpr: + b, ok := inB.(*FuncExpr) + if !ok { + return false + } + return EqualsRefOfFuncExpr(a, b) + case *GroupConcatExpr: + b, ok := inB.(*GroupConcatExpr) + if !ok { + return false + } + return EqualsRefOfGroupConcatExpr(a, b) + case *IntervalExpr: + b, ok := inB.(*IntervalExpr) + if !ok { + return false + } + return EqualsRefOfIntervalExpr(a, b) 
+ case *IsExpr: + b, ok := inB.(*IsExpr) + if !ok { + return false + } + return EqualsRefOfIsExpr(a, b) + case ListArg: + b, ok := inB.(ListArg) + if !ok { + return false + } + return EqualsListArg(a, b) + case *Literal: + b, ok := inB.(*Literal) + if !ok { + return false + } + return EqualsRefOfLiteral(a, b) + case *MatchExpr: + b, ok := inB.(*MatchExpr) + if !ok { + return false + } + return EqualsRefOfMatchExpr(a, b) + case *NotExpr: + b, ok := inB.(*NotExpr) + if !ok { + return false + } + return EqualsRefOfNotExpr(a, b) + case *NullVal: + b, ok := inB.(*NullVal) + if !ok { + return false + } + return EqualsRefOfNullVal(a, b) + case *OrExpr: + b, ok := inB.(*OrExpr) + if !ok { + return false + } + return EqualsRefOfOrExpr(a, b) + case *RangeCond: + b, ok := inB.(*RangeCond) + if !ok { + return false + } + return EqualsRefOfRangeCond(a, b) + case *Subquery: + b, ok := inB.(*Subquery) + if !ok { + return false + } + return EqualsRefOfSubquery(a, b) + case *SubstrExpr: + b, ok := inB.(*SubstrExpr) + if !ok { + return false + } + return EqualsRefOfSubstrExpr(a, b) + case *TimestampFuncExpr: + b, ok := inB.(*TimestampFuncExpr) + if !ok { + return false + } + return EqualsRefOfTimestampFuncExpr(a, b) + case *UnaryExpr: + b, ok := inB.(*UnaryExpr) + if !ok { + return false + } + return EqualsRefOfUnaryExpr(a, b) + case ValTuple: + b, ok := inB.(ValTuple) + if !ok { + return false + } + return EqualsValTuple(a, b) + case *ValuesFuncExpr: + b, ok := inB.(*ValuesFuncExpr) + if !ok { + return false + } + return EqualsRefOfValuesFuncExpr(a, b) + case *XorExpr: + b, ok := inB.(*XorExpr) + if !ok { + return false + } + return EqualsRefOfXorExpr(a, b) + default: + // this should never happen + return false + } +} + +// EqualsInsertRows does deep equals between the two objects. 
func EqualsInsertRows(inA, inB InsertRows) bool {
	// Two untyped-nil interfaces are equal; nil vs non-nil is not.
	// NOTE(review): a typed nil inside the interface falls through to the
	// per-type Equals helper, which performs its own nil handling.
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	// Dispatch on inA's dynamic type; a type mismatch between the two
	// operands yields false.
	switch a := inA.(type) {
	case *ParenSelect:
		b, ok := inB.(*ParenSelect)
		if !ok {
			return false
		}
		return EqualsRefOfParenSelect(a, b)
	case *Select:
		b, ok := inB.(*Select)
		if !ok {
			return false
		}
		return EqualsRefOfSelect(a, b)
	case *Union:
		b, ok := inB.(*Union)
		if !ok {
			return false
		}
		return EqualsRefOfUnion(a, b)
	case Values:
		b, ok := inB.(Values)
		if !ok {
			return false
		}
		return EqualsValues(a, b)
	default:
		// this should never happen (the switch is meant to cover every
		// implementation of InsertRows); fail closed if it does
		return false
	}
}

// EqualsSelectExpr does deep equals between the two objects.
func EqualsSelectExpr(inA, inB SelectExpr) bool {
	// Same nil/typed-nil dispatch pattern as EqualsInsertRows above.
	if inA == nil && inB == nil {
		return true
	}
	if inA == nil || inB == nil {
		return false
	}
	switch a := inA.(type) {
	case *AliasedExpr:
		b, ok := inB.(*AliasedExpr)
		if !ok {
			return false
		}
		return EqualsRefOfAliasedExpr(a, b)
	case *Nextval:
		b, ok := inB.(*Nextval)
		if !ok {
			return false
		}
		return EqualsRefOfNextval(a, b)
	case *StarExpr:
		b, ok := inB.(*StarExpr)
		if !ok {
			return false
		}
		return EqualsRefOfStarExpr(a, b)
	default:
		// this should never happen
		return false
	}
}

// EqualsSelectStatement does deep equals between the two objects.
+func EqualsSelectStatement(inA, inB SelectStatement) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *ParenSelect: + b, ok := inB.(*ParenSelect) + if !ok { + return false + } + return EqualsRefOfParenSelect(a, b) + case *Select: + b, ok := inB.(*Select) + if !ok { + return false + } + return EqualsRefOfSelect(a, b) + case *Union: + b, ok := inB.(*Union) + if !ok { + return false + } + return EqualsRefOfUnion(a, b) + default: + // this should never happen + return false + } +} + +// EqualsShowInternal does deep equals between the two objects. +func EqualsShowInternal(inA, inB ShowInternal) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *ShowBasic: + b, ok := inB.(*ShowBasic) + if !ok { + return false + } + return EqualsRefOfShowBasic(a, b) + case *ShowCreate: + b, ok := inB.(*ShowCreate) + if !ok { + return false + } + return EqualsRefOfShowCreate(a, b) + case *ShowLegacy: + b, ok := inB.(*ShowLegacy) + if !ok { + return false + } + return EqualsRefOfShowLegacy(a, b) + default: + // this should never happen + return false + } +} + +// EqualsSimpleTableExpr does deep equals between the two objects. +func EqualsSimpleTableExpr(inA, inB SimpleTableExpr) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *DerivedTable: + b, ok := inB.(*DerivedTable) + if !ok { + return false + } + return EqualsRefOfDerivedTable(a, b) + case TableName: + b, ok := inB.(TableName) + if !ok { + return false + } + return EqualsTableName(a, b) + default: + // this should never happen + return false + } +} + +// EqualsStatement does deep equals between the two objects. 
+func EqualsStatement(inA, inB Statement) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *AlterDatabase: + b, ok := inB.(*AlterDatabase) + if !ok { + return false + } + return EqualsRefOfAlterDatabase(a, b) + case *AlterMigration: + b, ok := inB.(*AlterMigration) + if !ok { + return false + } + return EqualsRefOfAlterMigration(a, b) + case *AlterTable: + b, ok := inB.(*AlterTable) + if !ok { + return false + } + return EqualsRefOfAlterTable(a, b) + case *AlterView: + b, ok := inB.(*AlterView) + if !ok { + return false + } + return EqualsRefOfAlterView(a, b) + case *AlterVschema: + b, ok := inB.(*AlterVschema) + if !ok { + return false + } + return EqualsRefOfAlterVschema(a, b) + case *Begin: + b, ok := inB.(*Begin) + if !ok { + return false + } + return EqualsRefOfBegin(a, b) + case *CallProc: + b, ok := inB.(*CallProc) + if !ok { + return false + } + return EqualsRefOfCallProc(a, b) + case *Commit: + b, ok := inB.(*Commit) + if !ok { + return false + } + return EqualsRefOfCommit(a, b) + case *CreateDatabase: + b, ok := inB.(*CreateDatabase) + if !ok { + return false + } + return EqualsRefOfCreateDatabase(a, b) + case *CreateTable: + b, ok := inB.(*CreateTable) + if !ok { + return false + } + return EqualsRefOfCreateTable(a, b) + case *CreateView: + b, ok := inB.(*CreateView) + if !ok { + return false + } + return EqualsRefOfCreateView(a, b) + case *Delete: + b, ok := inB.(*Delete) + if !ok { + return false + } + return EqualsRefOfDelete(a, b) + case *DropDatabase: + b, ok := inB.(*DropDatabase) + if !ok { + return false + } + return EqualsRefOfDropDatabase(a, b) + case *DropTable: + b, ok := inB.(*DropTable) + if !ok { + return false + } + return EqualsRefOfDropTable(a, b) + case *DropView: + b, ok := inB.(*DropView) + if !ok { + return false + } + return EqualsRefOfDropView(a, b) + case *ExplainStmt: + b, ok := inB.(*ExplainStmt) + if !ok { + return false + } + return 
EqualsRefOfExplainStmt(a, b) + case *ExplainTab: + b, ok := inB.(*ExplainTab) + if !ok { + return false + } + return EqualsRefOfExplainTab(a, b) + case *Flush: + b, ok := inB.(*Flush) + if !ok { + return false + } + return EqualsRefOfFlush(a, b) + case *Insert: + b, ok := inB.(*Insert) + if !ok { + return false + } + return EqualsRefOfInsert(a, b) + case *Load: + b, ok := inB.(*Load) + if !ok { + return false + } + return EqualsRefOfLoad(a, b) + case *LockTables: + b, ok := inB.(*LockTables) + if !ok { + return false + } + return EqualsRefOfLockTables(a, b) + case *OtherAdmin: + b, ok := inB.(*OtherAdmin) + if !ok { + return false + } + return EqualsRefOfOtherAdmin(a, b) + case *OtherRead: + b, ok := inB.(*OtherRead) + if !ok { + return false + } + return EqualsRefOfOtherRead(a, b) + case *ParenSelect: + b, ok := inB.(*ParenSelect) + if !ok { + return false + } + return EqualsRefOfParenSelect(a, b) + case *Release: + b, ok := inB.(*Release) + if !ok { + return false + } + return EqualsRefOfRelease(a, b) + case *RenameTable: + b, ok := inB.(*RenameTable) + if !ok { + return false + } + return EqualsRefOfRenameTable(a, b) + case *RevertMigration: + b, ok := inB.(*RevertMigration) + if !ok { + return false + } + return EqualsRefOfRevertMigration(a, b) + case *Rollback: + b, ok := inB.(*Rollback) + if !ok { + return false + } + return EqualsRefOfRollback(a, b) + case *SRollback: + b, ok := inB.(*SRollback) + if !ok { + return false + } + return EqualsRefOfSRollback(a, b) + case *Savepoint: + b, ok := inB.(*Savepoint) + if !ok { + return false + } + return EqualsRefOfSavepoint(a, b) + case *Select: + b, ok := inB.(*Select) + if !ok { + return false + } + return EqualsRefOfSelect(a, b) + case *Set: + b, ok := inB.(*Set) + if !ok { + return false + } + return EqualsRefOfSet(a, b) + case *SetTransaction: + b, ok := inB.(*SetTransaction) + if !ok { + return false + } + return EqualsRefOfSetTransaction(a, b) + case *Show: + b, ok := inB.(*Show) + if !ok { + return false + } 
+ return EqualsRefOfShow(a, b) + case *Stream: + b, ok := inB.(*Stream) + if !ok { + return false + } + return EqualsRefOfStream(a, b) + case *TruncateTable: + b, ok := inB.(*TruncateTable) + if !ok { + return false + } + return EqualsRefOfTruncateTable(a, b) + case *Union: + b, ok := inB.(*Union) + if !ok { + return false + } + return EqualsRefOfUnion(a, b) + case *UnlockTables: + b, ok := inB.(*UnlockTables) + if !ok { + return false + } + return EqualsRefOfUnlockTables(a, b) + case *Update: + b, ok := inB.(*Update) + if !ok { + return false + } + return EqualsRefOfUpdate(a, b) + case *Use: + b, ok := inB.(*Use) + if !ok { + return false + } + return EqualsRefOfUse(a, b) + case *VStream: + b, ok := inB.(*VStream) + if !ok { + return false + } + return EqualsRefOfVStream(a, b) + default: + // this should never happen + return false + } +} + +// EqualsTableExpr does deep equals between the two objects. +func EqualsTableExpr(inA, inB TableExpr) bool { + if inA == nil && inB == nil { + return true + } + if inA == nil || inB == nil { + return false + } + switch a := inA.(type) { + case *AliasedTableExpr: + b, ok := inB.(*AliasedTableExpr) + if !ok { + return false + } + return EqualsRefOfAliasedTableExpr(a, b) + case *JoinTableExpr: + b, ok := inB.(*JoinTableExpr) + if !ok { + return false + } + return EqualsRefOfJoinTableExpr(a, b) + case *ParenTableExpr: + b, ok := inB.(*ParenTableExpr) + if !ok { + return false + } + return EqualsRefOfParenTableExpr(a, b) + default: + // this should never happen + return false + } +} + +// EqualsSliceOfRefOfColumnDefinition does deep equals between the two objects. +func EqualsSliceOfRefOfColumnDefinition(a, b []*ColumnDefinition) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfColumnDefinition(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfCollateAndCharset does deep equals between the two objects. 
+func EqualsSliceOfCollateAndCharset(a, b []CollateAndCharset) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsCollateAndCharset(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfAlterOption does deep equals between the two objects. +func EqualsSliceOfAlterOption(a, b []AlterOption) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsAlterOption(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfColIdent does deep equals between the two objects. +func EqualsSliceOfColIdent(a, b []ColIdent) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsColIdent(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfRefOfWhen does deep equals between the two objects. +func EqualsSliceOfRefOfWhen(a, b []*When) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfWhen(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfColIdent does deep equals between the two objects. +func EqualsRefOfColIdent(a, b *ColIdent) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.val == b.val && + a.lowered == b.lowered && + a.at == b.at +} + +// EqualsColumnType does deep equals between the two objects. +func EqualsColumnType(a, b ColumnType) bool { + return a.Type == b.Type && + a.Unsigned == b.Unsigned && + a.Zerofill == b.Zerofill && + a.Charset == b.Charset && + a.Collate == b.Collate && + EqualsRefOfColumnTypeOptions(a.Options, b.Options) && + EqualsRefOfLiteral(a.Length, b.Length) && + EqualsRefOfLiteral(a.Scale, b.Scale) && + EqualsSliceOfString(a.EnumValues, b.EnumValues) +} + +// EqualsRefOfColumnTypeOptions does deep equals between the two objects. 
+func EqualsRefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Autoincrement == b.Autoincrement && + EqualsRefOfBool(a.Null, b.Null) && + EqualsExpr(a.Default, b.Default) && + EqualsExpr(a.OnUpdate, b.OnUpdate) && + EqualsRefOfLiteral(a.Comment, b.Comment) && + a.KeyOpt == b.KeyOpt +} + +// EqualsSliceOfString does deep equals between the two objects. +func EqualsSliceOfString(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +// EqualsSliceOfRefOfIndexColumn does deep equals between the two objects. +func EqualsSliceOfRefOfIndexColumn(a, b []*IndexColumn) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfIndexColumn(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfRefOfIndexOption does deep equals between the two objects. +func EqualsSliceOfRefOfIndexOption(a, b []*IndexOption) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfIndexOption(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfJoinCondition does deep equals between the two objects. +func EqualsRefOfJoinCondition(a, b *JoinCondition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsExpr(a.On, b.On) && + EqualsColumns(a.Using, b.Using) +} + +// EqualsTableAndLockTypes does deep equals between the two objects. +func EqualsTableAndLockTypes(a, b TableAndLockTypes) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfTableAndLockType(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfRefOfPartitionDefinition does deep equals between the two objects. 
+func EqualsSliceOfRefOfPartitionDefinition(a, b []*PartitionDefinition) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfPartitionDefinition(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfRefOfRenameTablePair does deep equals between the two objects. +func EqualsSliceOfRefOfRenameTablePair(a, b []*RenameTablePair) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfRenameTablePair(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfBool does deep equals between the two objects. +func EqualsRefOfBool(a, b *bool) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// EqualsSliceOfCharacteristic does deep equals between the two objects. +func EqualsSliceOfCharacteristic(a, b []Characteristic) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsCharacteristic(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfShowTablesOpt does deep equals between the two objects. +func EqualsRefOfShowTablesOpt(a, b *ShowTablesOpt) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Full == b.Full && + a.DbName == b.DbName && + EqualsRefOfShowFilter(a.Filter, b.Filter) +} + +// EqualsRefOfTableIdent does deep equals between the two objects. +func EqualsRefOfTableIdent(a, b *TableIdent) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.v == b.v +} + +// EqualsRefOfTableName does deep equals between the two objects. +func EqualsRefOfTableName(a, b *TableName) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableIdent(a.Name, b.Name) && + EqualsTableIdent(a.Qualifier, b.Qualifier) +} + +// EqualsRefOfTableOption does deep equals between the two objects. 
+func EqualsRefOfTableOption(a, b *TableOption) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Name == b.Name && + a.String == b.String && + EqualsRefOfLiteral(a.Value, b.Value) && + EqualsTableNames(a.Tables, b.Tables) +} + +// EqualsSliceOfRefOfIndexDefinition does deep equals between the two objects. +func EqualsSliceOfRefOfIndexDefinition(a, b []*IndexDefinition) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfIndexDefinition(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfRefOfConstraintDefinition does deep equals between the two objects. +func EqualsSliceOfRefOfConstraintDefinition(a, b []*ConstraintDefinition) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfConstraintDefinition(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsSliceOfRefOfUnionSelect does deep equals between the two objects. +func EqualsSliceOfRefOfUnionSelect(a, b []*UnionSelect) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsRefOfUnionSelect(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsRefOfVindexParam does deep equals between the two objects. +func EqualsRefOfVindexParam(a, b *VindexParam) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Val == b.Val && + EqualsColIdent(a.Key, b.Key) +} + +// EqualsSliceOfVindexParam does deep equals between the two objects. +func EqualsSliceOfVindexParam(a, b []VindexParam) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !EqualsVindexParam(a[i], b[i]) { + return false + } + } + return true +} + +// EqualsCollateAndCharset does deep equals between the two objects. 
+func EqualsCollateAndCharset(a, b CollateAndCharset) bool { + return a.IsDefault == b.IsDefault && + a.Value == b.Value && + a.Type == b.Type +} + +// EqualsRefOfIndexColumn does deep equals between the two objects. +func EqualsRefOfIndexColumn(a, b *IndexColumn) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsColIdent(a.Column, b.Column) && + EqualsRefOfLiteral(a.Length, b.Length) && + a.Direction == b.Direction +} + +// EqualsRefOfIndexOption does deep equals between the two objects. +func EqualsRefOfIndexOption(a, b *IndexOption) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Name == b.Name && + a.String == b.String && + EqualsRefOfLiteral(a.Value, b.Value) +} + +// EqualsRefOfTableAndLockType does deep equals between the two objects. +func EqualsRefOfTableAndLockType(a, b *TableAndLockType) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableExpr(a.Table, b.Table) && + a.Lock == b.Lock +} + +// EqualsRefOfRenameTablePair does deep equals between the two objects. +func EqualsRefOfRenameTablePair(a, b *RenameTablePair) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return EqualsTableName(a.FromTable, b.FromTable) && + EqualsTableName(a.ToTable, b.ToTable) +} + +// EqualsRefOfCollateAndCharset does deep equals between the two objects. +func EqualsRefOfCollateAndCharset(a, b *CollateAndCharset) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IsDefault == b.IsDefault && + a.Value == b.Value && + a.Type == b.Type +} diff --git a/go/vt/sqlparser/ast_format.go b/go/vt/sqlparser/ast_format.go new file mode 100644 index 00000000000..6cfda1e4d06 --- /dev/null +++ b/go/vt/sqlparser/ast_format.go @@ -0,0 +1,1669 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strings" + + "vitess.io/vitess/go/sqltypes" +) + +// Format formats the node. +func (node *Select) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "select %v", node.Comments) + + if node.Distinct { + buf.WriteString(DistinctStr) + } + if node.Cache != nil { + if *node.Cache { + buf.WriteString(SQLCacheStr) + } else { + buf.WriteString(SQLNoCacheStr) + } + } + if node.StraightJoinHint { + buf.WriteString(StraightJoinHint) + } + if node.SQLCalcFoundRows { + buf.WriteString(SQLCalcFoundRowsStr) + } + + buf.astPrintf(node, "%v from %v%v%v%v%v%v%s%v", + node.SelectExprs, + node.From, node.Where, + node.GroupBy, node.Having, node.OrderBy, + node.Limit, node.Lock.ToString(), node.Into) +} + +// Format formats the node. +func (node *ParenSelect) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", node.Select) +} + +// Format formats the node. +func (node *Union) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v", node.FirstStatement) + for _, us := range node.UnionSelects { + buf.astPrintf(node, "%v", us) + } + buf.astPrintf(node, "%v%v%s", node.OrderBy, node.Limit, node.Lock.ToString()) +} + +// Format formats the node. +func (node *UnionSelect) Format(buf *TrackedBuffer) { + if node.Distinct { + buf.astPrintf(node, " %s %v", UnionStr, node.Statement) + } else { + buf.astPrintf(node, " %s %v", UnionAllStr, node.Statement) + } +} + +// Format formats the node. 
+func (node *VStream) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "vstream %v%v from %v", + node.Comments, node.SelectExpr, node.Table) +} + +// Format formats the node. +func (node *Stream) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "stream %v%v from %v", + node.Comments, node.SelectExpr, node.Table) +} + +// Format formats the node. +func (node *Insert) Format(buf *TrackedBuffer) { + switch node.Action { + case InsertAct: + buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", + InsertStr, + node.Comments, node.Ignore.ToString(), + node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) + case ReplaceAct: + buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", + ReplaceStr, + node.Comments, node.Ignore.ToString(), + node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) + default: + buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", + "Unkown Insert Action", + node.Comments, node.Ignore.ToString(), + node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) + } + +} + +// Format formats the node. +func (node *Update) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "update %v%s%v set %v%v%v%v", + node.Comments, node.Ignore.ToString(), node.TableExprs, + node.Exprs, node.Where, node.OrderBy, node.Limit) +} + +// Format formats the node. +func (node *Delete) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "delete %v", node.Comments) + if node.Ignore { + buf.WriteString("ignore ") + } + if node.Targets != nil { + buf.astPrintf(node, "%v ", node.Targets) + } + buf.astPrintf(node, "from %v%v%v%v%v", node.TableExprs, node.Partitions, node.Where, node.OrderBy, node.Limit) +} + +// Format formats the node. +func (node *Set) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "set %v%v", node.Comments, node.Exprs) +} + +// Format formats the node. 
+func (node *SetTransaction) Format(buf *TrackedBuffer) { + if node.Scope == ImplicitScope { + buf.astPrintf(node, "set %vtransaction ", node.Comments) + } else { + buf.astPrintf(node, "set %v%s transaction ", node.Comments, node.Scope.ToString()) + } + + for i, char := range node.Characteristics { + if i > 0 { + buf.WriteString(", ") + } + buf.astPrintf(node, "%v", char) + } +} + +// Format formats the node. +func (node *DropDatabase) Format(buf *TrackedBuffer) { + exists := "" + if node.IfExists { + exists = "if exists " + } + buf.astPrintf(node, "%s database %v%s%v", DropStr, node.Comments, exists, node.DBName) +} + +// Format formats the node. +func (node *Flush) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s", FlushStr) + if node.IsLocal { + buf.WriteString(" local") + } + if len(node.FlushOptions) != 0 { + prefix := " " + for _, option := range node.FlushOptions { + buf.astPrintf(node, "%s%s", prefix, option) + prefix = ", " + } + } else { + buf.WriteString(" tables") + if len(node.TableNames) != 0 { + buf.astPrintf(node, " %v", node.TableNames) + } + if node.ForExport { + buf.WriteString(" for export") + } + if node.WithLock { + buf.WriteString(" with read lock") + } + } +} + +// Format formats the node. 
+func (node *AlterVschema) Format(buf *TrackedBuffer) { + switch node.Action { + case CreateVindexDDLAction: + buf.astPrintf(node, "alter vschema create vindex %v %v", node.Table, node.VindexSpec) + case DropVindexDDLAction: + buf.astPrintf(node, "alter vschema drop vindex %v", node.Table) + case AddVschemaTableDDLAction: + buf.astPrintf(node, "alter vschema add table %v", node.Table) + case DropVschemaTableDDLAction: + buf.astPrintf(node, "alter vschema drop table %v", node.Table) + case AddColVindexDDLAction: + buf.astPrintf(node, "alter vschema on %v add vindex %v (", node.Table, node.VindexSpec.Name) + for i, col := range node.VindexCols { + if i != 0 { + buf.astPrintf(node, ", %v", col) + } else { + buf.astPrintf(node, "%v", col) + } + } + buf.astPrintf(node, ")") + if node.VindexSpec.Type.String() != "" { + buf.astPrintf(node, " %v", node.VindexSpec) + } + case DropColVindexDDLAction: + buf.astPrintf(node, "alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name) + case AddSequenceDDLAction: + buf.astPrintf(node, "alter vschema add sequence %v", node.Table) + case AddAutoIncDDLAction: + buf.astPrintf(node, "alter vschema on %v add auto_increment %v", node.Table, node.AutoIncSpec) + default: + buf.astPrintf(node, "%s table %v", node.Action.ToString(), node.Table) + } +} + +// Format formats the node. +func (node *AlterMigration) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "alter vitess_migration") + if node.UUID != "" { + buf.astPrintf(node, " '%s'", node.UUID) + } + var alterType string + switch node.Type { + case RetryMigrationType: + alterType = "retry" + case CompleteMigrationType: + alterType = "complete" + case CancelMigrationType: + alterType = "cancel" + case CancelAllMigrationType: + alterType = "cancel all" + } + buf.astPrintf(node, " %s", alterType) +} + +// Format formats the node. 
+func (node *RevertMigration) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "revert vitess_migration '%s'", node.UUID) +} + +// Format formats the node. +func (node *OptLike) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "like %v", node.LikeTable) +} + +// Format formats the node. +func (node *PartitionSpec) Format(buf *TrackedBuffer) { + switch node.Action { + case ReorganizeAction: + buf.astPrintf(node, "%s ", ReorganizeStr) + for i, n := range node.Names { + if i != 0 { + buf.WriteString(", ") + } + buf.astPrintf(node, "%v", n) + } + buf.WriteString(" into (") + for i, pd := range node.Definitions { + if i != 0 { + buf.WriteString(", ") + } + buf.astPrintf(node, "%v", pd) + } + buf.astPrintf(node, ")") + case AddAction: + buf.astPrintf(node, "%s (%v)", AddStr, node.Definitions[0]) + case DropAction: + buf.astPrintf(node, "%s ", DropPartitionStr) + for i, n := range node.Names { + if i != 0 { + buf.WriteString(", ") + } + buf.astPrintf(node, "%v", n) + } + case DiscardAction: + buf.astPrintf(node, "%s ", DiscardStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + buf.WriteString(" tablespace") + case ImportAction: + buf.astPrintf(node, "%s ", ImportStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + buf.WriteString(" tablespace") + case TruncateAction: + buf.astPrintf(node, "%s ", TruncatePartitionStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + case CoalesceAction: + buf.astPrintf(node, "%s %v", CoalesceStr, node.Number) + case ExchangeAction: + buf.astPrintf(node, "%s %v with table %v", ExchangeStr, node.Names[0], node.TableName) + if node.WithoutValidation { + buf.WriteString(" without 
validation") + } + case AnalyzeAction: + buf.astPrintf(node, "%s ", AnalyzePartitionStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + case CheckAction: + buf.astPrintf(node, "%s ", CheckStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + case OptimizeAction: + buf.astPrintf(node, "%s ", OptimizeStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + case RebuildAction: + buf.astPrintf(node, "%s ", RebuildStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + case RepairAction: + buf.astPrintf(node, "%s ", RepairStr) + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + } + case RemoveAction: + buf.WriteString(RemoveStr) + case UpgradeAction: + buf.WriteString(UpgradeStr) + default: + panic("unimplemented") + } +} + +// Format formats the node +func (node *PartitionDefinition) Format(buf *TrackedBuffer) { + if !node.Maxvalue { + buf.astPrintf(node, "partition %v values less than (%v)", node.Name, node.Limit) + } else { + buf.astPrintf(node, "partition %v values less than (maxvalue)", node.Name) + } +} + +// Format formats the node. 
+func (ts *TableSpec) Format(buf *TrackedBuffer) { + buf.astPrintf(ts, "(\n") + for i, col := range ts.Columns { + if i == 0 { + buf.astPrintf(ts, "\t%v", col) + } else { + buf.astPrintf(ts, ",\n\t%v", col) + } + } + for _, idx := range ts.Indexes { + buf.astPrintf(ts, ",\n\t%v", idx) + } + for _, c := range ts.Constraints { + buf.astPrintf(ts, ",\n\t%v", c) + } + + buf.astPrintf(ts, "\n)") + for i, opt := range ts.Options { + if i != 0 { + buf.WriteString(",\n ") + } + buf.astPrintf(ts, " %s", opt.Name) + if opt.String != "" { + buf.astPrintf(ts, " %s", opt.String) + } else if opt.Value != nil { + buf.astPrintf(ts, " %v", opt.Value) + } else { + buf.astPrintf(ts, " (%v)", opt.Tables) + } + } +} + +// Format formats the node. +func (col *ColumnDefinition) Format(buf *TrackedBuffer) { + buf.astPrintf(col, "%v %v", col.Name, &col.Type) +} + +// Format returns a canonical string representation of the type and all relevant options +func (ct *ColumnType) Format(buf *TrackedBuffer) { + buf.astPrintf(ct, "%s", ct.Type) + + if ct.Length != nil && ct.Scale != nil { + buf.astPrintf(ct, "(%v,%v)", ct.Length, ct.Scale) + + } else if ct.Length != nil { + buf.astPrintf(ct, "(%v)", ct.Length) + } + + if ct.EnumValues != nil { + buf.astPrintf(ct, "(%s)", strings.Join(ct.EnumValues, ", ")) + } + + if ct.Unsigned { + buf.astPrintf(ct, " %s", keywordStrings[UNSIGNED]) + } + if ct.Zerofill { + buf.astPrintf(ct, " %s", keywordStrings[ZEROFILL]) + } + if ct.Charset != "" { + buf.astPrintf(ct, " %s %s %s", keywordStrings[CHARACTER], keywordStrings[SET], ct.Charset) + } + if ct.Collate != "" { + buf.astPrintf(ct, " %s %s", keywordStrings[COLLATE], ct.Collate) + } + if ct.Options.Null != nil { + if *ct.Options.Null { + buf.astPrintf(ct, " %s", keywordStrings[NULL]) + } else { + buf.astPrintf(ct, " %s %s", keywordStrings[NOT], keywordStrings[NULL]) + } + } + if ct.Options.Default != nil { + buf.astPrintf(ct, " %s %v", keywordStrings[DEFAULT], ct.Options.Default) + } + if ct.Options.OnUpdate 
!= nil { + buf.astPrintf(ct, " %s %s %v", keywordStrings[ON], keywordStrings[UPDATE], ct.Options.OnUpdate) + } + if ct.Options.Autoincrement { + buf.astPrintf(ct, " %s", keywordStrings[AUTO_INCREMENT]) + } + if ct.Options.Comment != nil { + buf.astPrintf(ct, " %s %v", keywordStrings[COMMENT_KEYWORD], ct.Options.Comment) + } + if ct.Options.KeyOpt == colKeyPrimary { + buf.astPrintf(ct, " %s %s", keywordStrings[PRIMARY], keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKeyUnique { + buf.astPrintf(ct, " %s", keywordStrings[UNIQUE]) + } + if ct.Options.KeyOpt == colKeyUniqueKey { + buf.astPrintf(ct, " %s %s", keywordStrings[UNIQUE], keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKeySpatialKey { + buf.astPrintf(ct, " %s %s", keywordStrings[SPATIAL], keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKeyFulltextKey { + buf.astPrintf(ct, " %s %s", keywordStrings[FULLTEXT], keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKey { + buf.astPrintf(ct, " %s", keywordStrings[KEY]) + } +} + +// Format formats the node. +func (idx *IndexDefinition) Format(buf *TrackedBuffer) { + buf.astPrintf(idx, "%v (", idx.Info) + for i, col := range idx.Columns { + if i != 0 { + buf.astPrintf(idx, ", %v", col.Column) + } else { + buf.astPrintf(idx, "%v", col.Column) + } + if col.Length != nil { + buf.astPrintf(idx, "(%v)", col.Length) + } + if col.Direction == DescOrder { + buf.astPrintf(idx, " desc") + } + } + buf.astPrintf(idx, ")") + + for _, opt := range idx.Options { + buf.astPrintf(idx, " %s", opt.Name) + if opt.String != "" { + buf.astPrintf(idx, " %s", opt.String) + } else { + buf.astPrintf(idx, " %v", opt.Value) + } + } +} + +// Format formats the node. 
+func (ii *IndexInfo) Format(buf *TrackedBuffer) { + if !ii.ConstraintName.IsEmpty() { + buf.astPrintf(ii, "constraint %v ", ii.ConstraintName) + } + if ii.Primary { + buf.astPrintf(ii, "%s", ii.Type) + } else { + buf.astPrintf(ii, "%s", ii.Type) + if !ii.Name.IsEmpty() { + buf.astPrintf(ii, " %v", ii.Name) + } + } +} + +// Format formats the node. +func (node *AutoIncSpec) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v ", node.Column) + buf.astPrintf(node, "using %v", node.Sequence) +} + +// Format formats the node. The "CREATE VINDEX" preamble was formatted in +// the containing DDL node Format, so this just prints the type, any +// parameters, and optionally the owner +func (node *VindexSpec) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "using %v", node.Type) + + numParams := len(node.Params) + if numParams != 0 { + buf.astPrintf(node, " with ") + for i, p := range node.Params { + if i != 0 { + buf.astPrintf(node, ", ") + } + buf.astPrintf(node, "%v", p) + } + } +} + +// Format formats the node. +func (node VindexParam) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s=%s", node.Key.String(), node.Val) +} + +// Format formats the node. +func (c *ConstraintDefinition) Format(buf *TrackedBuffer) { + if !c.Name.IsEmpty() { + buf.astPrintf(c, "constraint %v ", c.Name) + } + c.Details.Format(buf) +} + +// Format formats the node. +func (a ReferenceAction) Format(buf *TrackedBuffer) { + switch a { + case Restrict: + buf.WriteString("restrict") + case Cascade: + buf.WriteString("cascade") + case NoAction: + buf.WriteString("no action") + case SetNull: + buf.WriteString("set null") + case SetDefault: + buf.WriteString("set default") + } +} + +// Format formats the node. 
+func (f *ForeignKeyDefinition) Format(buf *TrackedBuffer) { + buf.astPrintf(f, "foreign key %v references %v %v", f.Source, f.ReferencedTable, f.ReferencedColumns) + if f.OnDelete != DefaultAction { + buf.astPrintf(f, " on delete %v", f.OnDelete) + } + if f.OnUpdate != DefaultAction { + buf.astPrintf(f, " on update %v", f.OnUpdate) + } +} + +// Format formats the node. +func (c *CheckConstraintDefinition) Format(buf *TrackedBuffer) { + buf.astPrintf(c, "check (%v)", c.Expr) + if !c.Enforced { + buf.astPrintf(c, " not enforced") + } +} + +// Format formats the node. +func (node *Show) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v", node.Internal) +} + +// Format formats the node. +func (node *ShowLegacy) Format(buf *TrackedBuffer) { + nodeType := strings.ToLower(node.Type) + if (nodeType == "tables" || nodeType == "columns" || nodeType == "fields" || nodeType == "index" || nodeType == "keys" || nodeType == "indexes" || + nodeType == "databases" || nodeType == "schemas" || nodeType == "keyspaces" || nodeType == "vitess_keyspaces" || nodeType == "vitess_shards" || nodeType == "vitess_tablets") && node.ShowTablesOpt != nil { + opt := node.ShowTablesOpt + if node.Extended != "" { + buf.astPrintf(node, "show %s%s", node.Extended, nodeType) + } else { + buf.astPrintf(node, "show %s%s", opt.Full, nodeType) + } + if (nodeType == "columns" || nodeType == "fields") && node.HasOnTable() { + buf.astPrintf(node, " from %v", node.OnTable) + } + if (nodeType == "index" || nodeType == "keys" || nodeType == "indexes") && node.HasOnTable() { + buf.astPrintf(node, " from %v", node.OnTable) + } + if opt.DbName != "" { + buf.astPrintf(node, " from %s", opt.DbName) + } + buf.astPrintf(node, "%v", opt.Filter) + return + } + if node.Scope == ImplicitScope { + buf.astPrintf(node, "show %s", nodeType) + } else { + buf.astPrintf(node, "show %s %s", node.Scope.ToString(), nodeType) + } + if node.HasOnTable() { + buf.astPrintf(node, " on %v", node.OnTable) + } + if nodeType == 
"collation" && node.ShowCollationFilterOpt != nil { + buf.astPrintf(node, " where %v", node.ShowCollationFilterOpt) + } + if nodeType == "charset" && node.ShowTablesOpt != nil { + buf.astPrintf(node, "%v", node.ShowTablesOpt.Filter) + } + if node.HasTable() { + buf.astPrintf(node, " %v", node.Table) + } +} + +// Format formats the node. +func (node *ShowFilter) Format(buf *TrackedBuffer) { + if node == nil { + return + } + if node.Like != "" { + buf.astPrintf(node, " like ") + sqltypes.BufEncodeStringSQL(buf.Builder, node.Like) + } else { + buf.astPrintf(node, " where %v", node.Filter) + } +} + +// Format formats the node. +func (node *Use) Format(buf *TrackedBuffer) { + if node.DBName.v != "" { + buf.astPrintf(node, "use %v", node.DBName) + } else { + buf.astPrintf(node, "use") + } +} + +// Format formats the node. +func (node *Commit) Format(buf *TrackedBuffer) { + buf.WriteString("commit") +} + +// Format formats the node. +func (node *Begin) Format(buf *TrackedBuffer) { + buf.WriteString("begin") +} + +// Format formats the node. +func (node *Rollback) Format(buf *TrackedBuffer) { + buf.WriteString("rollback") +} + +// Format formats the node. +func (node *SRollback) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "rollback to %v", node.Name) +} + +// Format formats the node. +func (node *Savepoint) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "savepoint %v", node.Name) +} + +// Format formats the node. +func (node *Release) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "release savepoint %v", node.Name) +} + +// Format formats the node. +func (node *ExplainStmt) Format(buf *TrackedBuffer) { + format := "" + switch node.Type { + case EmptyType: + case AnalyzeType: + format = AnalyzeStr + " " + default: + format = "format = " + node.Type.ToString() + " " + } + buf.astPrintf(node, "explain %s%v", format, node.Statement) +} + +// Format formats the node. 
+func (node *ExplainTab) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "explain %v", node.Table) + if node.Wild != "" { + buf.astPrintf(node, " %s", node.Wild) + } +} + +// Format formats the node. +func (node *CallProc) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "call %v(%v)", node.Name, node.Params) +} + +// Format formats the node. +func (node *OtherRead) Format(buf *TrackedBuffer) { + buf.WriteString("otherread") +} + +// Format formats the node. +func (node *OtherAdmin) Format(buf *TrackedBuffer) { + buf.WriteString("otheradmin") +} + +// Format formats the node. +func (node Comments) Format(buf *TrackedBuffer) { + for _, c := range node { + buf.astPrintf(node, "%s ", c) + } +} + +// Format formats the node. +func (node SelectExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *StarExpr) Format(buf *TrackedBuffer) { + if !node.TableName.IsEmpty() { + buf.astPrintf(node, "%v.", node.TableName) + } + buf.astPrintf(node, "*") +} + +// Format formats the node. +func (node *AliasedExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v", node.Expr) + if !node.As.IsEmpty() { + buf.astPrintf(node, " as %v", node.As) + } +} + +// Format formats the node. +func (node *Nextval) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "next %v values", node.Expr) +} + +// Format formats the node. +func (node Columns) Format(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := "(" + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + buf.WriteString(")") +} + +// Format formats the node +func (node Partitions) Format(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := " partition (" + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + buf.WriteString(")") +} + +// Format formats the node. 
+func (node TableExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *AliasedTableExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v%v", node.Expr, node.Partitions) + if !node.As.IsEmpty() { + buf.astPrintf(node, " as %v", node.As) + } + if node.Hints != nil { + // Hint node provides the space padding. + buf.astPrintf(node, "%v", node.Hints) + } +} + +// Format formats the node. +func (node TableNames) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node TableName) Format(buf *TrackedBuffer) { + if node.IsEmpty() { + return + } + if !node.Qualifier.IsEmpty() { + buf.astPrintf(node, "%v.", node.Qualifier) + } + buf.astPrintf(node, "%v", node.Name) +} + +// Format formats the node. +func (node *ParenTableExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", node.Exprs) +} + +// Format formats the node. +func (node JoinCondition) Format(buf *TrackedBuffer) { + if node.On != nil { + buf.astPrintf(node, " on %v", node.On) + } + if node.Using != nil { + buf.astPrintf(node, " using %v", node.Using) + } +} + +// Format formats the node. +func (node *JoinTableExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s %v%v", node.LeftExpr, node.Join.ToString(), node.RightExpr, node.Condition) +} + +// Format formats the node. +func (node *IndexHints) Format(buf *TrackedBuffer) { + buf.astPrintf(node, " %sindex ", node.Type.ToString()) + if len(node.Indexes) == 0 { + buf.astPrintf(node, "()") + } else { + prefix := "(" + for _, n := range node.Indexes { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + buf.astPrintf(node, ")") + } +} + +// Format formats the node. 
+func (node *Where) Format(buf *TrackedBuffer) { + if node == nil || node.Expr == nil { + return + } + buf.astPrintf(node, " %s %v", node.Type.ToString(), node.Expr) +} + +// Format formats the node. +func (node Exprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *AndExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%l and %r", node.Left, node.Right) +} + +// Format formats the node. +func (node *OrExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%l or %r", node.Left, node.Right) +} + +// Format formats the node. +func (node *XorExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%l xor %r", node.Left, node.Right) +} + +// Format formats the node. +func (node *NotExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "not %v", node.Expr) +} + +// Format formats the node. +func (node *ComparisonExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%l %s %r", node.Left, node.Operator.ToString(), node.Right) + if node.Escape != nil { + buf.astPrintf(node, " escape %v", node.Escape) + } +} + +// Format formats the node. +func (node *RangeCond) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s %l and %r", node.Left, node.Operator.ToString(), node.From, node.To) +} + +// Format formats the node. +func (node *IsExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s", node.Expr, node.Operator.ToString()) +} + +// Format formats the node. +func (node *ExistsExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "exists %v", node.Subquery) +} + +// Format formats the node. 
+func (node *Literal) Format(buf *TrackedBuffer) { + switch node.Type { + case StrVal: + sqltypes.MakeTrusted(sqltypes.VarBinary, node.Bytes()).EncodeSQL(buf) + case IntVal, FloatVal, HexNum: + buf.astPrintf(node, "%s", node.Val) + case HexVal: + buf.astPrintf(node, "X'%s'", node.Val) + case BitVal: + buf.astPrintf(node, "B'%s'", node.Val) + default: + panic("unexpected") + } +} + +// Format formats the node. +func (node Argument) Format(buf *TrackedBuffer) { + buf.WriteArg(string(node)) +} + +// Format formats the node. +func (node *NullVal) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "null") +} + +// Format formats the node. +func (node BoolVal) Format(buf *TrackedBuffer) { + if node { + buf.astPrintf(node, "true") + } else { + buf.astPrintf(node, "false") + } +} + +// Format formats the node. +func (node *ColName) Format(buf *TrackedBuffer) { + if !node.Qualifier.IsEmpty() { + buf.astPrintf(node, "%v.", node.Qualifier) + } + buf.astPrintf(node, "%v", node.Name) +} + +// Format formats the node. +func (node ValTuple) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", Exprs(node)) +} + +// Format formats the node. +func (node *Subquery) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", node.Select) +} + +// Format formats the node. +func (node *DerivedTable) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", node.Select) +} + +// Format formats the node. +func (node ListArg) Format(buf *TrackedBuffer) { + buf.WriteArg(string(node)) +} + +// Format formats the node. +func (node *BinaryExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%l %s %r", node.Left, node.Operator.ToString(), node.Right) +} + +// Format formats the node. +func (node *UnaryExpr) Format(buf *TrackedBuffer) { + if _, unary := node.Expr.(*UnaryExpr); unary { + // They have same precedence so parenthesis is not required. 
+ buf.astPrintf(node, "%s %v", node.Operator.ToString(), node.Expr) + return + } + buf.astPrintf(node, "%s%v", node.Operator.ToString(), node.Expr) +} + +// Format formats the node. +func (node *IntervalExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "interval %v %s", node.Expr, node.Unit) +} + +// Format formats the node. +func (node *TimestampFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%s, %v, %v)", node.Name, node.Unit, node.Expr1, node.Expr2) +} + +// Format formats the node. +func (node *CurTimeFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v)", node.Name.String(), node.Fsp) +} + +// Format formats the node. +func (node *CollateExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v collate %s", node.Expr, node.Charset) +} + +// Format formats the node. +func (node *FuncExpr) Format(buf *TrackedBuffer) { + var distinct string + if node.Distinct { + distinct = "distinct " + } + if !node.Qualifier.IsEmpty() { + buf.astPrintf(node, "%v.", node.Qualifier) + } + // Function names should not be back-quoted even + // if they match a reserved word, only if they contain illegal characters + funcName := node.Name.String() + + if containEscapableChars(funcName, NoAt) { + writeEscapedString(buf, funcName) + } else { + buf.WriteString(funcName) + } + buf.astPrintf(node, "(%s%v)", distinct, node.Exprs) +} + +// Format formats the node +func (node *GroupConcatExpr) Format(buf *TrackedBuffer) { + if node.Distinct { + buf.astPrintf(node, "group_concat(%s%v%v%s%v)", DistinctStr, node.Exprs, node.OrderBy, node.Separator, node.Limit) + } else { + buf.astPrintf(node, "group_concat(%v%v%s%v)", node.Exprs, node.OrderBy, node.Separator, node.Limit) + } +} + +// Format formats the node. +func (node *ValuesFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "values(%v)", node.Name) +} + +// Format formats the node. 
+func (node *SubstrExpr) Format(buf *TrackedBuffer) { + var val SQLNode + if node.Name != nil { + val = node.Name + } else { + val = node.StrVal + } + + if node.To == nil { + buf.astPrintf(node, "substr(%v, %v)", val, node.From) + } else { + buf.astPrintf(node, "substr(%v, %v, %v)", val, node.From, node.To) + } +} + +// Format formats the node. +func (node *ConvertExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "convert(%v, %v)", node.Expr, node.Type) +} + +// Format formats the node. +func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "convert(%v using %s)", node.Expr, node.Type) +} + +// Format formats the node. +func (node *ConvertType) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s", node.Type) + if node.Length != nil { + buf.astPrintf(node, "(%v", node.Length) + if node.Scale != nil { + buf.astPrintf(node, ", %v", node.Scale) + } + buf.astPrintf(node, ")") + } + if node.Charset != "" { + buf.astPrintf(node, "%s %s", node.Operator.ToString(), node.Charset) + } +} + +// Format formats the node +func (node *MatchExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "match(%v) against (%v%s)", node.Columns, node.Expr, node.Option.ToString()) +} + +// Format formats the node. +func (node *CaseExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "case ") + if node.Expr != nil { + buf.astPrintf(node, "%v ", node.Expr) + } + for _, when := range node.Whens { + buf.astPrintf(node, "%v ", when) + } + if node.Else != nil { + buf.astPrintf(node, "else %v ", node.Else) + } + buf.astPrintf(node, "end") +} + +// Format formats the node. +func (node *Default) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "default") + if node.ColName != "" { + buf.WriteString("(") + formatID(buf, node.ColName, NoAt) + buf.WriteString(")") + } +} + +// Format formats the node. +func (node *When) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "when %v then %v", node.Cond, node.Val) +} + +// Format formats the node. 
// Format emits " group by e1, e2, ..." (leading space included);
// an empty list emits nothing.
func (node GroupBy) Format(buf *TrackedBuffer) {
	prefix := " group by "
	for _, n := range node {
		buf.astPrintf(node, "%s%v", prefix, n)
		prefix = ", "
	}
}

// Format emits " order by e1, e2, ..." (leading space included);
// an empty list emits nothing.
func (node OrderBy) Format(buf *TrackedBuffer) {
	prefix := " order by "
	for _, n := range node {
		buf.astPrintf(node, "%s%v", prefix, n)
		prefix = ", "
	}
}

// Format emits one ORDER BY item. NULL literals and rand() calls are
// printed without a direction, everything else as "<expr> <asc|desc>".
// NOTE(review): the `node, ok :=` bindings below deliberately shadow the
// *Order receiver with the narrowed expression inside each if-block.
func (node *Order) Format(buf *TrackedBuffer) {
	if node, ok := node.Expr.(*NullVal); ok {
		buf.astPrintf(node, "%v", node)
		return
	}
	if node, ok := node.Expr.(*FuncExpr); ok {
		if node.Name.Lowered() == "rand" {
			buf.astPrintf(node, "%v", node)
			return
		}
	}

	buf.astPrintf(node, "%v %s", node.Expr, node.Direction.ToString())
}

// Format emits " limit [offset, ]rowcount"; a nil *Limit emits nothing.
func (node *Limit) Format(buf *TrackedBuffer) {
	if node == nil {
		return
	}
	buf.astPrintf(node, " limit ")
	if node.Offset != nil {
		buf.astPrintf(node, "%v, ", node.Offset)
	}
	buf.astPrintf(node, "%v", node.Rowcount)
}

// Format emits "values (row1), (row2), ...".
func (node Values) Format(buf *TrackedBuffer) {
	prefix := "values "
	for _, n := range node {
		buf.astPrintf(node, "%s%v", prefix, n)
		prefix = ", "
	}
}

// Format emits a comma-separated list of assignments (no keyword).
func (node UpdateExprs) Format(buf *TrackedBuffer) {
	var prefix string
	for _, n := range node {
		buf.astPrintf(node, "%s%v", prefix, n)
		prefix = ", "
	}
}

// Format emits a single "<col> = <expr>" assignment.
func (node *UpdateExpr) Format(buf *TrackedBuffer) {
	buf.astPrintf(node, "%v = %v", node.Name, node.Expr)
}

// Format emits a comma-separated list of SET expressions (no keyword).
func (node SetExprs) Format(buf *TrackedBuffer) {
	var prefix string
	for _, n := range node {
		buf.astPrintf(node, "%s%v", prefix, n)
		prefix = ", "
	}
}

// Format emits one SET expression, with its optional scope prefix.
// charset/names/transaction use space syntax ("names utf8"), all other
// variables use assignment syntax ("x = 1").
func (node *SetExpr) Format(buf *TrackedBuffer) {
	if node.Scope != ImplicitScope {
		buf.WriteString(node.Scope.ToString())
		buf.WriteString(" ")
	}
	// We don't have to backtick set variable names.
	switch {
	case node.Name.EqualString("charset") || node.Name.EqualString("names"):
		buf.astPrintf(node, "%s %v", node.Name.String(), node.Expr)
	case node.Name.EqualString(TransactionStr):
		// Transaction characteristics are normalized to lower case;
		// the expression is known to be a *Literal here.
		literal := node.Expr.(*Literal)
		buf.astPrintf(node, "%s %s", node.Name.String(), strings.ToLower(string(literal.Val)))
	default:
		buf.astPrintf(node, "%v = %v", node.Name, node.Expr)
	}
}

// Format emits " on duplicate key update ..."; nil emits nothing.
func (node OnDup) Format(buf *TrackedBuffer) {
	if node == nil {
		return
	}
	buf.astPrintf(node, " on duplicate key update %v", UpdateExprs(node))
}

// Format emits a column identifier, prefixed with one '@' per at-level
// (session/global variable markers), then the possibly-escaped name.
func (node ColIdent) Format(buf *TrackedBuffer) {
	for i := NoAt; i < node.at; i++ {
		buf.WriteByte('@')
	}
	formatID(buf, node.val, node.at)
}

// Format emits a table identifier, escaped only when necessary.
func (node TableIdent) Format(buf *TrackedBuffer) {
	formatID(buf, node.v, NoAt)
}

// Format emits "isolation level <level>"; unknown values produce a
// sentinel string rather than panicking.
func (node IsolationLevel) Format(buf *TrackedBuffer) {
	buf.WriteString("isolation level ")
	switch node {
	case ReadUncommitted:
		buf.WriteString(ReadUncommittedStr)
	case ReadCommitted:
		buf.WriteString(ReadCommittedStr)
	case RepeatableRead:
		buf.WriteString(RepeatableReadStr)
	case Serializable:
		buf.WriteString(SerializableStr)
	default:
		buf.WriteString("Unknown Isolation level value")
	}
}

// Format emits the transaction access mode (read only / read write).
func (node AccessMode) Format(buf *TrackedBuffer) {
	if node == ReadOnly {
		buf.WriteString(TxReadOnly)
	} else {
		buf.WriteString(TxReadWrite)
	}
}

// Format is a placeholder: LOAD statements have no full AST
// representation yet, so a sentinel string is emitted.
func (node *Load) Format(buf *TrackedBuffer) {
	buf.WriteString("AST node missing for Load type")
}

// Format formats the node.
// Format emits "show [full]<command>[ from <tbl|db>]<filter>".
// NOTE(review): no space is written before the command, so
// Command.ToString() is assumed to carry its own leading space — confirm.
func (node *ShowBasic) Format(buf *TrackedBuffer) {
	buf.WriteString("show")
	if node.Full {
		buf.WriteString(" full")
	}
	buf.astPrintf(node, "%s", node.Command.ToString())
	if !node.Tbl.IsEmpty() {
		buf.astPrintf(node, " from %v", node.Tbl)
	}
	if !node.DbName.IsEmpty() {
		buf.astPrintf(node, " from %v", node.DbName)
	}
	buf.astPrintf(node, "%v", node.Filter)
}

// Format emits "show<command> <object>" (command carries its space).
func (node *ShowCreate) Format(buf *TrackedBuffer) {
	buf.astPrintf(node, "show%s %v", node.Command.ToString(), node.Op)
}

// Format emits the INTO clause of a SELECT; nil emits nothing.
func (node *SelectInto) Format(buf *TrackedBuffer) {
	if node == nil {
		return
	}
	buf.astPrintf(node, "%s%s", node.Type.ToString(), node.FileName)
	if node.Charset != "" {
		buf.astPrintf(node, " character set %s", node.Charset)
	}
	// These option strings are stored verbatim (including any needed
	// leading spaces) and are emitted back unchanged.
	buf.astPrintf(node, "%s%s%s%s", node.FormatOption, node.ExportOption, node.Manifest, node.Overwrite)
}

// Format emits "create database [comments] [if not exists ]<name>"
// followed by any create options ([default] <type> <value>).
// NOTE(review): no space is written before Type.ToString(); the option
// type string presumably carries its own leading space — confirm.
func (node *CreateDatabase) Format(buf *TrackedBuffer) {
	buf.astPrintf(node, "create database %v", node.Comments)
	if node.IfNotExists {
		buf.WriteString("if not exists ")
	}
	buf.astPrintf(node, "%v", node.DBName)
	if node.CreateOptions != nil {
		for _, createOption := range node.CreateOptions {
			if createOption.IsDefault {
				buf.WriteString(" default")
			}
			buf.WriteString(createOption.Type.ToString())
			buf.WriteString(" " + createOption.Value)
		}
	}
}

// Format emits "alter database [<name>]" plus either the upgrade
// clause or alter options, mirroring CreateDatabase's option format.
func (node *AlterDatabase) Format(buf *TrackedBuffer) {
	buf.WriteString("alter database")
	if !node.DBName.IsEmpty() {
		buf.astPrintf(node, " %v", node.DBName)
	}
	if node.UpdateDataDirectory {
		buf.WriteString(" upgrade data directory name")
	}
	if node.AlterOptions != nil {
		for _, createOption := range node.AlterOptions {
			if createOption.IsDefault {
				buf.WriteString(" default")
			}
			buf.WriteString(createOption.Type.ToString())
			buf.WriteString(" " + createOption.Value)
		}
	}
}

// Format formats the node.
// Format emits "create [temporary ]table [if not exists ]<name>"
// followed by either a LIKE clause or the full table spec.
func (node *CreateTable) Format(buf *TrackedBuffer) {
	buf.WriteString("create ")
	if node.Temp {
		buf.WriteString("temporary ")
	}
	buf.WriteString("table ")

	if node.IfNotExists {
		buf.WriteString("if not exists ")
	}
	buf.astPrintf(node, "%v", node.Table)

	// OptLike (create table t like u) and TableSpec (column/index/
	// constraint definitions) are mutually exclusive in practice;
	// whichever is set is printed.
	if node.OptLike != nil {
		buf.astPrintf(node, " %v", node.OptLike)
	}
	if node.TableSpec != nil {
		buf.astPrintf(node, " %v", node.TableSpec)
	}
}

// Format emits "create [or replace][algorithm][definer][security]
// view <name><cols> as <select>[ with <opt> check option]".
func (node *CreateView) Format(buf *TrackedBuffer) {
	buf.WriteString("create")
	if node.IsReplace {
		buf.WriteString(" or replace")
	}
	if node.Algorithm != "" {
		buf.astPrintf(node, " algorithm = %s", node.Algorithm)
	}
	if node.Definer != "" {
		buf.astPrintf(node, " definer = %s", node.Definer)
	}
	if node.Security != "" {
		buf.astPrintf(node, " sql security %s", node.Security)
	}
	buf.astPrintf(node, " view %v", node.ViewName)
	buf.astPrintf(node, "%v as %v", node.Columns, node.Select)
	if node.CheckOption != "" {
		buf.astPrintf(node, " with %s check option", node.CheckOption)
	}
}

// Format formats the LockTables node as
// "lock tables t1 <lock>, t2 <lock>, ...".
// NOTE(review): Tables[0] is accessed unconditionally — the grammar
// presumably guarantees at least one table; confirm.
func (node *LockTables) Format(buf *TrackedBuffer) {
	buf.astPrintf(node, "lock tables %v %s", node.Tables[0].Table, node.Tables[0].Lock.ToString())
	for i := 1; i < len(node.Tables); i++ {
		buf.astPrintf(node, ", %v %s", node.Tables[i].Table, node.Tables[i].Lock.ToString())
	}
}

// Format formats the UnlockTables node (the statement is a bare keyword).
func (node *UnlockTables) Format(buf *TrackedBuffer) {
	buf.WriteString("unlock tables")
}

// Format formats the node.
// Format emits "alter [algorithm][definer][security] view <name><cols>
// as <select>[ with <opt> check option]" — same tail as CreateView.
func (node *AlterView) Format(buf *TrackedBuffer) {
	buf.WriteString("alter")
	if node.Algorithm != "" {
		buf.astPrintf(node, " algorithm = %s", node.Algorithm)
	}
	if node.Definer != "" {
		buf.astPrintf(node, " definer = %s", node.Definer)
	}
	if node.Security != "" {
		buf.astPrintf(node, " sql security %s", node.Security)
	}
	buf.astPrintf(node, " view %v", node.ViewName)
	buf.astPrintf(node, "%v as %v", node.Columns, node.Select)
	if node.CheckOption != "" {
		buf.astPrintf(node, " with %s check option", node.CheckOption)
	}
}

// Format emits "drop[ temporary] table[ if exists] t1, t2, ...".
func (node *DropTable) Format(buf *TrackedBuffer) {
	temp := ""
	if node.Temp {
		temp = " temporary"
	}
	exists := ""
	if node.IfExists {
		exists = " if exists"
	}
	buf.astPrintf(node, "drop%s table%s %v", temp, exists, node.FromTables)
}

// Format emits "drop view[ if exists] v1, v2, ...".
func (node *DropView) Format(buf *TrackedBuffer) {
	exists := ""
	if node.IfExists {
		exists = " if exists"
	}
	buf.astPrintf(node, "drop view%s %v", exists, node.FromTables)
}

// Format formats the AlterTable node: "alter table <t>" followed by the
// comma-separated alter options and, if present, the partition spec.
func (node *AlterTable) Format(buf *TrackedBuffer) {
	buf.astPrintf(node, "alter table %v", node.Table)
	prefix := ""
	for i, option := range node.AlterOptions {
		if i != 0 {
			buf.WriteString(",")
		}
		buf.astPrintf(node, " %v", option)
		// A non-REMOVE partition spec that follows alter options needs a
		// separating comma; REMOVE PARTITIONING attaches without one.
		if node.PartitionSpec != nil && node.PartitionSpec.Action != RemoveAction {
			prefix = ","
		}
	}
	if node.PartitionSpec != nil {
		buf.astPrintf(node, "%s %v", prefix, node.PartitionSpec)
	}
}

// Format emits "add <constraint definition>".
func (node *AddConstraintDefinition) Format(buf *TrackedBuffer) {
	buf.astPrintf(node, "add %v", node.ConstraintDefinition)
}

// Format emits "add <index definition>".
func (node *AddIndexDefinition) Format(buf *TrackedBuffer) {
	buf.astPrintf(node, "add %v", node.IndexDefinition)
}

// Format formats the node.
+func (node *AddColumns) Format(buf *TrackedBuffer) { + + if len(node.Columns) == 1 { + buf.astPrintf(node, "add column %v", node.Columns[0]) + if node.First != nil { + buf.astPrintf(node, " first %v", node.First) + } + if node.After != nil { + buf.astPrintf(node, " after %v", node.After) + } + } else { + for i, col := range node.Columns { + if i == 0 { + buf.astPrintf(node, "add column (%v", col) + } else { + buf.astPrintf(node, ", %v", col) + } + } + buf.WriteString(")") + } +} + +// Format formats the node. +func (node AlgorithmValue) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "algorithm = %s", string(node)) +} + +// Format formats the node +func (node *AlterColumn) Format(buf *TrackedBuffer) { + if node.DropDefault { + buf.astPrintf(node, "alter column %v drop default", node.Column) + } else { + buf.astPrintf(node, "alter column %v set default", node.Column) + buf.astPrintf(node, " %v", node.DefaultVal) + } +} + +// Format formats the node +func (node *ChangeColumn) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "change column %v %v", node.OldColumn, node.NewColDefinition) + if node.First != nil { + buf.astPrintf(node, " first %v", node.First) + } + if node.After != nil { + buf.astPrintf(node, " after %v", node.After) + } +} + +// Format formats the node +func (node *ModifyColumn) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "modify column %v", node.NewColDefinition) + if node.First != nil { + buf.astPrintf(node, " first %v", node.First) + } + if node.After != nil { + buf.astPrintf(node, " after %v", node.After) + } +} + +// Format formats the node +func (node *AlterCharset) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "convert to character set %s", node.CharacterSet) + if node.Collate != "" { + buf.astPrintf(node, " collate %s", node.Collate) + } +} + +// Format formats the node +func (node *KeyState) Format(buf *TrackedBuffer) { + if node.Enable { + buf.WriteString("enable keys") + } else { + buf.WriteString("disable keys") + } + +} + 
+// Format formats the node +func (node *TablespaceOperation) Format(buf *TrackedBuffer) { + if node.Import { + buf.WriteString("import tablespace") + } else { + buf.WriteString("discard tablespace") + } +} + +// Format formats the node +func (node *DropColumn) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "drop column %v", node.Name) +} + +// Format formats the node +func (node *DropKey) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "drop %s", node.Type.ToString()) + if !node.Name.IsEmpty() { + buf.astPrintf(node, " %v", node.Name) + } +} + +// Format formats the node +func (node *Force) Format(buf *TrackedBuffer) { + buf.WriteString("force") +} + +// Format formats the node +func (node *LockOption) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "lock %s", node.Type.ToString()) +} + +// Format formats the node +func (node *OrderByOption) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "order by ") + prefix := "" + for _, n := range node.Cols { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node +func (node *RenameTableName) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "rename %v", node.Table) +} + +// Format formats the node +func (node *RenameIndex) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "rename index %v to %v", node.OldName, node.NewName) +} + +// Format formats the node +func (node *Validation) Format(buf *TrackedBuffer) { + if node.With { + buf.WriteString("with validation") + } else { + buf.WriteString("without validation") + } +} + +// Format formats the node +func (node TableOptions) Format(buf *TrackedBuffer) { + for i, option := range node { + if i != 0 { + buf.WriteString(" ") + } + buf.astPrintf(node, "%s", option.Name) + if option.String != "" { + buf.astPrintf(node, " %s", option.String) + } else if option.Value != nil { + buf.astPrintf(node, " %v", option.Value) + } else { + buf.astPrintf(node, " (%v)", option.Tables) + } + } +} + +// Format formats the node +func (node 
*TruncateTable) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "truncate table %v", node.Table) +} + +// Format formats the node. +func (node *RenameTable) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "rename table") + prefix := " " + for _, pair := range node.TablePairs { + buf.astPrintf(node, "%s%v to %v", prefix, pair.FromTable, pair.ToTable) + prefix = ", " + } +} diff --git a/go/vt/sqlparser/ast_format_fast.go b/go/vt/sqlparser/ast_format_fast.go new file mode 100644 index 00000000000..5746f5106c8 --- /dev/null +++ b/go/vt/sqlparser/ast_format_fast.go @@ -0,0 +1,2185 @@ +// Code generated by ASTFmtGen. DO NOT EDIT. +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strings" + + "vitess.io/vitess/go/sqltypes" +) + +// formatFast formats the node. 
+func (node *Select) formatFast(buf *TrackedBuffer) { + buf.WriteString("select ") + node.Comments.formatFast(buf) + + if node.Distinct { + buf.WriteString(DistinctStr) + } + if node.Cache != nil { + if *node.Cache { + buf.WriteString(SQLCacheStr) + } else { + buf.WriteString(SQLNoCacheStr) + } + } + if node.StraightJoinHint { + buf.WriteString(StraightJoinHint) + } + if node.SQLCalcFoundRows { + buf.WriteString(SQLCalcFoundRowsStr) + } + + node.SelectExprs.formatFast(buf) + buf.WriteString(" from ") + + node.From.formatFast(buf) + + node.Where.formatFast(buf) + + node.GroupBy.formatFast(buf) + + node.Having.formatFast(buf) + + node.OrderBy.formatFast(buf) + + node.Limit.formatFast(buf) + buf.WriteString(node.Lock.ToString()) + node.Into.formatFast(buf) + +} + +// formatFast formats the node. +func (node *ParenSelect) formatFast(buf *TrackedBuffer) { + buf.WriteByte('(') + node.Select.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *Union) formatFast(buf *TrackedBuffer) { + node.FirstStatement.formatFast(buf) + for _, us := range node.UnionSelects { + us.formatFast(buf) + } + node.OrderBy.formatFast(buf) + node.Limit.formatFast(buf) + buf.WriteString(node.Lock.ToString()) +} + +// formatFast formats the node. +func (node *UnionSelect) formatFast(buf *TrackedBuffer) { + if node.Distinct { + buf.WriteByte(' ') + buf.WriteString(UnionStr) + buf.WriteByte(' ') + node.Statement.formatFast(buf) + } else { + buf.WriteByte(' ') + buf.WriteString(UnionAllStr) + buf.WriteByte(' ') + node.Statement.formatFast(buf) + } +} + +// formatFast formats the node. +func (node *VStream) formatFast(buf *TrackedBuffer) { + buf.WriteString("vstream ") + node.Comments.formatFast(buf) + node.SelectExpr.formatFast(buf) + buf.WriteString(" from ") + node.Table.formatFast(buf) + +} + +// formatFast formats the node. 
+func (node *Stream) formatFast(buf *TrackedBuffer) { + buf.WriteString("stream ") + node.Comments.formatFast(buf) + node.SelectExpr.formatFast(buf) + buf.WriteString(" from ") + node.Table.formatFast(buf) + +} + +// formatFast formats the node. +func (node *Insert) formatFast(buf *TrackedBuffer) { + switch node.Action { + case InsertAct: + buf.WriteString(InsertStr) + buf.WriteByte(' ') + + node.Comments.formatFast(buf) + buf.WriteString(node.Ignore.ToString()) + buf.WriteString("into ") + + node.Table.formatFast(buf) + + node.Partitions.formatFast(buf) + + node.Columns.formatFast(buf) + buf.WriteByte(' ') + + node.Rows.formatFast(buf) + + node.OnDup.formatFast(buf) + + case ReplaceAct: + buf.WriteString(ReplaceStr) + buf.WriteByte(' ') + + node.Comments.formatFast(buf) + buf.WriteString(node.Ignore.ToString()) + buf.WriteString("into ") + + node.Table.formatFast(buf) + + node.Partitions.formatFast(buf) + + node.Columns.formatFast(buf) + buf.WriteByte(' ') + + node.Rows.formatFast(buf) + + node.OnDup.formatFast(buf) + + default: + buf.WriteString("Unkown Insert Action") + buf.WriteByte(' ') + + node.Comments.formatFast(buf) + buf.WriteString(node.Ignore.ToString()) + buf.WriteString("into ") + + node.Table.formatFast(buf) + + node.Partitions.formatFast(buf) + + node.Columns.formatFast(buf) + buf.WriteByte(' ') + + node.Rows.formatFast(buf) + + node.OnDup.formatFast(buf) + + } + +} + +// formatFast formats the node. +func (node *Update) formatFast(buf *TrackedBuffer) { + buf.WriteString("update ") + node.Comments.formatFast(buf) + buf.WriteString(node.Ignore.ToString()) + node.TableExprs.formatFast(buf) + buf.WriteString(" set ") + + node.Exprs.formatFast(buf) + + node.Where.formatFast(buf) + + node.OrderBy.formatFast(buf) + + node.Limit.formatFast(buf) + +} + +// formatFast formats the node. 
+func (node *Delete) formatFast(buf *TrackedBuffer) { + buf.WriteString("delete ") + node.Comments.formatFast(buf) + if node.Ignore { + buf.WriteString("ignore ") + } + if node.Targets != nil { + node.Targets.formatFast(buf) + buf.WriteByte(' ') + } + buf.WriteString("from ") + node.TableExprs.formatFast(buf) + node.Partitions.formatFast(buf) + node.Where.formatFast(buf) + node.OrderBy.formatFast(buf) + node.Limit.formatFast(buf) +} + +// formatFast formats the node. +func (node *Set) formatFast(buf *TrackedBuffer) { + buf.WriteString("set ") + node.Comments.formatFast(buf) + node.Exprs.formatFast(buf) +} + +// formatFast formats the node. +func (node *SetTransaction) formatFast(buf *TrackedBuffer) { + if node.Scope == ImplicitScope { + buf.WriteString("set ") + node.Comments.formatFast(buf) + buf.WriteString("transaction ") + } else { + buf.WriteString("set ") + node.Comments.formatFast(buf) + buf.WriteString(node.Scope.ToString()) + buf.WriteString(" transaction ") + } + + for i, char := range node.Characteristics { + if i > 0 { + buf.WriteString(", ") + } + char.formatFast(buf) + } +} + +// formatFast formats the node. +func (node *DropDatabase) formatFast(buf *TrackedBuffer) { + exists := "" + if node.IfExists { + exists = "if exists " + } + buf.WriteString(DropStr) + buf.WriteString(" database ") + node.Comments.formatFast(buf) + buf.WriteString(exists) + node.DBName.formatFast(buf) +} + +// formatFast formats the node. 
+func (node *Flush) formatFast(buf *TrackedBuffer) { + buf.WriteString(FlushStr) + if node.IsLocal { + buf.WriteString(" local") + } + if len(node.FlushOptions) != 0 { + prefix := " " + for _, option := range node.FlushOptions { + buf.WriteString(prefix) + buf.WriteString(option) + prefix = ", " + } + } else { + buf.WriteString(" tables") + if len(node.TableNames) != 0 { + buf.WriteByte(' ') + node.TableNames.formatFast(buf) + } + if node.ForExport { + buf.WriteString(" for export") + } + if node.WithLock { + buf.WriteString(" with read lock") + } + } +} + +// formatFast formats the node. +func (node *AlterVschema) formatFast(buf *TrackedBuffer) { + switch node.Action { + case CreateVindexDDLAction: + buf.WriteString("alter vschema create vindex ") + node.Table.formatFast(buf) + buf.WriteByte(' ') + node.VindexSpec.formatFast(buf) + case DropVindexDDLAction: + buf.WriteString("alter vschema drop vindex ") + node.Table.formatFast(buf) + case AddVschemaTableDDLAction: + buf.WriteString("alter vschema add table ") + node.Table.formatFast(buf) + case DropVschemaTableDDLAction: + buf.WriteString("alter vschema drop table ") + node.Table.formatFast(buf) + case AddColVindexDDLAction: + buf.WriteString("alter vschema on ") + node.Table.formatFast(buf) + buf.WriteString(" add vindex ") + node.VindexSpec.Name.formatFast(buf) + buf.WriteString(" (") + for i, col := range node.VindexCols { + if i != 0 { + buf.WriteString(", ") + col.formatFast(buf) + } else { + col.formatFast(buf) + } + } + buf.WriteByte(')') + if node.VindexSpec.Type.String() != "" { + buf.WriteByte(' ') + node.VindexSpec.formatFast(buf) + } + case DropColVindexDDLAction: + buf.WriteString("alter vschema on ") + node.Table.formatFast(buf) + buf.WriteString(" drop vindex ") + node.VindexSpec.Name.formatFast(buf) + case AddSequenceDDLAction: + buf.WriteString("alter vschema add sequence ") + node.Table.formatFast(buf) + case AddAutoIncDDLAction: + buf.WriteString("alter vschema on ") + 
node.Table.formatFast(buf) + buf.WriteString(" add auto_increment ") + node.AutoIncSpec.formatFast(buf) + default: + buf.WriteString(node.Action.ToString()) + buf.WriteString(" table ") + node.Table.formatFast(buf) + } +} + +// formatFast formats the node. +func (node *AlterMigration) formatFast(buf *TrackedBuffer) { + buf.WriteString("alter vitess_migration") + if node.UUID != "" { + buf.WriteString(" '") + buf.WriteString(node.UUID) + buf.WriteByte('\'') + } + var alterType string + switch node.Type { + case RetryMigrationType: + alterType = "retry" + case CompleteMigrationType: + alterType = "complete" + case CancelMigrationType: + alterType = "cancel" + case CancelAllMigrationType: + alterType = "cancel all" + } + buf.WriteByte(' ') + buf.WriteString(alterType) +} + +// formatFast formats the node. +func (node *RevertMigration) formatFast(buf *TrackedBuffer) { + buf.WriteString("revert vitess_migration '") + buf.WriteString(node.UUID) + buf.WriteByte('\'') +} + +// formatFast formats the node. +func (node *OptLike) formatFast(buf *TrackedBuffer) { + buf.WriteString("like ") + node.LikeTable.formatFast(buf) +} + +// formatFast formats the node. 
+func (node *PartitionSpec) formatFast(buf *TrackedBuffer) { + switch node.Action { + case ReorganizeAction: + buf.WriteString(ReorganizeStr) + buf.WriteByte(' ') + for i, n := range node.Names { + if i != 0 { + buf.WriteString(", ") + } + n.formatFast(buf) + } + buf.WriteString(" into (") + for i, pd := range node.Definitions { + if i != 0 { + buf.WriteString(", ") + } + pd.formatFast(buf) + } + buf.WriteByte(')') + case AddAction: + buf.WriteString(AddStr) + buf.WriteString(" (") + node.Definitions[0].formatFast(buf) + buf.WriteByte(')') + case DropAction: + buf.WriteString(DropPartitionStr) + buf.WriteByte(' ') + for i, n := range node.Names { + if i != 0 { + buf.WriteString(", ") + } + n.formatFast(buf) + } + case DiscardAction: + buf.WriteString(DiscardStr) + buf.WriteByte(' ') + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + buf.WriteString(" tablespace") + case ImportAction: + buf.WriteString(ImportStr) + buf.WriteByte(' ') + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + buf.WriteString(" tablespace") + case TruncateAction: + buf.WriteString(TruncatePartitionStr) + buf.WriteByte(' ') + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + case CoalesceAction: + buf.WriteString(CoalesceStr) + buf.WriteByte(' ') + node.Number.formatFast(buf) + case ExchangeAction: + buf.WriteString(ExchangeStr) + buf.WriteByte(' ') + node.Names[0].formatFast(buf) + buf.WriteString(" with table ") + node.TableName.formatFast(buf) + if node.WithoutValidation { + buf.WriteString(" without validation") + } + case AnalyzeAction: + buf.WriteString(AnalyzePartitionStr) + buf.WriteByte(' ') + if node.IsAll { + 
buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + case CheckAction: + buf.WriteString(CheckStr) + buf.WriteByte(' ') + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + case OptimizeAction: + buf.WriteString(OptimizeStr) + buf.WriteByte(' ') + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + case RebuildAction: + buf.WriteString(RebuildStr) + buf.WriteByte(' ') + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + case RepairAction: + buf.WriteString(RepairStr) + buf.WriteByte(' ') + if node.IsAll { + buf.WriteString("all") + } else { + prefix := "" + for _, n := range node.Names { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + } + case RemoveAction: + buf.WriteString(RemoveStr) + case UpgradeAction: + buf.WriteString(UpgradeStr) + default: + panic("unimplemented") + } +} + +// formatFast formats the node +func (node *PartitionDefinition) formatFast(buf *TrackedBuffer) { + if !node.Maxvalue { + buf.WriteString("partition ") + node.Name.formatFast(buf) + buf.WriteString(" values less than (") + node.Limit.formatFast(buf) + buf.WriteByte(')') + } else { + buf.WriteString("partition ") + node.Name.formatFast(buf) + buf.WriteString(" values less than (maxvalue)") + } +} + +// formatFast formats the node. 
+func (ts *TableSpec) formatFast(buf *TrackedBuffer) { + buf.WriteString("(\n") + for i, col := range ts.Columns { + if i == 0 { + buf.WriteByte('\t') + col.formatFast(buf) + } else { + buf.WriteString(",\n\t") + col.formatFast(buf) + } + } + for _, idx := range ts.Indexes { + buf.WriteString(",\n\t") + idx.formatFast(buf) + } + for _, c := range ts.Constraints { + buf.WriteString(",\n\t") + c.formatFast(buf) + } + + buf.WriteString("\n)") + for i, opt := range ts.Options { + if i != 0 { + buf.WriteString(",\n ") + } + buf.WriteByte(' ') + buf.WriteString(opt.Name) + if opt.String != "" { + buf.WriteByte(' ') + buf.WriteString(opt.String) + } else if opt.Value != nil { + buf.WriteByte(' ') + opt.Value.formatFast(buf) + } else { + buf.WriteString(" (") + opt.Tables.formatFast(buf) + buf.WriteByte(')') + } + } +} + +// formatFast formats the node. +func (col *ColumnDefinition) formatFast(buf *TrackedBuffer) { + col.Name.formatFast(buf) + buf.WriteByte(' ') + (&col.Type).formatFast(buf) +} + +// formatFast returns a canonical string representation of the type and all relevant options +func (ct *ColumnType) formatFast(buf *TrackedBuffer) { + buf.WriteString(ct.Type) + + if ct.Length != nil && ct.Scale != nil { + buf.WriteByte('(') + ct.Length.formatFast(buf) + buf.WriteByte(',') + ct.Scale.formatFast(buf) + buf.WriteByte(')') + + } else if ct.Length != nil { + buf.WriteByte('(') + ct.Length.formatFast(buf) + buf.WriteByte(')') + } + + if ct.EnumValues != nil { + buf.WriteByte('(') + buf.WriteString(strings.Join(ct.EnumValues, ", ")) + buf.WriteByte(')') + } + + if ct.Unsigned { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[UNSIGNED]) + } + if ct.Zerofill { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[ZEROFILL]) + } + if ct.Charset != "" { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[CHARACTER]) + buf.WriteByte(' ') + buf.WriteString(keywordStrings[SET]) + buf.WriteByte(' ') + buf.WriteString(ct.Charset) + } + if ct.Collate != "" { + 
buf.WriteByte(' ') + buf.WriteString(keywordStrings[COLLATE]) + buf.WriteByte(' ') + buf.WriteString(ct.Collate) + } + if ct.Options.Null != nil { + if *ct.Options.Null { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[NULL]) + } else { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[NOT]) + buf.WriteByte(' ') + buf.WriteString(keywordStrings[NULL]) + } + } + if ct.Options.Default != nil { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[DEFAULT]) + buf.WriteByte(' ') + ct.Options.Default.formatFast(buf) + } + if ct.Options.OnUpdate != nil { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[ON]) + buf.WriteByte(' ') + buf.WriteString(keywordStrings[UPDATE]) + buf.WriteByte(' ') + ct.Options.OnUpdate.formatFast(buf) + } + if ct.Options.Autoincrement { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[AUTO_INCREMENT]) + } + if ct.Options.Comment != nil { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[COMMENT_KEYWORD]) + buf.WriteByte(' ') + ct.Options.Comment.formatFast(buf) + } + if ct.Options.KeyOpt == colKeyPrimary { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[PRIMARY]) + buf.WriteByte(' ') + buf.WriteString(keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKeyUnique { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[UNIQUE]) + } + if ct.Options.KeyOpt == colKeyUniqueKey { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[UNIQUE]) + buf.WriteByte(' ') + buf.WriteString(keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKeySpatialKey { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[SPATIAL]) + buf.WriteByte(' ') + buf.WriteString(keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKeyFulltextKey { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[FULLTEXT]) + buf.WriteByte(' ') + buf.WriteString(keywordStrings[KEY]) + } + if ct.Options.KeyOpt == colKey { + buf.WriteByte(' ') + buf.WriteString(keywordStrings[KEY]) + } +} + +// formatFast formats the node. 
+func (idx *IndexDefinition) formatFast(buf *TrackedBuffer) { + idx.Info.formatFast(buf) + buf.WriteString(" (") + for i, col := range idx.Columns { + if i != 0 { + buf.WriteString(", ") + col.Column.formatFast(buf) + } else { + col.Column.formatFast(buf) + } + if col.Length != nil { + buf.WriteByte('(') + col.Length.formatFast(buf) + buf.WriteByte(')') + } + if col.Direction == DescOrder { + buf.WriteString(" desc") + } + } + buf.WriteByte(')') + + for _, opt := range idx.Options { + buf.WriteByte(' ') + buf.WriteString(opt.Name) + if opt.String != "" { + buf.WriteByte(' ') + buf.WriteString(opt.String) + } else { + buf.WriteByte(' ') + opt.Value.formatFast(buf) + } + } +} + +// formatFast formats the node. +func (ii *IndexInfo) formatFast(buf *TrackedBuffer) { + if !ii.ConstraintName.IsEmpty() { + buf.WriteString("constraint ") + ii.ConstraintName.formatFast(buf) + buf.WriteByte(' ') + } + if ii.Primary { + buf.WriteString(ii.Type) + } else { + buf.WriteString(ii.Type) + if !ii.Name.IsEmpty() { + buf.WriteByte(' ') + ii.Name.formatFast(buf) + } + } +} + +// formatFast formats the node. +func (node *AutoIncSpec) formatFast(buf *TrackedBuffer) { + node.Column.formatFast(buf) + buf.WriteByte(' ') + buf.WriteString("using ") + node.Sequence.formatFast(buf) +} + +// formatFast formats the node. The "CREATE VINDEX" preamble was formatted in +// the containing DDL node Format, so this just prints the type, any +// parameters, and optionally the owner +func (node *VindexSpec) formatFast(buf *TrackedBuffer) { + buf.WriteString("using ") + node.Type.formatFast(buf) + + numParams := len(node.Params) + if numParams != 0 { + buf.WriteString(" with ") + for i, p := range node.Params { + if i != 0 { + buf.WriteString(", ") + } + p.formatFast(buf) + } + } +} + +// formatFast formats the node. +func (node VindexParam) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Key.String()) + buf.WriteByte('=') + buf.WriteString(node.Val) +} + +// formatFast formats the node. 
+func (c *ConstraintDefinition) formatFast(buf *TrackedBuffer) { + if !c.Name.IsEmpty() { + buf.WriteString("constraint ") + c.Name.formatFast(buf) + buf.WriteByte(' ') + } + c.Details.Format(buf) +} + +// formatFast formats the node. +func (a ReferenceAction) formatFast(buf *TrackedBuffer) { + switch a { + case Restrict: + buf.WriteString("restrict") + case Cascade: + buf.WriteString("cascade") + case NoAction: + buf.WriteString("no action") + case SetNull: + buf.WriteString("set null") + case SetDefault: + buf.WriteString("set default") + } +} + +// formatFast formats the node. +func (f *ForeignKeyDefinition) formatFast(buf *TrackedBuffer) { + buf.WriteString("foreign key ") + f.Source.formatFast(buf) + buf.WriteString(" references ") + f.ReferencedTable.formatFast(buf) + buf.WriteByte(' ') + f.ReferencedColumns.formatFast(buf) + if f.OnDelete != DefaultAction { + buf.WriteString(" on delete ") + f.OnDelete.formatFast(buf) + } + if f.OnUpdate != DefaultAction { + buf.WriteString(" on update ") + f.OnUpdate.formatFast(buf) + } +} + +// formatFast formats the node. +func (c *CheckConstraintDefinition) formatFast(buf *TrackedBuffer) { + buf.WriteString("check (") + c.Expr.formatFast(buf) + buf.WriteByte(')') + if !c.Enforced { + buf.WriteString(" not enforced") + } +} + +// formatFast formats the node. +func (node *Show) formatFast(buf *TrackedBuffer) { + node.Internal.formatFast(buf) +} + +// formatFast formats the node. 
+func (node *ShowLegacy) formatFast(buf *TrackedBuffer) { + nodeType := strings.ToLower(node.Type) + if (nodeType == "tables" || nodeType == "columns" || nodeType == "fields" || nodeType == "index" || nodeType == "keys" || nodeType == "indexes" || + nodeType == "databases" || nodeType == "schemas" || nodeType == "keyspaces" || nodeType == "vitess_keyspaces" || nodeType == "vitess_shards" || nodeType == "vitess_tablets") && node.ShowTablesOpt != nil { + opt := node.ShowTablesOpt + if node.Extended != "" { + buf.WriteString("show ") + buf.WriteString(node.Extended) + buf.WriteString(nodeType) + } else { + buf.WriteString("show ") + buf.WriteString(opt.Full) + buf.WriteString(nodeType) + } + if (nodeType == "columns" || nodeType == "fields") && node.HasOnTable() { + buf.WriteString(" from ") + node.OnTable.formatFast(buf) + } + if (nodeType == "index" || nodeType == "keys" || nodeType == "indexes") && node.HasOnTable() { + buf.WriteString(" from ") + node.OnTable.formatFast(buf) + } + if opt.DbName != "" { + buf.WriteString(" from ") + buf.WriteString(opt.DbName) + } + opt.Filter.formatFast(buf) + return + } + if node.Scope == ImplicitScope { + buf.WriteString("show ") + buf.WriteString(nodeType) + } else { + buf.WriteString("show ") + buf.WriteString(node.Scope.ToString()) + buf.WriteByte(' ') + buf.WriteString(nodeType) + } + if node.HasOnTable() { + buf.WriteString(" on ") + node.OnTable.formatFast(buf) + } + if nodeType == "collation" && node.ShowCollationFilterOpt != nil { + buf.WriteString(" where ") + node.ShowCollationFilterOpt.formatFast(buf) + } + if nodeType == "charset" && node.ShowTablesOpt != nil { + node.ShowTablesOpt.Filter.formatFast(buf) + } + if node.HasTable() { + buf.WriteByte(' ') + node.Table.formatFast(buf) + } +} + +// formatFast formats the node. 
+func (node *ShowFilter) formatFast(buf *TrackedBuffer) { + if node == nil { + return + } + if node.Like != "" { + buf.WriteString(" like ") + sqltypes.BufEncodeStringSQL(buf.Builder, node.Like) + } else { + buf.WriteString(" where ") + node.Filter.formatFast(buf) + } +} + +// formatFast formats the node. +func (node *Use) formatFast(buf *TrackedBuffer) { + if node.DBName.v != "" { + buf.WriteString("use ") + node.DBName.formatFast(buf) + } else { + buf.WriteString("use") + } +} + +// formatFast formats the node. +func (node *Commit) formatFast(buf *TrackedBuffer) { + buf.WriteString("commit") +} + +// formatFast formats the node. +func (node *Begin) formatFast(buf *TrackedBuffer) { + buf.WriteString("begin") +} + +// formatFast formats the node. +func (node *Rollback) formatFast(buf *TrackedBuffer) { + buf.WriteString("rollback") +} + +// formatFast formats the node. +func (node *SRollback) formatFast(buf *TrackedBuffer) { + buf.WriteString("rollback to ") + node.Name.formatFast(buf) +} + +// formatFast formats the node. +func (node *Savepoint) formatFast(buf *TrackedBuffer) { + buf.WriteString("savepoint ") + node.Name.formatFast(buf) +} + +// formatFast formats the node. +func (node *Release) formatFast(buf *TrackedBuffer) { + buf.WriteString("release savepoint ") + node.Name.formatFast(buf) +} + +// formatFast formats the node. +func (node *ExplainStmt) formatFast(buf *TrackedBuffer) { + format := "" + switch node.Type { + case EmptyType: + case AnalyzeType: + format = AnalyzeStr + " " + default: + format = "format = " + node.Type.ToString() + " " + } + buf.WriteString("explain ") + buf.WriteString(format) + node.Statement.formatFast(buf) +} + +// formatFast formats the node. +func (node *ExplainTab) formatFast(buf *TrackedBuffer) { + buf.WriteString("explain ") + node.Table.formatFast(buf) + if node.Wild != "" { + buf.WriteByte(' ') + buf.WriteString(node.Wild) + } +} + +// formatFast formats the node. 
+func (node *CallProc) formatFast(buf *TrackedBuffer) { + buf.WriteString("call ") + node.Name.formatFast(buf) + buf.WriteByte('(') + node.Params.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *OtherRead) formatFast(buf *TrackedBuffer) { + buf.WriteString("otherread") +} + +// formatFast formats the node. +func (node *OtherAdmin) formatFast(buf *TrackedBuffer) { + buf.WriteString("otheradmin") +} + +// formatFast formats the node. +func (node Comments) formatFast(buf *TrackedBuffer) { + for _, c := range node { + buf.WriteString(c) + buf.WriteByte(' ') + } +} + +// formatFast formats the node. +func (node SelectExprs) formatFast(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node *StarExpr) formatFast(buf *TrackedBuffer) { + if !node.TableName.IsEmpty() { + node.TableName.formatFast(buf) + buf.WriteByte('.') + } + buf.WriteByte('*') +} + +// formatFast formats the node. +func (node *AliasedExpr) formatFast(buf *TrackedBuffer) { + node.Expr.formatFast(buf) + if !node.As.IsEmpty() { + buf.WriteString(" as ") + node.As.formatFast(buf) + } +} + +// formatFast formats the node. +func (node *Nextval) formatFast(buf *TrackedBuffer) { + buf.WriteString("next ") + node.Expr.formatFast(buf) + buf.WriteString(" values") +} + +// formatFast formats the node. +func (node Columns) formatFast(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := "(" + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + buf.WriteString(")") +} + +// formatFast formats the node +func (node Partitions) formatFast(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := " partition (" + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + buf.WriteString(")") +} + +// formatFast formats the node. 
+func (node TableExprs) formatFast(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node *AliasedTableExpr) formatFast(buf *TrackedBuffer) { + node.Expr.formatFast(buf) + node.Partitions.formatFast(buf) + if !node.As.IsEmpty() { + buf.WriteString(" as ") + node.As.formatFast(buf) + } + if node.Hints != nil { + // Hint node provides the space padding. + node.Hints.formatFast(buf) + } +} + +// formatFast formats the node. +func (node TableNames) formatFast(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node TableName) formatFast(buf *TrackedBuffer) { + if node.IsEmpty() { + return + } + if !node.Qualifier.IsEmpty() { + node.Qualifier.formatFast(buf) + buf.WriteByte('.') + } + node.Name.formatFast(buf) +} + +// formatFast formats the node. +func (node *ParenTableExpr) formatFast(buf *TrackedBuffer) { + buf.WriteByte('(') + node.Exprs.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node JoinCondition) formatFast(buf *TrackedBuffer) { + if node.On != nil { + buf.WriteString(" on ") + node.On.formatFast(buf) + } + if node.Using != nil { + buf.WriteString(" using ") + node.Using.formatFast(buf) + } +} + +// formatFast formats the node. +func (node *JoinTableExpr) formatFast(buf *TrackedBuffer) { + node.LeftExpr.formatFast(buf) + buf.WriteByte(' ') + buf.WriteString(node.Join.ToString()) + buf.WriteByte(' ') + node.RightExpr.formatFast(buf) + node.Condition.formatFast(buf) +} + +// formatFast formats the node. 
+func (node *IndexHints) formatFast(buf *TrackedBuffer) { + buf.WriteByte(' ') + buf.WriteString(node.Type.ToString()) + buf.WriteString("index ") + if len(node.Indexes) == 0 { + buf.WriteString("()") + } else { + prefix := "(" + for _, n := range node.Indexes { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } + buf.WriteByte(')') + } +} + +// formatFast formats the node. +func (node *Where) formatFast(buf *TrackedBuffer) { + if node == nil || node.Expr == nil { + return + } + buf.WriteByte(' ') + buf.WriteString(node.Type.ToString()) + buf.WriteByte(' ') + node.Expr.formatFast(buf) +} + +// formatFast formats the node. +func (node Exprs) formatFast(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node *AndExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Left, true) + buf.WriteString(" and ") + buf.printExpr(node, node.Right, false) +} + +// formatFast formats the node. +func (node *OrExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Left, true) + buf.WriteString(" or ") + buf.printExpr(node, node.Right, false) +} + +// formatFast formats the node. +func (node *XorExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Left, true) + buf.WriteString(" xor ") + buf.printExpr(node, node.Right, false) +} + +// formatFast formats the node. +func (node *NotExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("not ") + buf.printExpr(node, node.Expr, true) +} + +// formatFast formats the node. +func (node *ComparisonExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Left, true) + buf.WriteByte(' ') + buf.WriteString(node.Operator.ToString()) + buf.WriteByte(' ') + buf.printExpr(node, node.Right, false) + if node.Escape != nil { + buf.WriteString(" escape ") + buf.printExpr(node, node.Escape, true) + } +} + +// formatFast formats the node. 
+func (node *RangeCond) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Left, true) + buf.WriteByte(' ') + buf.WriteString(node.Operator.ToString()) + buf.WriteByte(' ') + buf.printExpr(node, node.From, true) + buf.WriteString(" and ") + buf.printExpr(node, node.To, false) +} + +// formatFast formats the node. +func (node *IsExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Expr, true) + buf.WriteByte(' ') + buf.WriteString(node.Operator.ToString()) +} + +// formatFast formats the node. +func (node *ExistsExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("exists ") + buf.printExpr(node, node.Subquery, true) +} + +// formatFast formats the node. +func (node *Literal) formatFast(buf *TrackedBuffer) { + switch node.Type { + case StrVal: + sqltypes.MakeTrusted(sqltypes.VarBinary, node.Bytes()).EncodeSQL(buf) + case IntVal, FloatVal, HexNum: + buf.WriteString(node.Val) + case HexVal: + buf.WriteString("X'") + buf.WriteString(node.Val) + buf.WriteByte('\'') + case BitVal: + buf.WriteString("B'") + buf.WriteString(node.Val) + buf.WriteByte('\'') + default: + panic("unexpected") + } +} + +// formatFast formats the node. +func (node Argument) formatFast(buf *TrackedBuffer) { + buf.WriteArg(string(node)) +} + +// formatFast formats the node. +func (node *NullVal) formatFast(buf *TrackedBuffer) { + buf.WriteString("null") +} + +// formatFast formats the node. +func (node BoolVal) formatFast(buf *TrackedBuffer) { + if node { + buf.WriteString("true") + } else { + buf.WriteString("false") + } +} + +// formatFast formats the node. +func (node *ColName) formatFast(buf *TrackedBuffer) { + if !node.Qualifier.IsEmpty() { + node.Qualifier.formatFast(buf) + buf.WriteByte('.') + } + node.Name.formatFast(buf) +} + +// formatFast formats the node. +func (node ValTuple) formatFast(buf *TrackedBuffer) { + buf.WriteByte('(') + Exprs(node).formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. 
+func (node *Subquery) formatFast(buf *TrackedBuffer) { + buf.WriteByte('(') + node.Select.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *DerivedTable) formatFast(buf *TrackedBuffer) { + buf.WriteByte('(') + node.Select.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node ListArg) formatFast(buf *TrackedBuffer) { + buf.WriteArg(string(node)) +} + +// formatFast formats the node. +func (node *BinaryExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Left, true) + buf.WriteByte(' ') + buf.WriteString(node.Operator.ToString()) + buf.WriteByte(' ') + buf.printExpr(node, node.Right, false) +} + +// formatFast formats the node. +func (node *UnaryExpr) formatFast(buf *TrackedBuffer) { + if _, unary := node.Expr.(*UnaryExpr); unary { + // They have same precedence so parenthesis is not required. + buf.WriteString(node.Operator.ToString()) + buf.WriteByte(' ') + buf.printExpr(node, node.Expr, true) + return + } + buf.WriteString(node.Operator.ToString()) + buf.printExpr(node, node.Expr, true) +} + +// formatFast formats the node. +func (node *IntervalExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("interval ") + buf.printExpr(node, node.Expr, true) + buf.WriteByte(' ') + buf.WriteString(node.Unit) +} + +// formatFast formats the node. +func (node *TimestampFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Name) + buf.WriteByte('(') + buf.WriteString(node.Unit) + buf.WriteString(", ") + buf.printExpr(node, node.Expr1, true) + buf.WriteString(", ") + buf.printExpr(node, node.Expr2, true) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *CurTimeFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Name.String()) + buf.WriteByte('(') + buf.printExpr(node, node.Fsp, true) + buf.WriteByte(')') +} + +// formatFast formats the node. 
+func (node *CollateExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Expr, true) + buf.WriteString(" collate ") + buf.WriteString(node.Charset) +} + +// formatFast formats the node. +func (node *FuncExpr) formatFast(buf *TrackedBuffer) { + var distinct string + if node.Distinct { + distinct = "distinct " + } + if !node.Qualifier.IsEmpty() { + node.Qualifier.formatFast(buf) + buf.WriteByte('.') + } + // Function names should not be back-quoted even + // if they match a reserved word, only if they contain illegal characters + funcName := node.Name.String() + + if containEscapableChars(funcName, NoAt) { + writeEscapedString(buf, funcName) + } else { + buf.WriteString(funcName) + } + buf.WriteByte('(') + buf.WriteString(distinct) + node.Exprs.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GroupConcatExpr) formatFast(buf *TrackedBuffer) { + if node.Distinct { + buf.WriteString("group_concat(") + buf.WriteString(DistinctStr) + node.Exprs.formatFast(buf) + node.OrderBy.formatFast(buf) + buf.WriteString(node.Separator) + node.Limit.formatFast(buf) + buf.WriteByte(')') + } else { + buf.WriteString("group_concat(") + node.Exprs.formatFast(buf) + node.OrderBy.formatFast(buf) + buf.WriteString(node.Separator) + node.Limit.formatFast(buf) + buf.WriteByte(')') + } +} + +// formatFast formats the node. +func (node *ValuesFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("values(") + buf.printExpr(node, node.Name, true) + buf.WriteByte(')') +} + +// formatFast formats the node. 
+func (node *SubstrExpr) formatFast(buf *TrackedBuffer) { + var val SQLNode + if node.Name != nil { + val = node.Name + } else { + val = node.StrVal + } + + if node.To == nil { + buf.WriteString("substr(") + val.formatFast(buf) + buf.WriteString(", ") + buf.printExpr(node, node.From, true) + buf.WriteByte(')') + } else { + buf.WriteString("substr(") + val.formatFast(buf) + buf.WriteString(", ") + buf.printExpr(node, node.From, true) + buf.WriteString(", ") + buf.printExpr(node, node.To, true) + buf.WriteByte(')') + } +} + +// formatFast formats the node. +func (node *ConvertExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("convert(") + buf.printExpr(node, node.Expr, true) + buf.WriteString(", ") + node.Type.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *ConvertUsingExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("convert(") + buf.printExpr(node, node.Expr, true) + buf.WriteString(" using ") + buf.WriteString(node.Type) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *ConvertType) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Type) + if node.Length != nil { + buf.WriteByte('(') + node.Length.formatFast(buf) + if node.Scale != nil { + buf.WriteString(", ") + node.Scale.formatFast(buf) + } + buf.WriteByte(')') + } + if node.Charset != "" { + buf.WriteString(node.Operator.ToString()) + buf.WriteByte(' ') + buf.WriteString(node.Charset) + } +} + +// formatFast formats the node +func (node *MatchExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("match(") + node.Columns.formatFast(buf) + buf.WriteString(") against (") + buf.printExpr(node, node.Expr, true) + buf.WriteString(node.Option.ToString()) + buf.WriteByte(')') +} + +// formatFast formats the node. 
+func (node *CaseExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("case ") + if node.Expr != nil { + buf.printExpr(node, node.Expr, true) + buf.WriteByte(' ') + } + for _, when := range node.Whens { + when.formatFast(buf) + buf.WriteByte(' ') + } + if node.Else != nil { + buf.WriteString("else ") + buf.printExpr(node, node.Else, true) + buf.WriteByte(' ') + } + buf.WriteString("end") +} + +// formatFast formats the node. +func (node *Default) formatFast(buf *TrackedBuffer) { + buf.WriteString("default") + if node.ColName != "" { + buf.WriteString("(") + formatID(buf, node.ColName, NoAt) + buf.WriteString(")") + } +} + +// formatFast formats the node. +func (node *When) formatFast(buf *TrackedBuffer) { + buf.WriteString("when ") + node.Cond.formatFast(buf) + buf.WriteString(" then ") + node.Val.formatFast(buf) +} + +// formatFast formats the node. +func (node GroupBy) formatFast(buf *TrackedBuffer) { + prefix := " group by " + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node OrderBy) formatFast(buf *TrackedBuffer) { + prefix := " order by " + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node *Order) formatFast(buf *TrackedBuffer) { + if node, ok := node.Expr.(*NullVal); ok { + buf.printExpr(node, node, true) + return + } + if node, ok := node.Expr.(*FuncExpr); ok { + if node.Name.Lowered() == "rand" { + buf.printExpr(node, node, true) + return + } + } + + node.Expr.formatFast(buf) + buf.WriteByte(' ') + buf.WriteString(node.Direction.ToString()) +} + +// formatFast formats the node. +func (node *Limit) formatFast(buf *TrackedBuffer) { + if node == nil { + return + } + buf.WriteString(" limit ") + if node.Offset != nil { + node.Offset.formatFast(buf) + buf.WriteString(", ") + } + node.Rowcount.formatFast(buf) +} + +// formatFast formats the node. 
+func (node Values) formatFast(buf *TrackedBuffer) { + prefix := "values " + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node UpdateExprs) formatFast(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node *UpdateExpr) formatFast(buf *TrackedBuffer) { + node.Name.formatFast(buf) + buf.WriteString(" = ") + node.Expr.formatFast(buf) +} + +// formatFast formats the node. +func (node SetExprs) formatFast(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node. +func (node *SetExpr) formatFast(buf *TrackedBuffer) { + if node.Scope != ImplicitScope { + buf.WriteString(node.Scope.ToString()) + buf.WriteString(" ") + } + // We don't have to backtick set variable names. + switch { + case node.Name.EqualString("charset") || node.Name.EqualString("names"): + buf.WriteString(node.Name.String()) + buf.WriteByte(' ') + node.Expr.formatFast(buf) + case node.Name.EqualString(TransactionStr): + literal := node.Expr.(*Literal) + buf.WriteString(node.Name.String()) + buf.WriteByte(' ') + buf.WriteString(strings.ToLower(string(literal.Val))) + default: + node.Name.formatFast(buf) + buf.WriteString(" = ") + node.Expr.formatFast(buf) + } +} + +// formatFast formats the node. +func (node OnDup) formatFast(buf *TrackedBuffer) { + if node == nil { + return + } + buf.WriteString(" on duplicate key update ") + UpdateExprs(node).formatFast(buf) +} + +// formatFast formats the node. +func (node ColIdent) formatFast(buf *TrackedBuffer) { + for i := NoAt; i < node.at; i++ { + buf.WriteByte('@') + } + formatID(buf, node.val, node.at) +} + +// formatFast formats the node. 
+func (node TableIdent) formatFast(buf *TrackedBuffer) { + formatID(buf, node.v, NoAt) +} + +// formatFast formats the node. +func (node IsolationLevel) formatFast(buf *TrackedBuffer) { + buf.WriteString("isolation level ") + switch node { + case ReadUncommitted: + buf.WriteString(ReadUncommittedStr) + case ReadCommitted: + buf.WriteString(ReadCommittedStr) + case RepeatableRead: + buf.WriteString(RepeatableReadStr) + case Serializable: + buf.WriteString(SerializableStr) + default: + buf.WriteString("Unknown Isolation level value") + } +} + +// formatFast formats the node. +func (node AccessMode) formatFast(buf *TrackedBuffer) { + if node == ReadOnly { + buf.WriteString(TxReadOnly) + } else { + buf.WriteString(TxReadWrite) + } +} + +// formatFast formats the node. +func (node *Load) formatFast(buf *TrackedBuffer) { + buf.WriteString("AST node missing for Load type") +} + +// formatFast formats the node. +func (node *ShowBasic) formatFast(buf *TrackedBuffer) { + buf.WriteString("show") + if node.Full { + buf.WriteString(" full") + } + buf.WriteString(node.Command.ToString()) + if !node.Tbl.IsEmpty() { + buf.WriteString(" from ") + node.Tbl.formatFast(buf) + } + if !node.DbName.IsEmpty() { + buf.WriteString(" from ") + node.DbName.formatFast(buf) + } + node.Filter.formatFast(buf) +} + +// formatFast formats the node. +func (node *ShowCreate) formatFast(buf *TrackedBuffer) { + buf.WriteString("show") + buf.WriteString(node.Command.ToString()) + buf.WriteByte(' ') + node.Op.formatFast(buf) +} + +// formatFast formats the node. +func (node *SelectInto) formatFast(buf *TrackedBuffer) { + if node == nil { + return + } + buf.WriteString(node.Type.ToString()) + buf.WriteString(node.FileName) + if node.Charset != "" { + buf.WriteString(" character set ") + buf.WriteString(node.Charset) + } + buf.WriteString(node.FormatOption) + buf.WriteString(node.ExportOption) + buf.WriteString(node.Manifest) + buf.WriteString(node.Overwrite) +} + +// formatFast formats the node. 
+func (node *CreateDatabase) formatFast(buf *TrackedBuffer) { + buf.WriteString("create database ") + node.Comments.formatFast(buf) + if node.IfNotExists { + buf.WriteString("if not exists ") + } + node.DBName.formatFast(buf) + if node.CreateOptions != nil { + for _, createOption := range node.CreateOptions { + if createOption.IsDefault { + buf.WriteString(" default") + } + buf.WriteString(createOption.Type.ToString()) + buf.WriteString(" " + createOption.Value) + } + } +} + +// formatFast formats the node. +func (node *AlterDatabase) formatFast(buf *TrackedBuffer) { + buf.WriteString("alter database") + if !node.DBName.IsEmpty() { + buf.WriteByte(' ') + node.DBName.formatFast(buf) + } + if node.UpdateDataDirectory { + buf.WriteString(" upgrade data directory name") + } + if node.AlterOptions != nil { + for _, createOption := range node.AlterOptions { + if createOption.IsDefault { + buf.WriteString(" default") + } + buf.WriteString(createOption.Type.ToString()) + buf.WriteString(" " + createOption.Value) + } + } +} + +// formatFast formats the node. +func (node *CreateTable) formatFast(buf *TrackedBuffer) { + buf.WriteString("create ") + if node.Temp { + buf.WriteString("temporary ") + } + buf.WriteString("table ") + + if node.IfNotExists { + buf.WriteString("if not exists ") + } + node.Table.formatFast(buf) + + if node.OptLike != nil { + buf.WriteByte(' ') + node.OptLike.formatFast(buf) + } + if node.TableSpec != nil { + buf.WriteByte(' ') + node.TableSpec.formatFast(buf) + } +} + +// formatFast formats the node. 
+func (node *CreateView) formatFast(buf *TrackedBuffer) { + buf.WriteString("create") + if node.IsReplace { + buf.WriteString(" or replace") + } + if node.Algorithm != "" { + buf.WriteString(" algorithm = ") + buf.WriteString(node.Algorithm) + } + if node.Definer != "" { + buf.WriteString(" definer = ") + buf.WriteString(node.Definer) + } + if node.Security != "" { + buf.WriteString(" sql security ") + buf.WriteString(node.Security) + } + buf.WriteString(" view ") + node.ViewName.formatFast(buf) + node.Columns.formatFast(buf) + buf.WriteString(" as ") + node.Select.formatFast(buf) + if node.CheckOption != "" { + buf.WriteString(" with ") + buf.WriteString(node.CheckOption) + buf.WriteString(" check option") + } +} + +// formatFast formats the LockTables node. +func (node *LockTables) formatFast(buf *TrackedBuffer) { + buf.WriteString("lock tables ") + node.Tables[0].Table.formatFast(buf) + buf.WriteByte(' ') + buf.WriteString(node.Tables[0].Lock.ToString()) + for i := 1; i < len(node.Tables); i++ { + buf.WriteString(", ") + node.Tables[i].Table.formatFast(buf) + buf.WriteByte(' ') + buf.WriteString(node.Tables[i].Lock.ToString()) + } +} + +// formatFast formats the UnlockTables node. +func (node *UnlockTables) formatFast(buf *TrackedBuffer) { + buf.WriteString("unlock tables") +} + +// formatFast formats the node. 
+func (node *AlterView) formatFast(buf *TrackedBuffer) { + buf.WriteString("alter") + if node.Algorithm != "" { + buf.WriteString(" algorithm = ") + buf.WriteString(node.Algorithm) + } + if node.Definer != "" { + buf.WriteString(" definer = ") + buf.WriteString(node.Definer) + } + if node.Security != "" { + buf.WriteString(" sql security ") + buf.WriteString(node.Security) + } + buf.WriteString(" view ") + node.ViewName.formatFast(buf) + node.Columns.formatFast(buf) + buf.WriteString(" as ") + node.Select.formatFast(buf) + if node.CheckOption != "" { + buf.WriteString(" with ") + buf.WriteString(node.CheckOption) + buf.WriteString(" check option") + } +} + +// formatFast formats the node. +func (node *DropTable) formatFast(buf *TrackedBuffer) { + temp := "" + if node.Temp { + temp = " temporary" + } + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.WriteString("drop") + buf.WriteString(temp) + buf.WriteString(" table") + buf.WriteString(exists) + buf.WriteByte(' ') + node.FromTables.formatFast(buf) +} + +// formatFast formats the node. +func (node *DropView) formatFast(buf *TrackedBuffer) { + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.WriteString("drop view") + buf.WriteString(exists) + buf.WriteByte(' ') + node.FromTables.formatFast(buf) +} + +// formatFast formats the AlterTable node. +func (node *AlterTable) formatFast(buf *TrackedBuffer) { + buf.WriteString("alter table ") + node.Table.formatFast(buf) + prefix := "" + for i, option := range node.AlterOptions { + if i != 0 { + buf.WriteString(",") + } + buf.WriteByte(' ') + option.formatFast(buf) + if node.PartitionSpec != nil && node.PartitionSpec.Action != RemoveAction { + prefix = "," + } + } + if node.PartitionSpec != nil { + buf.WriteString(prefix) + buf.WriteByte(' ') + node.PartitionSpec.formatFast(buf) + } +} + +// formatFast formats the node. 
+func (node *AddConstraintDefinition) formatFast(buf *TrackedBuffer) { + buf.WriteString("add ") + node.ConstraintDefinition.formatFast(buf) +} + +// formatFast formats the node. +func (node *AddIndexDefinition) formatFast(buf *TrackedBuffer) { + buf.WriteString("add ") + node.IndexDefinition.formatFast(buf) +} + +// formatFast formats the node. +func (node *AddColumns) formatFast(buf *TrackedBuffer) { + + if len(node.Columns) == 1 { + buf.WriteString("add column ") + node.Columns[0].formatFast(buf) + if node.First != nil { + buf.WriteString(" first ") + node.First.formatFast(buf) + } + if node.After != nil { + buf.WriteString(" after ") + node.After.formatFast(buf) + } + } else { + for i, col := range node.Columns { + if i == 0 { + buf.WriteString("add column (") + col.formatFast(buf) + } else { + buf.WriteString(", ") + col.formatFast(buf) + } + } + buf.WriteString(")") + } +} + +// formatFast formats the node. +func (node AlgorithmValue) formatFast(buf *TrackedBuffer) { + buf.WriteString("algorithm = ") + buf.WriteString(string(node)) +} + +// formatFast formats the node +func (node *AlterColumn) formatFast(buf *TrackedBuffer) { + if node.DropDefault { + buf.WriteString("alter column ") + node.Column.formatFast(buf) + buf.WriteString(" drop default") + } else { + buf.WriteString("alter column ") + node.Column.formatFast(buf) + buf.WriteString(" set default") + buf.WriteByte(' ') + node.DefaultVal.formatFast(buf) + } +} + +// formatFast formats the node +func (node *ChangeColumn) formatFast(buf *TrackedBuffer) { + buf.WriteString("change column ") + node.OldColumn.formatFast(buf) + buf.WriteByte(' ') + node.NewColDefinition.formatFast(buf) + if node.First != nil { + buf.WriteString(" first ") + node.First.formatFast(buf) + } + if node.After != nil { + buf.WriteString(" after ") + node.After.formatFast(buf) + } +} + +// formatFast formats the node +func (node *ModifyColumn) formatFast(buf *TrackedBuffer) { + buf.WriteString("modify column ") + 
node.NewColDefinition.formatFast(buf) + if node.First != nil { + buf.WriteString(" first ") + node.First.formatFast(buf) + } + if node.After != nil { + buf.WriteString(" after ") + node.After.formatFast(buf) + } +} + +// formatFast formats the node +func (node *AlterCharset) formatFast(buf *TrackedBuffer) { + buf.WriteString("convert to character set ") + buf.WriteString(node.CharacterSet) + if node.Collate != "" { + buf.WriteString(" collate ") + buf.WriteString(node.Collate) + } +} + +// formatFast formats the node +func (node *KeyState) formatFast(buf *TrackedBuffer) { + if node.Enable { + buf.WriteString("enable keys") + } else { + buf.WriteString("disable keys") + } + +} + +// formatFast formats the node +func (node *TablespaceOperation) formatFast(buf *TrackedBuffer) { + if node.Import { + buf.WriteString("import tablespace") + } else { + buf.WriteString("discard tablespace") + } +} + +// formatFast formats the node +func (node *DropColumn) formatFast(buf *TrackedBuffer) { + buf.WriteString("drop column ") + node.Name.formatFast(buf) +} + +// formatFast formats the node +func (node *DropKey) formatFast(buf *TrackedBuffer) { + buf.WriteString("drop ") + buf.WriteString(node.Type.ToString()) + if !node.Name.IsEmpty() { + buf.WriteByte(' ') + node.Name.formatFast(buf) + } +} + +// formatFast formats the node +func (node *Force) formatFast(buf *TrackedBuffer) { + buf.WriteString("force") +} + +// formatFast formats the node +func (node *LockOption) formatFast(buf *TrackedBuffer) { + buf.WriteString("lock ") + buf.WriteString(node.Type.ToString()) +} + +// formatFast formats the node +func (node *OrderByOption) formatFast(buf *TrackedBuffer) { + buf.WriteString("order by ") + prefix := "" + for _, n := range node.Cols { + buf.WriteString(prefix) + n.formatFast(buf) + prefix = ", " + } +} + +// formatFast formats the node +func (node *RenameTableName) formatFast(buf *TrackedBuffer) { + buf.WriteString("rename ") + node.Table.formatFast(buf) +} + +// formatFast 
formats the node +func (node *RenameIndex) formatFast(buf *TrackedBuffer) { + buf.WriteString("rename index ") + node.OldName.formatFast(buf) + buf.WriteString(" to ") + node.NewName.formatFast(buf) +} + +// formatFast formats the node +func (node *Validation) formatFast(buf *TrackedBuffer) { + if node.With { + buf.WriteString("with validation") + } else { + buf.WriteString("without validation") + } +} + +// formatFast formats the node +func (node TableOptions) formatFast(buf *TrackedBuffer) { + for i, option := range node { + if i != 0 { + buf.WriteString(" ") + } + buf.WriteString(option.Name) + if option.String != "" { + buf.WriteByte(' ') + buf.WriteString(option.String) + } else if option.Value != nil { + buf.WriteByte(' ') + option.Value.formatFast(buf) + } else { + buf.WriteString(" (") + option.Tables.formatFast(buf) + buf.WriteByte(')') + } + } +} + +// formatFast formats the node +func (node *TruncateTable) formatFast(buf *TrackedBuffer) { + buf.WriteString("truncate table ") + node.Table.formatFast(buf) +} + +// formatFast formats the node. +func (node *RenameTable) formatFast(buf *TrackedBuffer) { + buf.WriteString("rename table") + prefix := " " + for _, pair := range node.TablePairs { + buf.WriteString(prefix) + pair.FromTable.formatFast(buf) + buf.WriteString(" to ") + pair.ToTable.formatFast(buf) + prefix = ", " + } +} diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index ec2493d494b..f59d4804a3a 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -36,27 +36,7 @@ import ( // is interrupted, and the error is returned. 
func Walk(visit Visit, nodes ...SQLNode) error { for _, node := range nodes { - if node == nil { - continue - } - var err error - var kontinue bool - pre := func(cursor *Cursor) bool { - // If we already have found an error, don't visit these nodes, just exit early - if err != nil { - return false - } - kontinue, err = visit(cursor.Node()) - if err != nil { - return true // we have to return true here so that post gets called - } - return kontinue - } - post := func(cursor *Cursor) bool { - return err == nil // now we can abort the traversal if an error was found - } - - Rewrite(node, pre, post) + err := VisitSQLNode(node, visit) if err != nil { return err } @@ -66,6 +46,8 @@ func Walk(visit Visit, nodes ...SQLNode) error { // Visit defines the signature of a function that // can be used to visit all nodes of a parse tree. +// returning false on kontinue means that children will not be visited +// returning an error will abort the visitation and return the error type Visit func(node SQLNode) (kontinue bool, err error) // Append appends the SQLNode to the buffer. @@ -392,9 +374,10 @@ func NewWhere(typ WhereType, expr Expr) *Where { // then to is returned. func ReplaceExpr(root, from, to Expr) Expr { tmp := Rewrite(root, replaceExpr(from, to), nil) + expr, success := tmp.(Expr) if !success { - log.Errorf("Failed to rewrite expression. Rewriter returned a non-expression: " + String(tmp)) + log.Errorf("Failed to rewrite expression. Rewriter returned a non-expression: %s", String(tmp)) return from } @@ -442,48 +425,48 @@ func (node *ComparisonExpr) IsImpossible() bool { } // NewStrLiteral builds a new StrVal. -func NewStrLiteral(in []byte) *Literal { +func NewStrLiteral(in string) *Literal { return &Literal{Type: StrVal, Val: in} } // NewIntLiteral builds a new IntVal. -func NewIntLiteral(in []byte) *Literal { +func NewIntLiteral(in string) *Literal { return &Literal{Type: IntVal, Val: in} } // NewFloatLiteral builds a new FloatVal. 
-func NewFloatLiteral(in []byte) *Literal { +func NewFloatLiteral(in string) *Literal { return &Literal{Type: FloatVal, Val: in} } // NewHexNumLiteral builds a new HexNum. -func NewHexNumLiteral(in []byte) *Literal { +func NewHexNumLiteral(in string) *Literal { return &Literal{Type: HexNum, Val: in} } // NewHexLiteral builds a new HexVal. -func NewHexLiteral(in []byte) *Literal { +func NewHexLiteral(in string) *Literal { return &Literal{Type: HexVal, Val: in} } // NewBitLiteral builds a new BitVal containing a bit literal. -func NewBitLiteral(in []byte) *Literal { +func NewBitLiteral(in string) *Literal { return &Literal{Type: BitVal, Val: in} } // NewArgument builds a new ValArg. -func NewArgument(in []byte) Argument { - return in +func NewArgument(in string) Argument { + return Argument(in) +} + +// Bytes return the []byte +func (node *Literal) Bytes() []byte { + return []byte(node.Val) } // HexDecode decodes the hexval into bytes. func (node *Literal) HexDecode() ([]byte, error) { - dst := make([]byte, hex.DecodedLen(len([]byte(node.Val)))) - _, err := hex.Decode(dst, []byte(node.Val)) - if err != nil { - return nil, err - } - return dst, err + return hex.DecodeString(node.Val) } // Equal returns true if the column names match. 
@@ -693,11 +676,12 @@ func (node *TableIdent) UnmarshalJSON(b []byte) error { func containEscapableChars(s string, at AtCount) bool { isDbSystemVariable := at != NoAt - for i, c := range s { - letter := isLetter(uint16(c)) - systemVarChar := isDbSystemVariable && isCarat(uint16(c)) + for i := range s { + c := uint16(s[i]) + letter := isLetter(c) + systemVarChar := isDbSystemVariable && isCarat(c) if !(letter || systemVarChar) { - if i == 0 || !isDigit(uint16(c)) { + if i == 0 || !isDigit(c) { return true } } @@ -706,16 +690,12 @@ func containEscapableChars(s string, at AtCount) bool { return false } -func isKeyword(s string) bool { - _, isKeyword := keywords[s] - return isKeyword -} - -func formatID(buf *TrackedBuffer, original, lowered string, at AtCount) { - if containEscapableChars(original, at) || isKeyword(lowered) { +func formatID(buf *TrackedBuffer, original string, at AtCount) { + _, isKeyword := keywordLookupTable.LookupString(original) + if isKeyword || containEscapableChars(original, at) { writeEscapedString(buf, original) } else { - buf.Myprintf("%s", original) + buf.WriteString(original) } } @@ -816,6 +796,22 @@ func (node *ParenSelect) MakeDistinct() { node.Select.MakeDistinct() } +// AddWhere adds the boolean expression to the +// WHERE clause as an AND condition. 
+func (node *Update) AddWhere(expr Expr) { + if node.Where == nil { + node.Where = &Where{ + Type: WhereClause, + Expr: expr, + } + return + } + node.Where.Expr = &AndExpr{ + Left: node.Where.Expr, + Right: expr, + } +} + // AddOrder adds an order by element func (node *Union) AddOrder(order *Order) { node.OrderBy = append(node.OrderBy, order) @@ -863,8 +859,6 @@ func (action DDLAction) ToString() string { return RenameStr case TruncateDDLAction: return TruncateStr - case FlushDDLAction: - return FlushStr case CreateVindexDDLAction: return CreateVindexStr case DropVindexDDLAction: @@ -1213,24 +1207,59 @@ func (ty ShowCommandType) ToString() string { return CharsetStr case Collation: return CollationStr + case Column: + return ColumnStr + case CreateDb: + return CreateDbStr + case CreateE: + return CreateEStr + case CreateF: + return CreateFStr + case CreateProc: + return CreateProcStr + case CreateTbl: + return CreateTblStr + case CreateTr: + return CreateTrStr + case CreateV: + return CreateVStr case Database: return DatabaseStr + case FunctionC: + return FunctionCStr case Function: return FunctionStr + case Index: + return IndexStr + case OpenTable: + return OpenTableStr case Privilege: return PrivilegeStr + case ProcedureC: + return ProcedureCStr case Procedure: return ProcedureStr case StatusGlobal: return StatusGlobalStr case StatusSession: return StatusSessionStr + case Table: + return TableStr + case TableStatus: + return TableStatusStr + case Trigger: + return TriggerStr case VariableGlobal: return VariableGlobalStr case VariableSession: return VariableSessionStr + case VitessMigrations: + return VitessMigrationsStr + case Keyspace: + return KeyspaceStr default: - return "Unknown ShowCommandType" + return "" + + "Unknown ShowCommandType" } } @@ -1264,6 +1293,14 @@ func (lock LockOptionType) ToString() string { } } +// CompliantName is used to get the name of the bind variable to use for this column name +func (node *ColName) CompliantName(suffix string) 
string { + if !node.Qualifier.IsEmpty() { + return node.Qualifier.Name.CompliantName() + "_" + node.Name.CompliantName() + suffix + } + return node.Name.CompliantName() + suffix +} + // AtCount represents the '@' count in ColIdent type AtCount int @@ -1275,3 +1312,24 @@ const ( // DoubleAt represnts @@ DoubleAt ) + +// handleUnaryMinus handles the case when a unary minus operator is seen in the parser. It takes 1 argument which is the expr to which the unary minus has been added to. +func handleUnaryMinus(expr Expr) Expr { + if num, ok := expr.(*Literal); ok && num.Type == IntVal { + // Handle double negative + if num.Val[0] == '-' { + num.Val = num.Val[1:] + return num + } + return NewIntLiteral("-" + num.Val) + } + if unaryExpr, ok := expr.(*UnaryExpr); ok && unaryExpr.Operator == UMinusOp { + return unaryExpr.Expr + } + return &UnaryExpr{Operator: UMinusOp, Expr: expr} +} + +// encodeSQLString encodes the string as a SQL string. +func encodeSQLString(val string) string { + return sqltypes.EncodeStringSQL(val) +} diff --git a/go/vt/sqlparser/ast_rewrite.go b/go/vt/sqlparser/ast_rewrite.go new file mode 100644 index 00000000000..68cbc4b27be --- /dev/null +++ b/go/vt/sqlparser/ast_rewrite.go @@ -0,0 +1,5356 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. 
+ +package sqlparser + +func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case AccessMode: + return a.rewriteAccessMode(parent, node, replacer) + case *AddColumns: + return a.rewriteRefOfAddColumns(parent, node, replacer) + case *AddConstraintDefinition: + return a.rewriteRefOfAddConstraintDefinition(parent, node, replacer) + case *AddIndexDefinition: + return a.rewriteRefOfAddIndexDefinition(parent, node, replacer) + case AlgorithmValue: + return a.rewriteAlgorithmValue(parent, node, replacer) + case *AliasedExpr: + return a.rewriteRefOfAliasedExpr(parent, node, replacer) + case *AliasedTableExpr: + return a.rewriteRefOfAliasedTableExpr(parent, node, replacer) + case *AlterCharset: + return a.rewriteRefOfAlterCharset(parent, node, replacer) + case *AlterColumn: + return a.rewriteRefOfAlterColumn(parent, node, replacer) + case *AlterDatabase: + return a.rewriteRefOfAlterDatabase(parent, node, replacer) + case *AlterMigration: + return a.rewriteRefOfAlterMigration(parent, node, replacer) + case *AlterTable: + return a.rewriteRefOfAlterTable(parent, node, replacer) + case *AlterView: + return a.rewriteRefOfAlterView(parent, node, replacer) + case *AlterVschema: + return a.rewriteRefOfAlterVschema(parent, node, replacer) + case *AndExpr: + return a.rewriteRefOfAndExpr(parent, node, replacer) + case Argument: + return a.rewriteArgument(parent, node, replacer) + case *AutoIncSpec: + return a.rewriteRefOfAutoIncSpec(parent, node, replacer) + case *Begin: + return a.rewriteRefOfBegin(parent, node, replacer) + case *BinaryExpr: + return a.rewriteRefOfBinaryExpr(parent, node, replacer) + case BoolVal: + return a.rewriteBoolVal(parent, node, replacer) + case *CallProc: + return a.rewriteRefOfCallProc(parent, node, replacer) + case *CaseExpr: + return a.rewriteRefOfCaseExpr(parent, node, replacer) + case *ChangeColumn: + return 
a.rewriteRefOfChangeColumn(parent, node, replacer) + case *CheckConstraintDefinition: + return a.rewriteRefOfCheckConstraintDefinition(parent, node, replacer) + case ColIdent: + return a.rewriteColIdent(parent, node, replacer) + case *ColName: + return a.rewriteRefOfColName(parent, node, replacer) + case *CollateExpr: + return a.rewriteRefOfCollateExpr(parent, node, replacer) + case *ColumnDefinition: + return a.rewriteRefOfColumnDefinition(parent, node, replacer) + case *ColumnType: + return a.rewriteRefOfColumnType(parent, node, replacer) + case Columns: + return a.rewriteColumns(parent, node, replacer) + case Comments: + return a.rewriteComments(parent, node, replacer) + case *Commit: + return a.rewriteRefOfCommit(parent, node, replacer) + case *ComparisonExpr: + return a.rewriteRefOfComparisonExpr(parent, node, replacer) + case *ConstraintDefinition: + return a.rewriteRefOfConstraintDefinition(parent, node, replacer) + case *ConvertExpr: + return a.rewriteRefOfConvertExpr(parent, node, replacer) + case *ConvertType: + return a.rewriteRefOfConvertType(parent, node, replacer) + case *ConvertUsingExpr: + return a.rewriteRefOfConvertUsingExpr(parent, node, replacer) + case *CreateDatabase: + return a.rewriteRefOfCreateDatabase(parent, node, replacer) + case *CreateTable: + return a.rewriteRefOfCreateTable(parent, node, replacer) + case *CreateView: + return a.rewriteRefOfCreateView(parent, node, replacer) + case *CurTimeFuncExpr: + return a.rewriteRefOfCurTimeFuncExpr(parent, node, replacer) + case *Default: + return a.rewriteRefOfDefault(parent, node, replacer) + case *Delete: + return a.rewriteRefOfDelete(parent, node, replacer) + case *DerivedTable: + return a.rewriteRefOfDerivedTable(parent, node, replacer) + case *DropColumn: + return a.rewriteRefOfDropColumn(parent, node, replacer) + case *DropDatabase: + return a.rewriteRefOfDropDatabase(parent, node, replacer) + case *DropKey: + return a.rewriteRefOfDropKey(parent, node, replacer) + case *DropTable: + 
return a.rewriteRefOfDropTable(parent, node, replacer) + case *DropView: + return a.rewriteRefOfDropView(parent, node, replacer) + case *ExistsExpr: + return a.rewriteRefOfExistsExpr(parent, node, replacer) + case *ExplainStmt: + return a.rewriteRefOfExplainStmt(parent, node, replacer) + case *ExplainTab: + return a.rewriteRefOfExplainTab(parent, node, replacer) + case Exprs: + return a.rewriteExprs(parent, node, replacer) + case *Flush: + return a.rewriteRefOfFlush(parent, node, replacer) + case *Force: + return a.rewriteRefOfForce(parent, node, replacer) + case *ForeignKeyDefinition: + return a.rewriteRefOfForeignKeyDefinition(parent, node, replacer) + case *FuncExpr: + return a.rewriteRefOfFuncExpr(parent, node, replacer) + case GroupBy: + return a.rewriteGroupBy(parent, node, replacer) + case *GroupConcatExpr: + return a.rewriteRefOfGroupConcatExpr(parent, node, replacer) + case *IndexDefinition: + return a.rewriteRefOfIndexDefinition(parent, node, replacer) + case *IndexHints: + return a.rewriteRefOfIndexHints(parent, node, replacer) + case *IndexInfo: + return a.rewriteRefOfIndexInfo(parent, node, replacer) + case *Insert: + return a.rewriteRefOfInsert(parent, node, replacer) + case *IntervalExpr: + return a.rewriteRefOfIntervalExpr(parent, node, replacer) + case *IsExpr: + return a.rewriteRefOfIsExpr(parent, node, replacer) + case IsolationLevel: + return a.rewriteIsolationLevel(parent, node, replacer) + case JoinCondition: + return a.rewriteJoinCondition(parent, node, replacer) + case *JoinTableExpr: + return a.rewriteRefOfJoinTableExpr(parent, node, replacer) + case *KeyState: + return a.rewriteRefOfKeyState(parent, node, replacer) + case *Limit: + return a.rewriteRefOfLimit(parent, node, replacer) + case ListArg: + return a.rewriteListArg(parent, node, replacer) + case *Literal: + return a.rewriteRefOfLiteral(parent, node, replacer) + case *Load: + return a.rewriteRefOfLoad(parent, node, replacer) + case *LockOption: + return 
a.rewriteRefOfLockOption(parent, node, replacer) + case *LockTables: + return a.rewriteRefOfLockTables(parent, node, replacer) + case *MatchExpr: + return a.rewriteRefOfMatchExpr(parent, node, replacer) + case *ModifyColumn: + return a.rewriteRefOfModifyColumn(parent, node, replacer) + case *Nextval: + return a.rewriteRefOfNextval(parent, node, replacer) + case *NotExpr: + return a.rewriteRefOfNotExpr(parent, node, replacer) + case *NullVal: + return a.rewriteRefOfNullVal(parent, node, replacer) + case OnDup: + return a.rewriteOnDup(parent, node, replacer) + case *OptLike: + return a.rewriteRefOfOptLike(parent, node, replacer) + case *OrExpr: + return a.rewriteRefOfOrExpr(parent, node, replacer) + case *Order: + return a.rewriteRefOfOrder(parent, node, replacer) + case OrderBy: + return a.rewriteOrderBy(parent, node, replacer) + case *OrderByOption: + return a.rewriteRefOfOrderByOption(parent, node, replacer) + case *OtherAdmin: + return a.rewriteRefOfOtherAdmin(parent, node, replacer) + case *OtherRead: + return a.rewriteRefOfOtherRead(parent, node, replacer) + case *ParenSelect: + return a.rewriteRefOfParenSelect(parent, node, replacer) + case *ParenTableExpr: + return a.rewriteRefOfParenTableExpr(parent, node, replacer) + case *PartitionDefinition: + return a.rewriteRefOfPartitionDefinition(parent, node, replacer) + case *PartitionSpec: + return a.rewriteRefOfPartitionSpec(parent, node, replacer) + case Partitions: + return a.rewritePartitions(parent, node, replacer) + case *RangeCond: + return a.rewriteRefOfRangeCond(parent, node, replacer) + case ReferenceAction: + return a.rewriteReferenceAction(parent, node, replacer) + case *Release: + return a.rewriteRefOfRelease(parent, node, replacer) + case *RenameIndex: + return a.rewriteRefOfRenameIndex(parent, node, replacer) + case *RenameTable: + return a.rewriteRefOfRenameTable(parent, node, replacer) + case *RenameTableName: + return a.rewriteRefOfRenameTableName(parent, node, replacer) + case *RevertMigration: + 
return a.rewriteRefOfRevertMigration(parent, node, replacer) + case *Rollback: + return a.rewriteRefOfRollback(parent, node, replacer) + case *SRollback: + return a.rewriteRefOfSRollback(parent, node, replacer) + case *Savepoint: + return a.rewriteRefOfSavepoint(parent, node, replacer) + case *Select: + return a.rewriteRefOfSelect(parent, node, replacer) + case SelectExprs: + return a.rewriteSelectExprs(parent, node, replacer) + case *SelectInto: + return a.rewriteRefOfSelectInto(parent, node, replacer) + case *Set: + return a.rewriteRefOfSet(parent, node, replacer) + case *SetExpr: + return a.rewriteRefOfSetExpr(parent, node, replacer) + case SetExprs: + return a.rewriteSetExprs(parent, node, replacer) + case *SetTransaction: + return a.rewriteRefOfSetTransaction(parent, node, replacer) + case *Show: + return a.rewriteRefOfShow(parent, node, replacer) + case *ShowBasic: + return a.rewriteRefOfShowBasic(parent, node, replacer) + case *ShowCreate: + return a.rewriteRefOfShowCreate(parent, node, replacer) + case *ShowFilter: + return a.rewriteRefOfShowFilter(parent, node, replacer) + case *ShowLegacy: + return a.rewriteRefOfShowLegacy(parent, node, replacer) + case *StarExpr: + return a.rewriteRefOfStarExpr(parent, node, replacer) + case *Stream: + return a.rewriteRefOfStream(parent, node, replacer) + case *Subquery: + return a.rewriteRefOfSubquery(parent, node, replacer) + case *SubstrExpr: + return a.rewriteRefOfSubstrExpr(parent, node, replacer) + case TableExprs: + return a.rewriteTableExprs(parent, node, replacer) + case TableIdent: + return a.rewriteTableIdent(parent, node, replacer) + case TableName: + return a.rewriteTableName(parent, node, replacer) + case TableNames: + return a.rewriteTableNames(parent, node, replacer) + case TableOptions: + return a.rewriteTableOptions(parent, node, replacer) + case *TableSpec: + return a.rewriteRefOfTableSpec(parent, node, replacer) + case *TablespaceOperation: + return a.rewriteRefOfTablespaceOperation(parent, node, 
replacer) + case *TimestampFuncExpr: + return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *TruncateTable: + return a.rewriteRefOfTruncateTable(parent, node, replacer) + case *UnaryExpr: + return a.rewriteRefOfUnaryExpr(parent, node, replacer) + case *Union: + return a.rewriteRefOfUnion(parent, node, replacer) + case *UnionSelect: + return a.rewriteRefOfUnionSelect(parent, node, replacer) + case *UnlockTables: + return a.rewriteRefOfUnlockTables(parent, node, replacer) + case *Update: + return a.rewriteRefOfUpdate(parent, node, replacer) + case *UpdateExpr: + return a.rewriteRefOfUpdateExpr(parent, node, replacer) + case UpdateExprs: + return a.rewriteUpdateExprs(parent, node, replacer) + case *Use: + return a.rewriteRefOfUse(parent, node, replacer) + case *VStream: + return a.rewriteRefOfVStream(parent, node, replacer) + case ValTuple: + return a.rewriteValTuple(parent, node, replacer) + case *Validation: + return a.rewriteRefOfValidation(parent, node, replacer) + case Values: + return a.rewriteValues(parent, node, replacer) + case *ValuesFuncExpr: + return a.rewriteRefOfValuesFuncExpr(parent, node, replacer) + case VindexParam: + return a.rewriteVindexParam(parent, node, replacer) + case *VindexSpec: + return a.rewriteRefOfVindexSpec(parent, node, replacer) + case *When: + return a.rewriteRefOfWhen(parent, node, replacer) + case *Where: + return a.rewriteRefOfWhere(parent, node, replacer) + case *XorExpr: + return a.rewriteRefOfXorExpr(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteRefOfAddColumns(parent SQLNode, node *AddColumns, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node.Columns { + if !a.rewriteRefOfColumnDefinition(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) 
{ + parent.(*AddColumns).Columns[idx] = newNode.(*ColumnDefinition) + } + }(x)) { + return false + } + } + if !a.rewriteRefOfColName(node, node.First, func(newNode, parent SQLNode) { + parent.(*AddColumns).First = newNode.(*ColName) + }) { + return false + } + if !a.rewriteRefOfColName(node, node.After, func(newNode, parent SQLNode) { + parent.(*AddColumns).After = newNode.(*ColName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAddConstraintDefinition(parent SQLNode, node *AddConstraintDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfConstraintDefinition(node, node.ConstraintDefinition, func(newNode, parent SQLNode) { + parent.(*AddConstraintDefinition).ConstraintDefinition = newNode.(*ConstraintDefinition) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAddIndexDefinition(parent SQLNode, node *AddIndexDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfIndexDefinition(node, node.IndexDefinition, func(newNode, parent SQLNode) { + parent.(*AddIndexDefinition).IndexDefinition = newNode.(*IndexDefinition) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAliasedExpr(parent SQLNode, node *AliasedExpr, 
replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*AliasedExpr).Expr = newNode.(Expr) + }) { + return false + } + if !a.rewriteColIdent(node, node.As, func(newNode, parent SQLNode) { + parent.(*AliasedExpr).As = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAliasedTableExpr(parent SQLNode, node *AliasedTableExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSimpleTableExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Expr = newNode.(SimpleTableExpr) + }) { + return false + } + if !a.rewritePartitions(node, node.Partitions, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Partitions = newNode.(Partitions) + }) { + return false + } + if !a.rewriteTableIdent(node, node.As, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).As = newNode.(TableIdent) + }) { + return false + } + if !a.rewriteRefOfIndexHints(node, node.Hints, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Hints = newNode.(*IndexHints) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAlterCharset(parent SQLNode, node *AlterCharset, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + 
if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAlterColumn(parent SQLNode, node *AlterColumn, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfColName(node, node.Column, func(newNode, parent SQLNode) { + parent.(*AlterColumn).Column = newNode.(*ColName) + }) { + return false + } + if !a.rewriteExpr(node, node.DefaultVal, func(newNode, parent SQLNode) { + parent.(*AlterColumn).DefaultVal = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAlterDatabase(parent SQLNode, node *AlterDatabase, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableIdent(node, node.DBName, func(newNode, parent SQLNode) { + parent.(*AlterDatabase).DBName = newNode.(TableIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAlterMigration(parent SQLNode, node *AlterMigration, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if 
!a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAlterTable(parent SQLNode, node *AlterTable, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*AlterTable).Table = newNode.(TableName) + }) { + return false + } + for x, el := range node.AlterOptions { + if !a.rewriteAlterOption(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*AlterTable).AlterOptions[idx] = newNode.(AlterOption) + } + }(x)) { + return false + } + } + if !a.rewriteRefOfPartitionSpec(node, node.PartitionSpec, func(newNode, parent SQLNode) { + parent.(*AlterTable).PartitionSpec = newNode.(*PartitionSpec) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAlterView(parent SQLNode, node *AlterView, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.ViewName, func(newNode, parent SQLNode) { + parent.(*AlterView).ViewName = newNode.(TableName) + }) { + return false + } + if !a.rewriteColumns(node, node.Columns, func(newNode, parent SQLNode) { + parent.(*AlterView).Columns = newNode.(Columns) + }) { + return false + } + if !a.rewriteSelectStatement(node, node.Select, func(newNode, parent SQLNode) { + parent.(*AlterView).Select = newNode.(SelectStatement) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a 
*application) rewriteRefOfAlterVschema(parent SQLNode, node *AlterVschema, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*AlterVschema).Table = newNode.(TableName) + }) { + return false + } + if !a.rewriteRefOfVindexSpec(node, node.VindexSpec, func(newNode, parent SQLNode) { + parent.(*AlterVschema).VindexSpec = newNode.(*VindexSpec) + }) { + return false + } + for x, el := range node.VindexCols { + if !a.rewriteColIdent(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*AlterVschema).VindexCols[idx] = newNode.(ColIdent) + } + }(x)) { + return false + } + } + if !a.rewriteRefOfAutoIncSpec(node, node.AutoIncSpec, func(newNode, parent SQLNode) { + parent.(*AlterVschema).AutoIncSpec = newNode.(*AutoIncSpec) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAndExpr(parent SQLNode, node *AndExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { + parent.(*AndExpr).Left = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Right, func(newNode, parent SQLNode) { + parent.(*AndExpr).Right = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAutoIncSpec(parent SQLNode, node *AutoIncSpec, replacer replacerFunc) bool { 
+ if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Column, func(newNode, parent SQLNode) { + parent.(*AutoIncSpec).Column = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteTableName(node, node.Sequence, func(newNode, parent SQLNode) { + parent.(*AutoIncSpec).Sequence = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfBegin(parent SQLNode, node *Begin, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfBinaryExpr(parent SQLNode, node *BinaryExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { + parent.(*BinaryExpr).Left = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Right, func(newNode, parent SQLNode) { + parent.(*BinaryExpr).Right = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCallProc(parent SQLNode, node *CallProc, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = 
replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Name, func(newNode, parent SQLNode) { + parent.(*CallProc).Name = newNode.(TableName) + }) { + return false + } + if !a.rewriteExprs(node, node.Params, func(newNode, parent SQLNode) { + parent.(*CallProc).Params = newNode.(Exprs) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCaseExpr(parent SQLNode, node *CaseExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*CaseExpr).Expr = newNode.(Expr) + }) { + return false + } + for x, el := range node.Whens { + if !a.rewriteRefOfWhen(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*CaseExpr).Whens[idx] = newNode.(*When) + } + }(x)) { + return false + } + } + if !a.rewriteExpr(node, node.Else, func(newNode, parent SQLNode) { + parent.(*CaseExpr).Else = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfChangeColumn(parent SQLNode, node *ChangeColumn, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfColName(node, node.OldColumn, func(newNode, parent SQLNode) { + parent.(*ChangeColumn).OldColumn = newNode.(*ColName) + }) { + return false + } + if !a.rewriteRefOfColumnDefinition(node, node.NewColDefinition, 
func(newNode, parent SQLNode) { + parent.(*ChangeColumn).NewColDefinition = newNode.(*ColumnDefinition) + }) { + return false + } + if !a.rewriteRefOfColName(node, node.First, func(newNode, parent SQLNode) { + parent.(*ChangeColumn).First = newNode.(*ColName) + }) { + return false + } + if !a.rewriteRefOfColName(node, node.After, func(newNode, parent SQLNode) { + parent.(*ChangeColumn).After = newNode.(*ColName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCheckConstraintDefinition(parent SQLNode, node *CheckConstraintDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*CheckConstraintDefinition).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteColIdent(parent SQLNode, node ColIdent, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfColName(parent SQLNode, node *ColName, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + 
parent.(*ColName).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteTableName(node, node.Qualifier, func(newNode, parent SQLNode) { + parent.(*ColName).Qualifier = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCollateExpr(parent SQLNode, node *CollateExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*CollateExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfColumnDefinition(parent SQLNode, node *ColumnDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*ColumnDefinition).Name = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfColumnType(parent SQLNode, node *ColumnType, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfLiteral(node, node.Length, func(newNode, parent SQLNode) { + parent.(*ColumnType).Length = newNode.(*Literal) + }) { + return 
false + } + if !a.rewriteRefOfLiteral(node, node.Scale, func(newNode, parent SQLNode) { + parent.(*ColumnType).Scale = newNode.(*Literal) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteColumns(parent SQLNode, node Columns, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteColIdent(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(Columns)[idx] = newNode.(ColIdent) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteComments(parent SQLNode, node Comments, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCommit(parent SQLNode, node *Commit, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfComparisonExpr(parent SQLNode, node *ComparisonExpr, replacer replacerFunc) bool { + if node == 
nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Left = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Right, func(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Right = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Escape, func(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Escape = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfConstraintDefinition(parent SQLNode, node *ConstraintDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*ConstraintDefinition).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteConstraintInfo(node, node.Details, func(newNode, parent SQLNode) { + parent.(*ConstraintDefinition).Details = newNode.(ConstraintInfo) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfConvertExpr(parent SQLNode, node *ConvertExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*ConvertExpr).Expr = newNode.(Expr) + }) { + return false + } + if 
!a.rewriteRefOfConvertType(node, node.Type, func(newNode, parent SQLNode) { + parent.(*ConvertExpr).Type = newNode.(*ConvertType) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfConvertType(parent SQLNode, node *ConvertType, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfLiteral(node, node.Length, func(newNode, parent SQLNode) { + parent.(*ConvertType).Length = newNode.(*Literal) + }) { + return false + } + if !a.rewriteRefOfLiteral(node, node.Scale, func(newNode, parent SQLNode) { + parent.(*ConvertType).Scale = newNode.(*Literal) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfConvertUsingExpr(parent SQLNode, node *ConvertUsingExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*ConvertUsingExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCreateDatabase(parent SQLNode, node *CreateDatabase, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, 
func(newNode, parent SQLNode) { + parent.(*CreateDatabase).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteTableIdent(node, node.DBName, func(newNode, parent SQLNode) { + parent.(*CreateDatabase).DBName = newNode.(TableIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCreateTable(parent SQLNode, node *CreateTable, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*CreateTable).Table = newNode.(TableName) + }) { + return false + } + if !a.rewriteRefOfTableSpec(node, node.TableSpec, func(newNode, parent SQLNode) { + parent.(*CreateTable).TableSpec = newNode.(*TableSpec) + }) { + return false + } + if !a.rewriteRefOfOptLike(node, node.OptLike, func(newNode, parent SQLNode) { + parent.(*CreateTable).OptLike = newNode.(*OptLike) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCreateView(parent SQLNode, node *CreateView, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.ViewName, func(newNode, parent SQLNode) { + parent.(*CreateView).ViewName = newNode.(TableName) + }) { + return false + } + if !a.rewriteColumns(node, node.Columns, func(newNode, parent SQLNode) { + parent.(*CreateView).Columns = newNode.(Columns) + }) { + return false + } + if !a.rewriteSelectStatement(node, node.Select, 
func(newNode, parent SQLNode) { + parent.(*CreateView).Select = newNode.(SelectStatement) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfCurTimeFuncExpr(parent SQLNode, node *CurTimeFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*CurTimeFuncExpr).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteExpr(node, node.Fsp, func(newNode, parent SQLNode) { + parent.(*CurTimeFuncExpr).Fsp = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDefault(parent SQLNode, node *Default, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDelete(parent SQLNode, node *Delete, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*Delete).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteTableNames(node, node.Targets, func(newNode, parent SQLNode) { + 
parent.(*Delete).Targets = newNode.(TableNames) + }) { + return false + } + if !a.rewriteTableExprs(node, node.TableExprs, func(newNode, parent SQLNode) { + parent.(*Delete).TableExprs = newNode.(TableExprs) + }) { + return false + } + if !a.rewritePartitions(node, node.Partitions, func(newNode, parent SQLNode) { + parent.(*Delete).Partitions = newNode.(Partitions) + }) { + return false + } + if !a.rewriteRefOfWhere(node, node.Where, func(newNode, parent SQLNode) { + parent.(*Delete).Where = newNode.(*Where) + }) { + return false + } + if !a.rewriteOrderBy(node, node.OrderBy, func(newNode, parent SQLNode) { + parent.(*Delete).OrderBy = newNode.(OrderBy) + }) { + return false + } + if !a.rewriteRefOfLimit(node, node.Limit, func(newNode, parent SQLNode) { + parent.(*Delete).Limit = newNode.(*Limit) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDerivedTable(parent SQLNode, node *DerivedTable, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSelectStatement(node, node.Select, func(newNode, parent SQLNode) { + parent.(*DerivedTable).Select = newNode.(SelectStatement) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDropColumn(parent SQLNode, node *DropColumn, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfColName(node, node.Name, func(newNode, parent SQLNode) { + parent.(*DropColumn).Name = 
newNode.(*ColName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDropDatabase(parent SQLNode, node *DropDatabase, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*DropDatabase).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteTableIdent(node, node.DBName, func(newNode, parent SQLNode) { + parent.(*DropDatabase).DBName = newNode.(TableIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDropKey(parent SQLNode, node *DropKey, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*DropKey).Name = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDropTable(parent SQLNode, node *DropTable, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableNames(node, node.FromTables, func(newNode, parent SQLNode) { + parent.(*DropTable).FromTables = newNode.(TableNames) + }) { + return false + } + if a.post != 
nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfDropView(parent SQLNode, node *DropView, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableNames(node, node.FromTables, func(newNode, parent SQLNode) { + parent.(*DropView).FromTables = newNode.(TableNames) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfExistsExpr(parent SQLNode, node *ExistsExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfSubquery(node, node.Subquery, func(newNode, parent SQLNode) { + parent.(*ExistsExpr).Subquery = newNode.(*Subquery) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfExplainStmt(parent SQLNode, node *ExplainStmt, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteStatement(node, node.Statement, func(newNode, parent SQLNode) { + parent.(*ExplainStmt).Statement = newNode.(Statement) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfExplainTab(parent 
SQLNode, node *ExplainTab, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*ExplainTab).Table = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteExprs(parent SQLNode, node Exprs, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(Exprs)[idx] = newNode.(Expr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfFlush(parent SQLNode, node *Flush, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableNames(node, node.TableNames, func(newNode, parent SQLNode) { + parent.(*Flush).TableNames = newNode.(TableNames) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfForce(parent SQLNode, node *Force, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if 
!a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfForeignKeyDefinition(parent SQLNode, node *ForeignKeyDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColumns(node, node.Source, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).Source = newNode.(Columns) + }) { + return false + } + if !a.rewriteTableName(node, node.ReferencedTable, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).ReferencedTable = newNode.(TableName) + }) { + return false + } + if !a.rewriteColumns(node, node.ReferencedColumns, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).ReferencedColumns = newNode.(Columns) + }) { + return false + } + if !a.rewriteReferenceAction(node, node.OnDelete, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).OnDelete = newNode.(ReferenceAction) + }) { + return false + } + if !a.rewriteReferenceAction(node, node.OnUpdate, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).OnUpdate = newNode.(ReferenceAction) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfFuncExpr(parent SQLNode, node *FuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableIdent(node, node.Qualifier, func(newNode, parent SQLNode) { + parent.(*FuncExpr).Qualifier = newNode.(TableIdent) + }) { + return false + 
} + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*FuncExpr).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteSelectExprs(node, node.Exprs, func(newNode, parent SQLNode) { + parent.(*FuncExpr).Exprs = newNode.(SelectExprs) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteGroupBy(parent SQLNode, node GroupBy, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(GroupBy)[idx] = newNode.(Expr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGroupConcatExpr(parent SQLNode, node *GroupConcatExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSelectExprs(node, node.Exprs, func(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).Exprs = newNode.(SelectExprs) + }) { + return false + } + if !a.rewriteOrderBy(node, node.OrderBy, func(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).OrderBy = newNode.(OrderBy) + }) { + return false + } + if !a.rewriteRefOfLimit(node, node.Limit, func(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).Limit = newNode.(*Limit) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + 
return true +} +func (a *application) rewriteRefOfIndexDefinition(parent SQLNode, node *IndexDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfIndexInfo(node, node.Info, func(newNode, parent SQLNode) { + parent.(*IndexDefinition).Info = newNode.(*IndexInfo) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfIndexHints(parent SQLNode, node *IndexHints, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node.Indexes { + if !a.rewriteColIdent(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*IndexHints).Indexes[idx] = newNode.(ColIdent) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfIndexInfo(parent SQLNode, node *IndexInfo, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*IndexInfo).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteColIdent(node, node.ConstraintName, func(newNode, parent SQLNode) { + parent.(*IndexInfo).ConstraintName = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if 
!a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfInsert(parent SQLNode, node *Insert, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*Insert).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*Insert).Table = newNode.(TableName) + }) { + return false + } + if !a.rewritePartitions(node, node.Partitions, func(newNode, parent SQLNode) { + parent.(*Insert).Partitions = newNode.(Partitions) + }) { + return false + } + if !a.rewriteColumns(node, node.Columns, func(newNode, parent SQLNode) { + parent.(*Insert).Columns = newNode.(Columns) + }) { + return false + } + if !a.rewriteInsertRows(node, node.Rows, func(newNode, parent SQLNode) { + parent.(*Insert).Rows = newNode.(InsertRows) + }) { + return false + } + if !a.rewriteOnDup(node, node.OnDup, func(newNode, parent SQLNode) { + parent.(*Insert).OnDup = newNode.(OnDup) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfIntervalExpr(parent SQLNode, node *IntervalExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*IntervalExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) 
rewriteRefOfIsExpr(parent SQLNode, node *IsExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*IsExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteJoinCondition(parent SQLNode, node JoinCondition, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.On, func(newNode, parent SQLNode) { + panic("[BUG] tried to replace 'On' on 'JoinCondition'") + }) { + return false + } + if !a.rewriteColumns(node, node.Using, func(newNode, parent SQLNode) { + panic("[BUG] tried to replace 'Using' on 'JoinCondition'") + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfJoinTableExpr(parent SQLNode, node *JoinTableExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableExpr(node, node.LeftExpr, func(newNode, parent SQLNode) { + parent.(*JoinTableExpr).LeftExpr = newNode.(TableExpr) + }) { + return false + } + if !a.rewriteTableExpr(node, node.RightExpr, func(newNode, parent SQLNode) { + parent.(*JoinTableExpr).RightExpr = newNode.(TableExpr) + }) { + return false + } + if !a.rewriteJoinCondition(node, node.Condition, func(newNode, parent SQLNode) { + parent.(*JoinTableExpr).Condition = 
newNode.(JoinCondition) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfKeyState(parent SQLNode, node *KeyState, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfLimit(parent SQLNode, node *Limit, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Offset, func(newNode, parent SQLNode) { + parent.(*Limit).Offset = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Rowcount, func(newNode, parent SQLNode) { + parent.(*Limit).Rowcount = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteListArg(parent SQLNode, node ListArg, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfLiteral(parent SQLNode, node *Literal, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + 
a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfLoad(parent SQLNode, node *Load, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfLockOption(parent SQLNode, node *LockOption, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfLockTables(parent SQLNode, node *LockTables, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfMatchExpr(parent SQLNode, node *MatchExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSelectExprs(node, node.Columns, 
func(newNode, parent SQLNode) { + parent.(*MatchExpr).Columns = newNode.(SelectExprs) + }) { + return false + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*MatchExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfModifyColumn(parent SQLNode, node *ModifyColumn, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfColumnDefinition(node, node.NewColDefinition, func(newNode, parent SQLNode) { + parent.(*ModifyColumn).NewColDefinition = newNode.(*ColumnDefinition) + }) { + return false + } + if !a.rewriteRefOfColName(node, node.First, func(newNode, parent SQLNode) { + parent.(*ModifyColumn).First = newNode.(*ColName) + }) { + return false + } + if !a.rewriteRefOfColName(node, node.After, func(newNode, parent SQLNode) { + parent.(*ModifyColumn).After = newNode.(*ColName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfNextval(parent SQLNode, node *Nextval, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*Nextval).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfNotExpr(parent SQLNode, node 
*NotExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*NotExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfNullVal(parent SQLNode, node *NullVal, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteOnDup(parent SQLNode, node OnDup, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteRefOfUpdateExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(OnDup)[idx] = newNode.(*UpdateExpr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfOptLike(parent SQLNode, node *OptLike, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.LikeTable, func(newNode, parent SQLNode) { + parent.(*OptLike).LikeTable = 
newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfOrExpr(parent SQLNode, node *OrExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { + parent.(*OrExpr).Left = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Right, func(newNode, parent SQLNode) { + parent.(*OrExpr).Right = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfOrder(parent SQLNode, node *Order, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*Order).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteOrderBy(parent SQLNode, node OrderBy, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteRefOfOrder(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(OrderBy)[idx] = newNode.(*Order) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = 
replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfOrderByOption(parent SQLNode, node *OrderByOption, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColumns(node, node.Cols, func(newNode, parent SQLNode) { + parent.(*OrderByOption).Cols = newNode.(Columns) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfOtherAdmin(parent SQLNode, node *OtherAdmin, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfOtherRead(parent SQLNode, node *OtherRead, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfParenSelect(parent SQLNode, node *ParenSelect, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSelectStatement(node, node.Select, func(newNode, parent SQLNode) 
{ + parent.(*ParenSelect).Select = newNode.(SelectStatement) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfParenTableExpr(parent SQLNode, node *ParenTableExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableExprs(node, node.Exprs, func(newNode, parent SQLNode) { + parent.(*ParenTableExpr).Exprs = newNode.(TableExprs) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfPartitionDefinition(parent SQLNode, node *PartitionDefinition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*PartitionDefinition).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteExpr(node, node.Limit, func(newNode, parent SQLNode) { + parent.(*PartitionDefinition).Limit = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfPartitionSpec(parent SQLNode, node *PartitionSpec, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewritePartitions(node, node.Names, func(newNode, parent SQLNode) { + 
parent.(*PartitionSpec).Names = newNode.(Partitions) + }) { + return false + } + if !a.rewriteRefOfLiteral(node, node.Number, func(newNode, parent SQLNode) { + parent.(*PartitionSpec).Number = newNode.(*Literal) + }) { + return false + } + if !a.rewriteTableName(node, node.TableName, func(newNode, parent SQLNode) { + parent.(*PartitionSpec).TableName = newNode.(TableName) + }) { + return false + } + for x, el := range node.Definitions { + if !a.rewriteRefOfPartitionDefinition(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*PartitionSpec).Definitions[idx] = newNode.(*PartitionDefinition) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewritePartitions(parent SQLNode, node Partitions, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteColIdent(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(Partitions)[idx] = newNode.(ColIdent) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRangeCond(parent SQLNode, node *RangeCond, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { + parent.(*RangeCond).Left = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.From, func(newNode, parent SQLNode) { + parent.(*RangeCond).From = 
newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.To, func(newNode, parent SQLNode) { + parent.(*RangeCond).To = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRelease(parent SQLNode, node *Release, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*Release).Name = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRenameIndex(parent SQLNode, node *RenameIndex, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.OldName, func(newNode, parent SQLNode) { + parent.(*RenameIndex).OldName = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteColIdent(node, node.NewName, func(newNode, parent SQLNode) { + parent.(*RenameIndex).NewName = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRenameTable(parent SQLNode, node *RenameTable, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + 
a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRenameTableName(parent SQLNode, node *RenameTableName, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*RenameTableName).Table = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRevertMigration(parent SQLNode, node *RevertMigration, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfRollback(parent SQLNode, node *Rollback, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSRollback(parent SQLNode, node *SRollback, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, 
func(newNode, parent SQLNode) { + parent.(*SRollback).Name = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSavepoint(parent SQLNode, node *Savepoint, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*Savepoint).Name = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSelect(parent SQLNode, node *Select, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*Select).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteSelectExprs(node, node.SelectExprs, func(newNode, parent SQLNode) { + parent.(*Select).SelectExprs = newNode.(SelectExprs) + }) { + return false + } + if !a.rewriteTableExprs(node, node.From, func(newNode, parent SQLNode) { + parent.(*Select).From = newNode.(TableExprs) + }) { + return false + } + if !a.rewriteRefOfWhere(node, node.Where, func(newNode, parent SQLNode) { + parent.(*Select).Where = newNode.(*Where) + }) { + return false + } + if !a.rewriteGroupBy(node, node.GroupBy, func(newNode, parent SQLNode) { + parent.(*Select).GroupBy = newNode.(GroupBy) + }) { + return false + } + if !a.rewriteRefOfWhere(node, node.Having, func(newNode, parent SQLNode) { + parent.(*Select).Having = 
newNode.(*Where) + }) { + return false + } + if !a.rewriteOrderBy(node, node.OrderBy, func(newNode, parent SQLNode) { + parent.(*Select).OrderBy = newNode.(OrderBy) + }) { + return false + } + if !a.rewriteRefOfLimit(node, node.Limit, func(newNode, parent SQLNode) { + parent.(*Select).Limit = newNode.(*Limit) + }) { + return false + } + if !a.rewriteRefOfSelectInto(node, node.Into, func(newNode, parent SQLNode) { + parent.(*Select).Into = newNode.(*SelectInto) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteSelectExprs(parent SQLNode, node SelectExprs, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteSelectExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(SelectExprs)[idx] = newNode.(SelectExpr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSelectInto(parent SQLNode, node *SelectInto, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSet(parent SQLNode, node *Set, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if 
!a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*Set).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteSetExprs(node, node.Exprs, func(newNode, parent SQLNode) { + parent.(*Set).Exprs = newNode.(SetExprs) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSetExpr(parent SQLNode, node *SetExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*SetExpr).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*SetExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteSetExprs(parent SQLNode, node SetExprs, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteRefOfSetExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(SetExprs)[idx] = newNode.(*SetExpr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSetTransaction(parent SQLNode, node *SetTransaction, replacer replacerFunc) bool { + if node == nil { + return 
true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSQLNode(node, node.SQLNode, func(newNode, parent SQLNode) { + parent.(*SetTransaction).SQLNode = newNode.(SQLNode) + }) { + return false + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*SetTransaction).Comments = newNode.(Comments) + }) { + return false + } + for x, el := range node.Characteristics { + if !a.rewriteCharacteristic(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*SetTransaction).Characteristics[idx] = newNode.(Characteristic) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfShow(parent SQLNode, node *Show, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteShowInternal(node, node.Internal, func(newNode, parent SQLNode) { + parent.(*Show).Internal = newNode.(ShowInternal) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfShowBasic(parent SQLNode, node *ShowBasic, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Tbl, func(newNode, parent SQLNode) { + parent.(*ShowBasic).Tbl = newNode.(TableName) + }) { + return false + } + if !a.rewriteTableIdent(node, node.DbName, func(newNode, parent SQLNode) { + 
parent.(*ShowBasic).DbName = newNode.(TableIdent) + }) { + return false + } + if !a.rewriteRefOfShowFilter(node, node.Filter, func(newNode, parent SQLNode) { + parent.(*ShowBasic).Filter = newNode.(*ShowFilter) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfShowCreate(parent SQLNode, node *ShowCreate, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Op, func(newNode, parent SQLNode) { + parent.(*ShowCreate).Op = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfShowFilter(parent SQLNode, node *ShowFilter, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Filter, func(newNode, parent SQLNode) { + parent.(*ShowFilter).Filter = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfShowLegacy(parent SQLNode, node *ShowLegacy, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.OnTable, func(newNode, parent SQLNode) { + parent.(*ShowLegacy).OnTable = newNode.(TableName) + }) { + return false + } 
+ if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*ShowLegacy).Table = newNode.(TableName) + }) { + return false + } + if !a.rewriteExpr(node, node.ShowCollationFilterOpt, func(newNode, parent SQLNode) { + parent.(*ShowLegacy).ShowCollationFilterOpt = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfStarExpr(parent SQLNode, node *StarExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.TableName, func(newNode, parent SQLNode) { + parent.(*StarExpr).TableName = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfStream(parent SQLNode, node *Stream, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*Stream).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteSelectExpr(node, node.SelectExpr, func(newNode, parent SQLNode) { + parent.(*Stream).SelectExpr = newNode.(SelectExpr) + }) { + return false + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*Stream).Table = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) 
rewriteRefOfSubquery(parent SQLNode, node *Subquery, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSelectStatement(node, node.Select, func(newNode, parent SQLNode) { + parent.(*Subquery).Select = newNode.(SelectStatement) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfSubstrExpr(parent SQLNode, node *SubstrExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfColName(node, node.Name, func(newNode, parent SQLNode) { + parent.(*SubstrExpr).Name = newNode.(*ColName) + }) { + return false + } + if !a.rewriteRefOfLiteral(node, node.StrVal, func(newNode, parent SQLNode) { + parent.(*SubstrExpr).StrVal = newNode.(*Literal) + }) { + return false + } + if !a.rewriteExpr(node, node.From, func(newNode, parent SQLNode) { + parent.(*SubstrExpr).From = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.To, func(newNode, parent SQLNode) { + parent.(*SubstrExpr).To = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteTableExprs(parent SQLNode, node TableExprs, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteTableExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent 
SQLNode) { + parent.(TableExprs)[idx] = newNode.(TableExpr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteTableIdent(parent SQLNode, node TableIdent, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteTableName(parent SQLNode, node TableName, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableIdent(node, node.Name, func(newNode, parent SQLNode) { + panic("[BUG] tried to replace 'Name' on 'TableName'") + }) { + return false + } + if !a.rewriteTableIdent(node, node.Qualifier, func(newNode, parent SQLNode) { + panic("[BUG] tried to replace 'Qualifier' on 'TableName'") + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteTableNames(parent SQLNode, node TableNames, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteTableName(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(TableNames)[idx] = newNode.(TableName) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if 
!a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteTableOptions(parent SQLNode, node TableOptions, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfTableSpec(parent SQLNode, node *TableSpec, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node.Columns { + if !a.rewriteRefOfColumnDefinition(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*TableSpec).Columns[idx] = newNode.(*ColumnDefinition) + } + }(x)) { + return false + } + } + for x, el := range node.Indexes { + if !a.rewriteRefOfIndexDefinition(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*TableSpec).Indexes[idx] = newNode.(*IndexDefinition) + } + }(x)) { + return false + } + } + for x, el := range node.Constraints { + if !a.rewriteRefOfConstraintDefinition(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*TableSpec).Constraints[idx] = newNode.(*ConstraintDefinition) + } + }(x)) { + return false + } + } + if !a.rewriteTableOptions(node, node.Options, func(newNode, parent SQLNode) { + parent.(*TableSpec).Options = newNode.(TableOptions) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfTablespaceOperation(parent SQLNode, node *TablespaceOperation, 
replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfTimestampFuncExpr(parent SQLNode, node *TimestampFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr1, func(newNode, parent SQLNode) { + parent.(*TimestampFuncExpr).Expr1 = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Expr2, func(newNode, parent SQLNode) { + parent.(*TimestampFuncExpr).Expr2 = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfTruncateTable(parent SQLNode, node *TruncateTable, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*TruncateTable).Table = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfUnaryExpr(parent SQLNode, node *UnaryExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + 
return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*UnaryExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfUnion(parent SQLNode, node *Union, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSelectStatement(node, node.FirstStatement, func(newNode, parent SQLNode) { + parent.(*Union).FirstStatement = newNode.(SelectStatement) + }) { + return false + } + for x, el := range node.UnionSelects { + if !a.rewriteRefOfUnionSelect(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*Union).UnionSelects[idx] = newNode.(*UnionSelect) + } + }(x)) { + return false + } + } + if !a.rewriteOrderBy(node, node.OrderBy, func(newNode, parent SQLNode) { + parent.(*Union).OrderBy = newNode.(OrderBy) + }) { + return false + } + if !a.rewriteRefOfLimit(node, node.Limit, func(newNode, parent SQLNode) { + parent.(*Union).Limit = newNode.(*Limit) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfUnionSelect(parent SQLNode, node *UnionSelect, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteSelectStatement(node, node.Statement, func(newNode, parent SQLNode) { + parent.(*UnionSelect).Statement = newNode.(SelectStatement) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + 
a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfUnlockTables(parent SQLNode, node *UnlockTables, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfUpdate(parent SQLNode, node *Update, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*Update).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteTableExprs(node, node.TableExprs, func(newNode, parent SQLNode) { + parent.(*Update).TableExprs = newNode.(TableExprs) + }) { + return false + } + if !a.rewriteUpdateExprs(node, node.Exprs, func(newNode, parent SQLNode) { + parent.(*Update).Exprs = newNode.(UpdateExprs) + }) { + return false + } + if !a.rewriteRefOfWhere(node, node.Where, func(newNode, parent SQLNode) { + parent.(*Update).Where = newNode.(*Where) + }) { + return false + } + if !a.rewriteOrderBy(node, node.OrderBy, func(newNode, parent SQLNode) { + parent.(*Update).OrderBy = newNode.(OrderBy) + }) { + return false + } + if !a.rewriteRefOfLimit(node, node.Limit, func(newNode, parent SQLNode) { + parent.(*Update).Limit = newNode.(*Limit) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfUpdateExpr(parent SQLNode, node *UpdateExpr, replacer replacerFunc) bool { + if 
node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfColName(node, node.Name, func(newNode, parent SQLNode) { + parent.(*UpdateExpr).Name = newNode.(*ColName) + }) { + return false + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*UpdateExpr).Expr = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteUpdateExprs(parent SQLNode, node UpdateExprs, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteRefOfUpdateExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(UpdateExprs)[idx] = newNode.(*UpdateExpr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfUse(parent SQLNode, node *Use, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableIdent(node, node.DBName, func(newNode, parent SQLNode) { + parent.(*Use).DBName = newNode.(TableIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfVStream(parent SQLNode, node *VStream, replacer replacerFunc) bool { + if node == nil { + return true + } + if 
a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteComments(node, node.Comments, func(newNode, parent SQLNode) { + parent.(*VStream).Comments = newNode.(Comments) + }) { + return false + } + if !a.rewriteSelectExpr(node, node.SelectExpr, func(newNode, parent SQLNode) { + parent.(*VStream).SelectExpr = newNode.(SelectExpr) + }) { + return false + } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*VStream).Table = newNode.(TableName) + }) { + return false + } + if !a.rewriteRefOfWhere(node, node.Where, func(newNode, parent SQLNode) { + parent.(*VStream).Where = newNode.(*Where) + }) { + return false + } + if !a.rewriteRefOfLimit(node, node.Limit, func(newNode, parent SQLNode) { + parent.(*VStream).Limit = newNode.(*Limit) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteValTuple(parent SQLNode, node ValTuple, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(ValTuple)[idx] = newNode.(Expr) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfValidation(parent SQLNode, node *Validation, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == 
nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteValues(parent SQLNode, node Values, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + for x, el := range node { + if !a.rewriteValTuple(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(Values)[idx] = newNode.(ValTuple) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfValuesFuncExpr(parent SQLNode, node *ValuesFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteRefOfColName(node, node.Name, func(newNode, parent SQLNode) { + parent.(*ValuesFuncExpr).Name = newNode.(*ColName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteVindexParam(parent SQLNode, node VindexParam, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Key, func(newNode, parent SQLNode) { + panic("[BUG] tried to replace 'Key' on 'VindexParam'") + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfVindexSpec(parent 
SQLNode, node *VindexSpec, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*VindexSpec).Name = newNode.(ColIdent) + }) { + return false + } + if !a.rewriteColIdent(node, node.Type, func(newNode, parent SQLNode) { + parent.(*VindexSpec).Type = newNode.(ColIdent) + }) { + return false + } + for x, el := range node.Params { + if !a.rewriteVindexParam(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*VindexSpec).Params[idx] = newNode.(VindexParam) + } + }(x)) { + return false + } + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfWhen(parent SQLNode, node *When, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Cond, func(newNode, parent SQLNode) { + parent.(*When).Cond = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Val, func(newNode, parent SQLNode) { + parent.(*When).Val = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfWhere(parent SQLNode, node *Where, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { + parent.(*Where).Expr = newNode.(Expr) + }) 
{ + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfXorExpr(parent SQLNode, node *XorExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { + parent.(*XorExpr).Left = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Right, func(newNode, parent SQLNode) { + parent.(*XorExpr).Right = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteAlterOption(parent SQLNode, node AlterOption, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *AddColumns: + return a.rewriteRefOfAddColumns(parent, node, replacer) + case *AddConstraintDefinition: + return a.rewriteRefOfAddConstraintDefinition(parent, node, replacer) + case *AddIndexDefinition: + return a.rewriteRefOfAddIndexDefinition(parent, node, replacer) + case AlgorithmValue: + return a.rewriteAlgorithmValue(parent, node, replacer) + case *AlterCharset: + return a.rewriteRefOfAlterCharset(parent, node, replacer) + case *AlterColumn: + return a.rewriteRefOfAlterColumn(parent, node, replacer) + case *ChangeColumn: + return a.rewriteRefOfChangeColumn(parent, node, replacer) + case *DropColumn: + return a.rewriteRefOfDropColumn(parent, node, replacer) + case *DropKey: + return a.rewriteRefOfDropKey(parent, node, replacer) + case *Force: + return a.rewriteRefOfForce(parent, node, replacer) + case *KeyState: + return a.rewriteRefOfKeyState(parent, node, replacer) + case *LockOption: + return 
a.rewriteRefOfLockOption(parent, node, replacer) + case *ModifyColumn: + return a.rewriteRefOfModifyColumn(parent, node, replacer) + case *OrderByOption: + return a.rewriteRefOfOrderByOption(parent, node, replacer) + case *RenameIndex: + return a.rewriteRefOfRenameIndex(parent, node, replacer) + case *RenameTableName: + return a.rewriteRefOfRenameTableName(parent, node, replacer) + case TableOptions: + return a.rewriteTableOptions(parent, node, replacer) + case *TablespaceOperation: + return a.rewriteRefOfTablespaceOperation(parent, node, replacer) + case *Validation: + return a.rewriteRefOfValidation(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteCharacteristic(parent SQLNode, node Characteristic, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case AccessMode: + return a.rewriteAccessMode(parent, node, replacer) + case IsolationLevel: + return a.rewriteIsolationLevel(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteColTuple(parent SQLNode, node ColTuple, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case ListArg: + return a.rewriteListArg(parent, node, replacer) + case *Subquery: + return a.rewriteRefOfSubquery(parent, node, replacer) + case ValTuple: + return a.rewriteValTuple(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteConstraintInfo(parent SQLNode, node ConstraintInfo, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *CheckConstraintDefinition: + return a.rewriteRefOfCheckConstraintDefinition(parent, node, replacer) + case *ForeignKeyDefinition: + return a.rewriteRefOfForeignKeyDefinition(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) 
rewriteDBDDLStatement(parent SQLNode, node DBDDLStatement, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *AlterDatabase: + return a.rewriteRefOfAlterDatabase(parent, node, replacer) + case *CreateDatabase: + return a.rewriteRefOfCreateDatabase(parent, node, replacer) + case *DropDatabase: + return a.rewriteRefOfDropDatabase(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteDDLStatement(parent SQLNode, node DDLStatement, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *AlterTable: + return a.rewriteRefOfAlterTable(parent, node, replacer) + case *AlterView: + return a.rewriteRefOfAlterView(parent, node, replacer) + case *CreateTable: + return a.rewriteRefOfCreateTable(parent, node, replacer) + case *CreateView: + return a.rewriteRefOfCreateView(parent, node, replacer) + case *DropTable: + return a.rewriteRefOfDropTable(parent, node, replacer) + case *DropView: + return a.rewriteRefOfDropView(parent, node, replacer) + case *RenameTable: + return a.rewriteRefOfRenameTable(parent, node, replacer) + case *TruncateTable: + return a.rewriteRefOfTruncateTable(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteExplain(parent SQLNode, node Explain, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *ExplainStmt: + return a.rewriteRefOfExplainStmt(parent, node, replacer) + case *ExplainTab: + return a.rewriteRefOfExplainTab(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *AndExpr: + return a.rewriteRefOfAndExpr(parent, node, replacer) + case Argument: + return 
a.rewriteArgument(parent, node, replacer) + case *BinaryExpr: + return a.rewriteRefOfBinaryExpr(parent, node, replacer) + case BoolVal: + return a.rewriteBoolVal(parent, node, replacer) + case *CaseExpr: + return a.rewriteRefOfCaseExpr(parent, node, replacer) + case *ColName: + return a.rewriteRefOfColName(parent, node, replacer) + case *CollateExpr: + return a.rewriteRefOfCollateExpr(parent, node, replacer) + case *ComparisonExpr: + return a.rewriteRefOfComparisonExpr(parent, node, replacer) + case *ConvertExpr: + return a.rewriteRefOfConvertExpr(parent, node, replacer) + case *ConvertUsingExpr: + return a.rewriteRefOfConvertUsingExpr(parent, node, replacer) + case *CurTimeFuncExpr: + return a.rewriteRefOfCurTimeFuncExpr(parent, node, replacer) + case *Default: + return a.rewriteRefOfDefault(parent, node, replacer) + case *ExistsExpr: + return a.rewriteRefOfExistsExpr(parent, node, replacer) + case *FuncExpr: + return a.rewriteRefOfFuncExpr(parent, node, replacer) + case *GroupConcatExpr: + return a.rewriteRefOfGroupConcatExpr(parent, node, replacer) + case *IntervalExpr: + return a.rewriteRefOfIntervalExpr(parent, node, replacer) + case *IsExpr: + return a.rewriteRefOfIsExpr(parent, node, replacer) + case ListArg: + return a.rewriteListArg(parent, node, replacer) + case *Literal: + return a.rewriteRefOfLiteral(parent, node, replacer) + case *MatchExpr: + return a.rewriteRefOfMatchExpr(parent, node, replacer) + case *NotExpr: + return a.rewriteRefOfNotExpr(parent, node, replacer) + case *NullVal: + return a.rewriteRefOfNullVal(parent, node, replacer) + case *OrExpr: + return a.rewriteRefOfOrExpr(parent, node, replacer) + case *RangeCond: + return a.rewriteRefOfRangeCond(parent, node, replacer) + case *Subquery: + return a.rewriteRefOfSubquery(parent, node, replacer) + case *SubstrExpr: + return a.rewriteRefOfSubstrExpr(parent, node, replacer) + case *TimestampFuncExpr: + return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *UnaryExpr: + return 
a.rewriteRefOfUnaryExpr(parent, node, replacer) + case ValTuple: + return a.rewriteValTuple(parent, node, replacer) + case *ValuesFuncExpr: + return a.rewriteRefOfValuesFuncExpr(parent, node, replacer) + case *XorExpr: + return a.rewriteRefOfXorExpr(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteInsertRows(parent SQLNode, node InsertRows, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *ParenSelect: + return a.rewriteRefOfParenSelect(parent, node, replacer) + case *Select: + return a.rewriteRefOfSelect(parent, node, replacer) + case *Union: + return a.rewriteRefOfUnion(parent, node, replacer) + case Values: + return a.rewriteValues(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteSelectExpr(parent SQLNode, node SelectExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *AliasedExpr: + return a.rewriteRefOfAliasedExpr(parent, node, replacer) + case *Nextval: + return a.rewriteRefOfNextval(parent, node, replacer) + case *StarExpr: + return a.rewriteRefOfStarExpr(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteSelectStatement(parent SQLNode, node SelectStatement, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *ParenSelect: + return a.rewriteRefOfParenSelect(parent, node, replacer) + case *Select: + return a.rewriteRefOfSelect(parent, node, replacer) + case *Union: + return a.rewriteRefOfUnion(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteShowInternal(parent SQLNode, node ShowInternal, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *ShowBasic: + return 
a.rewriteRefOfShowBasic(parent, node, replacer) + case *ShowCreate: + return a.rewriteRefOfShowCreate(parent, node, replacer) + case *ShowLegacy: + return a.rewriteRefOfShowLegacy(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteSimpleTableExpr(parent SQLNode, node SimpleTableExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *DerivedTable: + return a.rewriteRefOfDerivedTable(parent, node, replacer) + case TableName: + return a.rewriteTableName(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *AlterDatabase: + return a.rewriteRefOfAlterDatabase(parent, node, replacer) + case *AlterMigration: + return a.rewriteRefOfAlterMigration(parent, node, replacer) + case *AlterTable: + return a.rewriteRefOfAlterTable(parent, node, replacer) + case *AlterView: + return a.rewriteRefOfAlterView(parent, node, replacer) + case *AlterVschema: + return a.rewriteRefOfAlterVschema(parent, node, replacer) + case *Begin: + return a.rewriteRefOfBegin(parent, node, replacer) + case *CallProc: + return a.rewriteRefOfCallProc(parent, node, replacer) + case *Commit: + return a.rewriteRefOfCommit(parent, node, replacer) + case *CreateDatabase: + return a.rewriteRefOfCreateDatabase(parent, node, replacer) + case *CreateTable: + return a.rewriteRefOfCreateTable(parent, node, replacer) + case *CreateView: + return a.rewriteRefOfCreateView(parent, node, replacer) + case *Delete: + return a.rewriteRefOfDelete(parent, node, replacer) + case *DropDatabase: + return a.rewriteRefOfDropDatabase(parent, node, replacer) + case *DropTable: + return a.rewriteRefOfDropTable(parent, node, replacer) + case *DropView: + return a.rewriteRefOfDropView(parent, node, replacer) + 
case *ExplainStmt: + return a.rewriteRefOfExplainStmt(parent, node, replacer) + case *ExplainTab: + return a.rewriteRefOfExplainTab(parent, node, replacer) + case *Flush: + return a.rewriteRefOfFlush(parent, node, replacer) + case *Insert: + return a.rewriteRefOfInsert(parent, node, replacer) + case *Load: + return a.rewriteRefOfLoad(parent, node, replacer) + case *LockTables: + return a.rewriteRefOfLockTables(parent, node, replacer) + case *OtherAdmin: + return a.rewriteRefOfOtherAdmin(parent, node, replacer) + case *OtherRead: + return a.rewriteRefOfOtherRead(parent, node, replacer) + case *ParenSelect: + return a.rewriteRefOfParenSelect(parent, node, replacer) + case *Release: + return a.rewriteRefOfRelease(parent, node, replacer) + case *RenameTable: + return a.rewriteRefOfRenameTable(parent, node, replacer) + case *RevertMigration: + return a.rewriteRefOfRevertMigration(parent, node, replacer) + case *Rollback: + return a.rewriteRefOfRollback(parent, node, replacer) + case *SRollback: + return a.rewriteRefOfSRollback(parent, node, replacer) + case *Savepoint: + return a.rewriteRefOfSavepoint(parent, node, replacer) + case *Select: + return a.rewriteRefOfSelect(parent, node, replacer) + case *Set: + return a.rewriteRefOfSet(parent, node, replacer) + case *SetTransaction: + return a.rewriteRefOfSetTransaction(parent, node, replacer) + case *Show: + return a.rewriteRefOfShow(parent, node, replacer) + case *Stream: + return a.rewriteRefOfStream(parent, node, replacer) + case *TruncateTable: + return a.rewriteRefOfTruncateTable(parent, node, replacer) + case *Union: + return a.rewriteRefOfUnion(parent, node, replacer) + case *UnlockTables: + return a.rewriteRefOfUnlockTables(parent, node, replacer) + case *Update: + return a.rewriteRefOfUpdate(parent, node, replacer) + case *Use: + return a.rewriteRefOfUse(parent, node, replacer) + case *VStream: + return a.rewriteRefOfVStream(parent, node, replacer) + default: + // this should never happen + return true + } +} 
+func (a *application) rewriteTableExpr(parent SQLNode, node TableExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + switch node := node.(type) { + case *AliasedTableExpr: + return a.rewriteRefOfAliasedTableExpr(parent, node, replacer) + case *JoinTableExpr: + return a.rewriteRefOfJoinTableExpr(parent, node, replacer) + case *ParenTableExpr: + return a.rewriteRefOfParenTableExpr(parent, node, replacer) + default: + // this should never happen + return true + } +} +func (a *application) rewriteAccessMode(parent SQLNode, node AccessMode, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteAlgorithmValue(parent SQLNode, node AlgorithmValue, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteArgument(parent SQLNode, node Argument, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteBoolVal(parent SQLNode, node BoolVal, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if 
a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteIsolationLevel(parent SQLNode, node IsolationLevel, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteReferenceAction(parent SQLNode, node ReferenceAction, replacer replacerFunc) bool { + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfColIdent(parent SQLNode, node *ColIdent, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfJoinCondition(parent SQLNode, node *JoinCondition, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.On, func(newNode, parent SQLNode) { + parent.(*JoinCondition).On = newNode.(Expr) + }) { + return false + } + if !a.rewriteColumns(node, node.Using, func(newNode, parent SQLNode) { + 
parent.(*JoinCondition).Using = newNode.(Columns) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfTableIdent(parent SQLNode, node *TableIdent, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfTableName(parent SQLNode, node *TableName, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteTableIdent(node, node.Name, func(newNode, parent SQLNode) { + parent.(*TableName).Name = newNode.(TableIdent) + }) { + return false + } + if !a.rewriteTableIdent(node, node.Qualifier, func(newNode, parent SQLNode) { + parent.(*TableName).Qualifier = newNode.(TableIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfVindexParam(parent SQLNode, node *VindexParam, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteColIdent(node, node.Key, func(newNode, parent SQLNode) { + parent.(*VindexParam).Key = newNode.(ColIdent) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + 
return false + } + } + return true +} diff --git a/go/vt/sqlparser/ast_rewriting.go b/go/vt/sqlparser/ast_rewriting.go index 7791f1cfd8f..017ddfbdd6d 100644 --- a/go/vt/sqlparser/ast_rewriting.go +++ b/go/vt/sqlparser/ast_rewriting.go @@ -33,9 +33,12 @@ type RewriteASTResult struct { } // PrepareAST will normalize the query -func PrepareAST(in Statement, bindVars map[string]*querypb.BindVariable, prefix string, parameterize bool, keyspace string) (*RewriteASTResult, error) { +func PrepareAST(in Statement, reservedVars BindVars, bindVars map[string]*querypb.BindVariable, prefix string, parameterize bool, keyspace string) (*RewriteASTResult, error) { if parameterize { - Normalize(in, bindVars, prefix) + err := Normalize(in, reservedVars, bindVars, prefix) + if err != nil { + return nil, err + } } return RewriteAST(in, keyspace) } @@ -45,14 +48,16 @@ func RewriteAST(in Statement, keyspace string) (*RewriteASTResult, error) { er := newExpressionRewriter(keyspace) er.shouldRewriteDatabaseFunc = shouldRewriteDatabaseFunc(in) setRewriter := &setNormalizer{} - out, ok := Rewrite(in, er.rewrite, setRewriter.rewriteSetComingUp).(Statement) - if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "statement rewriting returned a non statement: %s", String(out)) - } + result := Rewrite(in, er.rewrite, setRewriter.rewriteSetComingUp) if setRewriter.err != nil { return nil, setRewriter.err } + out, ok := result.(Statement) + if !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "statement rewriting returned a non statement: %s", String(out)) + } + r := &RewriteASTResult{ AST: out, BindVarNeeds: er.bindVars, @@ -178,6 +183,13 @@ func (er *expressionRewriter) rewrite(cursor *Cursor) bool { node.Expr = aliasTableName cursor.Replace(node) } + case *ShowBasic: + if node.Command == VariableGlobal || node.Command == VariableSession { + varsToAdd := sysvars.GetInterestingVariables() + for _, sysVar := range varsToAdd { + er.bindVars.AddSysVar(sysVar) + } + } } return true 
} @@ -229,15 +241,19 @@ func (er *expressionRewriter) sysVarRewrite(cursor *Cursor, node *ColName) { switch lowered { case sysvars.Autocommit.Name, sysvars.ClientFoundRows.Name, - sysvars.SkipQueryPlanCache.Name, - sysvars.SQLSelectLimit.Name, - sysvars.TransactionMode.Name, - sysvars.Workload.Name, sysvars.DDLStrategy.Name, - sysvars.SessionUUID.Name, + sysvars.TransactionMode.Name, sysvars.ReadAfterWriteGTID.Name, sysvars.ReadAfterWriteTimeOut.Name, - sysvars.SessionTrackGTIDs.Name: + sysvars.SessionEnableSystemSettings.Name, + sysvars.SessionTrackGTIDs.Name, + sysvars.SessionUUID.Name, + sysvars.SkipQueryPlanCache.Name, + sysvars.Socket.Name, + sysvars.SQLSelectLimit.Name, + sysvars.Version.Name, + sysvars.VersionComment.Name, + sysvars.Workload.Name: cursor.Replace(bindVarExpression("__vt" + lowered)) er.bindVars.AddSysVar(lowered) } @@ -307,7 +323,7 @@ func (er *expressionRewriter) unnestSubQueries(cursor *Cursor, subquery *Subquer } func bindVarExpression(name string) Expr { - return NewArgument([]byte(":" + name)) + return NewArgument(":" + name) } // SystemSchema returns true if the schema passed is system schema @@ -317,3 +333,188 @@ func SystemSchema(schema string) bool { strings.EqualFold(schema, "sys") || strings.EqualFold(schema, "mysql") } + +// RewriteToCNF walks the input AST and rewrites any boolean logic into CNF +// Note: In order to re-plan, we need to empty the accumulated metadata in the AST, +// so ColName.Metadata will be nil:ed out as part of this rewrite +func RewriteToCNF(ast SQLNode) SQLNode { + for { + finishedRewrite := true + ast = Rewrite(ast, func(cursor *Cursor) bool { + if e, isExpr := cursor.node.(Expr); isExpr { + rewritten, didRewrite := rewriteToCNFExpr(e) + if didRewrite { + finishedRewrite = false + cursor.Replace(rewritten) + } + } + if col, isCol := cursor.node.(*ColName); isCol { + col.Metadata = nil + } + return true + }, nil) + + if finishedRewrite { + return ast + } + } +} + +func distinctOr(in *OrExpr) (Expr, bool) { 
+ todo := []*OrExpr{in} + var leaves []Expr + for len(todo) > 0 { + curr := todo[0] + todo = todo[1:] + addAnd := func(in Expr) { + and, ok := in.(*OrExpr) + if ok { + todo = append(todo, and) + } else { + leaves = append(leaves, in) + } + } + addAnd(curr.Left) + addAnd(curr.Right) + } + original := len(leaves) + var predicates []Expr + +outer1: + for len(leaves) > 0 { + curr := leaves[0] + leaves = leaves[1:] + for _, alreadyIn := range predicates { + if EqualsExpr(alreadyIn, curr) { + continue outer1 + } + } + predicates = append(predicates, curr) + } + if original == len(predicates) { + return in, false + } + var result Expr + for i, curr := range predicates { + if i == 0 { + result = curr + continue + } + result = &OrExpr{Left: result, Right: curr} + } + return result, true +} +func distinctAnd(in *AndExpr) (Expr, bool) { + todo := []*AndExpr{in} + var leaves []Expr + for len(todo) > 0 { + curr := todo[0] + todo = todo[1:] + addAnd := func(in Expr) { + and, ok := in.(*AndExpr) + if ok { + todo = append(todo, and) + } else { + leaves = append(leaves, in) + } + } + addAnd(curr.Left) + addAnd(curr.Right) + } + original := len(leaves) + var predicates []Expr + +outer1: + for len(leaves) > 0 { + curr := leaves[0] + leaves = leaves[1:] + for _, alreadyIn := range predicates { + if EqualsExpr(alreadyIn, curr) { + continue outer1 + } + } + predicates = append(predicates, curr) + } + if original == len(predicates) { + return in, false + } + var result Expr + for i, curr := range predicates { + if i == 0 { + result = curr + continue + } + result = &AndExpr{Left: result, Right: curr} + } + return result, true +} + +func rewriteToCNFExpr(expr Expr) (Expr, bool) { + switch expr := expr.(type) { + case *NotExpr: + switch child := expr.Expr.(type) { + case *NotExpr: + // NOT NOT A => A + return child.Expr, true + case *OrExpr: + // DeMorgan Rewriter + // NOT (A OR B) => NOT A AND NOT B + return &AndExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, 
true + case *AndExpr: + // DeMorgan Rewriter + // NOT (A AND B) => NOT A OR NOT B + return &OrExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, true + } + case *OrExpr: + or := expr + if and, ok := or.Left.(*AndExpr); ok { + // Simplification + // (A AND B) OR A => A + if EqualsExpr(or.Right, and.Left) || EqualsExpr(or.Right, and.Right) { + return or.Right, true + } + // Distribution Law + // (A AND B) OR C => (A OR C) AND (B OR C) + return &AndExpr{Left: &OrExpr{Left: and.Left, Right: or.Right}, Right: &OrExpr{Left: and.Right, Right: or.Right}}, true + } + if and, ok := or.Right.(*AndExpr); ok { + // Simplification + // A OR (A AND B) => A + if EqualsExpr(or.Left, and.Left) || EqualsExpr(or.Left, and.Right) { + return or.Left, true + } + // Distribution Law + // C OR (A AND B) => (C OR A) AND (C OR B) + return &AndExpr{Left: &OrExpr{Left: or.Left, Right: and.Left}, Right: &OrExpr{Left: or.Left, Right: and.Right}}, true + } + // Try to make distinct + return distinctOr(expr) + + case *XorExpr: + // DeMorgan Rewriter + // (A XOR B) => (A OR B) AND NOT (A AND B) + return &AndExpr{Left: &OrExpr{Left: expr.Left, Right: expr.Right}, Right: &NotExpr{Expr: &AndExpr{Left: expr.Left, Right: expr.Right}}}, true + case *AndExpr: + res, rewritten := distinctAnd(expr) + if rewritten { + return res, rewritten + } + and := expr + if or, ok := and.Left.(*OrExpr); ok { + // Simplification + // (A OR B) AND A => A + if EqualsExpr(or.Left, and.Right) || EqualsExpr(or.Right, and.Right) { + return and.Right, true + } + } + if or, ok := and.Right.(*OrExpr); ok { + // Simplification + // A AND (A OR B) => A + if EqualsExpr(or.Left, and.Left) || EqualsExpr(or.Right, and.Left) { + return or.Left, true + } + } + + } + return expr, false +} diff --git a/go/vt/sqlparser/ast_rewriting_test.go b/go/vt/sqlparser/ast_rewriting_test.go index 802f632f627..dbad2825a89 100644 --- a/go/vt/sqlparser/ast_rewriting_test.go +++ b/go/vt/sqlparser/ast_rewriting_test.go @@ -27,12 
+27,12 @@ import ( ) type myTestCase struct { - in, expected string - liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool - ddlStrategy, sessionUUID bool - udv int - autocommit, clientFoundRows, skipQueryPlanCache bool - sqlSelectLimit, transactionMode, workload bool + in, expected string + liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool + ddlStrategy, sessionUUID, sessionEnableSystemSettings bool + udv int + autocommit, clientFoundRows, skipQueryPlanCache, socket bool + sqlSelectLimit, transactionMode, workload, version, versionComment bool } func TestRewrites(in *testing.T) { @@ -40,6 +40,18 @@ func TestRewrites(in *testing.T) { in: "SELECT 42", expected: "SELECT 42", // no bindvar needs + }, { + in: "SELECT @@version", + expected: "SELECT :__vtversion as `@@version`", + version: true, + }, { + in: "SELECT @@version_comment", + expected: "SELECT :__vtversion_comment as `@@version_comment`", + versionComment: true, + }, { + in: "SELECT @@enable_system_settings", + expected: "SELECT :__vtenable_system_settings as `@@enable_system_settings`", + sessionEnableSystemSettings: true, }, { in: "SELECT last_insert_id()", expected: "SELECT :__lastInsertId as `last_insert_id()`", @@ -134,6 +146,10 @@ func TestRewrites(in *testing.T) { in: "SELECT @@workload", expected: "SELECT :__vtworkload as `@@workload`", workload: true, + }, { + in: "SELECT @@socket", + expected: "SELECT :__vtsocket as `@@socket`", + socket: true, }, { in: "select (select 42) from dual", expected: "select 42 as `(select 42 from dual)` from dual", @@ -165,6 +181,46 @@ func TestRewrites(in *testing.T) { // SELECT * behaves different depending the join type used, so if that has been used, we won't rewrite in: "SELECT * FROM A JOIN B USING (id1,id2,id3)", expected: "SELECT * FROM A JOIN B USING (id1,id2,id3)", + }, { + in: "CALL proc(@foo)", + expected: "CALL proc(:__vtudvfoo)", + udv: 1, + }, { + in: "SHOW VARIABLES", + expected: "SHOW VARIABLES", + autocommit: 
true, + clientFoundRows: true, + skipQueryPlanCache: true, + sqlSelectLimit: true, + transactionMode: true, + workload: true, + version: true, + versionComment: true, + ddlStrategy: true, + sessionUUID: true, + sessionEnableSystemSettings: true, + rawGTID: true, + rawTimeout: true, + sessTrackGTID: true, + socket: true, + }, { + in: "SHOW GLOBAL VARIABLES", + expected: "SHOW GLOBAL VARIABLES", + autocommit: true, + clientFoundRows: true, + skipQueryPlanCache: true, + sqlSelectLimit: true, + transactionMode: true, + workload: true, + version: true, + versionComment: true, + ddlStrategy: true, + sessionUUID: true, + sessionEnableSystemSettings: true, + rawGTID: true, + rawTimeout: true, + sessTrackGTID: true, + socket: true, }} for _, tc := range tests { @@ -195,9 +251,13 @@ func TestRewrites(in *testing.T) { assert.Equal(tc.workload, result.NeedsSysVar(sysvars.Workload.Name), "should need :__vtworkload") assert.Equal(tc.ddlStrategy, result.NeedsSysVar(sysvars.DDLStrategy.Name), "should need ddlStrategy") assert.Equal(tc.sessionUUID, result.NeedsSysVar(sysvars.SessionUUID.Name), "should need sessionUUID") + assert.Equal(tc.sessionEnableSystemSettings, result.NeedsSysVar(sysvars.SessionEnableSystemSettings.Name), "should need sessionEnableSystemSettings") assert.Equal(tc.rawGTID, result.NeedsSysVar(sysvars.ReadAfterWriteGTID.Name), "should need rawGTID") assert.Equal(tc.rawTimeout, result.NeedsSysVar(sysvars.ReadAfterWriteTimeOut.Name), "should need rawTimeout") assert.Equal(tc.sessTrackGTID, result.NeedsSysVar(sysvars.SessionTrackGTIDs.Name), "should need sessTrackGTID") + assert.Equal(tc.version, result.NeedsSysVar(sysvars.Version.Name), "should need Vitess version") + assert.Equal(tc.versionComment, result.NeedsSysVar(sysvars.VersionComment.Name), "should need Vitess version") + assert.Equal(tc.socket, result.NeedsSysVar(sysvars.Socket.Name), "should need :__vtsocket") }) } } @@ -251,3 +311,98 @@ func TestRewritesWithDefaultKeyspace(in *testing.T) { }) } } + +func 
TestRewriteToCNF(in *testing.T) { + tests := []struct { + in string + expected string + }{{ + in: "not (not A = 3)", + expected: "A = 3", + }, { + in: "not (A = 3 and B = 2)", + expected: "not A = 3 or not B = 2", + }, { + in: "not (A = 3 or B = 2)", + expected: "not A = 3 and not B = 2", + }, { + in: "A xor B", + expected: "(A or B) and not (A and B)", + }, { + in: "(A and B) or C", + expected: "(A or C) and (B or C)", + }, { + in: "C or (A and B)", + expected: "(C or A) and (C or B)", + }, { + in: "A and A", + expected: "A", + }, { + in: "A OR A", + expected: "A", + }, { + in: "A OR (A AND B)", + expected: "A", + }, { + in: "A OR (B AND A)", + expected: "A", + }, { + in: "(A AND B) OR A", + expected: "A", + }, { + in: "(B AND A) OR A", + expected: "A", + }, { + in: "(A and B) and (B and A)", + expected: "A and B", + }, { + in: "(A or B) and A", + expected: "A", + }, { + in: "A and (A or B)", + expected: "A", + }} + + for _, tc := range tests { + in.Run(tc.in, func(t *testing.T) { + stmt, err := Parse("SELECT * FROM T WHERE " + tc.in) + require.NoError(t, err) + + expr := stmt.(*Select).Where.Expr + expr, didRewrite := rewriteToCNFExpr(expr) + assert.True(t, didRewrite) + assert.Equal(t, tc.expected, String(expr)) + }) + } +} + +func TestFixedPointRewriteToCNF(in *testing.T) { + tests := []struct { + in string + expected string + }{{ + in: "A xor B", + expected: "(A or B) and (not A or not B)", + }, { + in: "(A and B) and (B and A) and (B and A) and (A and B)", + expected: "A and B", + }, { + in: "((A and B) OR (A and C) OR (A and D)) and E and F", + expected: "A and ((A or B) and (B or C or A)) and ((A or D) and ((B or A or D) and (B or C or D))) and E and F", + }, { + in: "(A and B) OR (A and C)", + expected: "A and ((B or A) and (B or C))", + }} + + for _, tc := range tests { + in.Run(tc.in, func(t *testing.T) { + require := require.New(t) + stmt, err := Parse("SELECT * FROM T WHERE " + tc.in) + require.NoError(err) + + expr := stmt.(*Select).Where.Expr + 
output := RewriteToCNF(expr) + assert.Equal(t, tc.expected, String(output)) + }) + } +} diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index 31a46476486..fc93e46e15d 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -105,6 +105,28 @@ func TestSelect(t *testing.T) { } } +func TestUpdate(t *testing.T) { + tree, err := Parse("update t set a = 1") + require.NoError(t, err) + + upd, ok := tree.(*Update) + require.True(t, ok) + + upd.AddWhere(&ComparisonExpr{ + Left: &ColName{Name: NewColIdent("b")}, + Operator: EqualOp, + Right: NewIntLiteral("2"), + }) + assert.Equal(t, "update t set a = 1 where b = 2", String(upd)) + + upd.AddWhere(&ComparisonExpr{ + Left: &ColName{Name: NewColIdent("c")}, + Operator: EqualOp, + Right: NewIntLiteral("3"), + }) + assert.Equal(t, "update t set a = 1 where b = 2 and c = 3", String(upd)) +} + func TestRemoveHints(t *testing.T) { for _, query := range []string{ "select * from t use index (i)", @@ -187,30 +209,29 @@ func TestDDL(t *testing.T) { affected: []string{"a"}, }, { query: "rename table a to b", - output: &DDL{ - Action: RenameDDLAction, - FromTables: TableNames{ - TableName{Name: NewTableIdent("a")}, - }, - ToTables: TableNames{ - TableName{Name: NewTableIdent("b")}, + output: &RenameTable{ + TablePairs: []*RenameTablePair{ + { + FromTable: TableName{Name: NewTableIdent("a")}, + ToTable: TableName{Name: NewTableIdent("b")}, + }, }, }, affected: []string{"a", "b"}, }, { query: "rename table a to b, c to d", - output: &DDL{ - Action: RenameDDLAction, - FromTables: TableNames{ - TableName{Name: NewTableIdent("a")}, - TableName{Name: NewTableIdent("c")}, - }, - ToTables: TableNames{ - TableName{Name: NewTableIdent("b")}, - TableName{Name: NewTableIdent("d")}, + output: &RenameTable{ + TablePairs: []*RenameTablePair{ + { + FromTable: TableName{Name: NewTableIdent("a")}, + ToTable: TableName{Name: NewTableIdent("b")}, + }, { + FromTable: TableName{Name: NewTableIdent("c")}, + ToTable: 
TableName{Name: NewTableIdent("d")}, + }, }, }, - affected: []string{"a", "c", "b", "d"}, + affected: []string{"a", "b", "c", "d"}, }, { query: "drop table a", output: &DropTable{ @@ -266,7 +287,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if !bytes.Equal([]byte("on"), v.Val) { + if "on" != v.Val { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -291,7 +312,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if !bytes.Equal([]byte("on"), v.Val) { + if "on" != v.Val { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -318,7 +339,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if !bytes.Equal([]byte("off"), v.Val) { + if "off" != v.Val { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -343,7 +364,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if !bytes.Equal([]byte("off"), v.Val) { + if "off" != v.Val { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -387,8 +408,8 @@ func TestIsAggregate(t *testing.T) { func TestIsImpossible(t *testing.T) { f := ComparisonExpr{ Operator: NotEqualOp, - Left: newIntLiteral("1"), - Right: newIntLiteral("1"), + Left: NewIntLiteral("1"), + Right: NewIntLiteral("1"), } if !f.IsImpossible() { t.Error("IsImpossible: false, want true") @@ -396,8 +417,8 @@ func TestIsImpossible(t *testing.T) { f = ComparisonExpr{ Operator: EqualOp, - Left: newIntLiteral("1"), - Right: newIntLiteral("1"), + Left: NewIntLiteral("1"), + Right: NewIntLiteral("1"), } if f.IsImpossible() { t.Error("IsImpossible: true, want false") @@ -405,8 +426,8 @@ func TestIsImpossible(t *testing.T) { f = ComparisonExpr{ Operator: NotEqualOp, - Left: newIntLiteral("1"), - Right: newIntLiteral("2"), + Left: NewIntLiteral("1"), + Right: NewIntLiteral("2"), } if 
f.IsImpossible() { t.Error("IsImpossible: true, want false") @@ -535,7 +556,7 @@ func TestReplaceExpr(t *testing.T) { in: "select * from t where case a when b then c when d then c else (select a from b) end", out: "case a when b then c when d then c else :a end", }} - to := NewArgument([]byte(":a")) + to := NewArgument(":a") for _, tcase := range tcases { tree, err := Parse(tcase.in) if err != nil { @@ -665,7 +686,7 @@ func TestHexDecode(t *testing.T) { out: "encoding/hex: odd length hex string", }} for _, tc := range testcase { - out, err := newHexLiteral(tc.in).HexDecode() + out, err := NewHexLiteral(tc.in).HexDecode() if err != nil { if err.Error() != tc.out { t.Errorf("Decode(%q): %v, want %s", tc.in, err, tc.out) @@ -802,3 +823,32 @@ func TestShowTableStatus(t *testing.T) { require.NoError(t, err) require.NotNil(t, tree) } + +func BenchmarkStringTraces(b *testing.B) { + for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { + b.Run(trace, func(b *testing.B) { + queries := loadQueries(b, trace) + if len(queries) > 10000 { + queries = queries[:10000] + } + + parsed := make([]Statement, 0, len(queries)) + for _, q := range queries { + pp, err := Parse(q) + if err != nil { + b.Fatal(err) + } + parsed = append(parsed, pp) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, stmt := range parsed { + _ = String(stmt) + } + } + }) + } +} diff --git a/go/vt/sqlparser/ast_visit.go b/go/vt/sqlparser/ast_visit.go new file mode 100644 index 00000000000..bd52dfb25c7 --- /dev/null +++ b/go/vt/sqlparser/ast_visit.go @@ -0,0 +1,2780 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package sqlparser + +func VisitSQLNode(in SQLNode, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case AccessMode: + return VisitAccessMode(in, f) + case *AddColumns: + return VisitRefOfAddColumns(in, f) + case *AddConstraintDefinition: + return VisitRefOfAddConstraintDefinition(in, f) + case *AddIndexDefinition: + return VisitRefOfAddIndexDefinition(in, f) + case AlgorithmValue: + return VisitAlgorithmValue(in, f) + case *AliasedExpr: + return VisitRefOfAliasedExpr(in, f) + case *AliasedTableExpr: + return VisitRefOfAliasedTableExpr(in, f) + case *AlterCharset: + return VisitRefOfAlterCharset(in, f) + case *AlterColumn: + return VisitRefOfAlterColumn(in, f) + case *AlterDatabase: + return VisitRefOfAlterDatabase(in, f) + case *AlterMigration: + return VisitRefOfAlterMigration(in, f) + case *AlterTable: + return VisitRefOfAlterTable(in, f) + case *AlterView: + return VisitRefOfAlterView(in, f) + case *AlterVschema: + return VisitRefOfAlterVschema(in, f) + case *AndExpr: + return VisitRefOfAndExpr(in, f) + case Argument: + return VisitArgument(in, f) + case *AutoIncSpec: + return VisitRefOfAutoIncSpec(in, f) + case *Begin: + return VisitRefOfBegin(in, f) + case *BinaryExpr: + return VisitRefOfBinaryExpr(in, f) + case BoolVal: + return VisitBoolVal(in, f) + case *CallProc: + return VisitRefOfCallProc(in, f) + case *CaseExpr: + return VisitRefOfCaseExpr(in, f) + case *ChangeColumn: + return VisitRefOfChangeColumn(in, f) + case *CheckConstraintDefinition: + return 
VisitRefOfCheckConstraintDefinition(in, f) + case ColIdent: + return VisitColIdent(in, f) + case *ColName: + return VisitRefOfColName(in, f) + case *CollateExpr: + return VisitRefOfCollateExpr(in, f) + case *ColumnDefinition: + return VisitRefOfColumnDefinition(in, f) + case *ColumnType: + return VisitRefOfColumnType(in, f) + case Columns: + return VisitColumns(in, f) + case Comments: + return VisitComments(in, f) + case *Commit: + return VisitRefOfCommit(in, f) + case *ComparisonExpr: + return VisitRefOfComparisonExpr(in, f) + case *ConstraintDefinition: + return VisitRefOfConstraintDefinition(in, f) + case *ConvertExpr: + return VisitRefOfConvertExpr(in, f) + case *ConvertType: + return VisitRefOfConvertType(in, f) + case *ConvertUsingExpr: + return VisitRefOfConvertUsingExpr(in, f) + case *CreateDatabase: + return VisitRefOfCreateDatabase(in, f) + case *CreateTable: + return VisitRefOfCreateTable(in, f) + case *CreateView: + return VisitRefOfCreateView(in, f) + case *CurTimeFuncExpr: + return VisitRefOfCurTimeFuncExpr(in, f) + case *Default: + return VisitRefOfDefault(in, f) + case *Delete: + return VisitRefOfDelete(in, f) + case *DerivedTable: + return VisitRefOfDerivedTable(in, f) + case *DropColumn: + return VisitRefOfDropColumn(in, f) + case *DropDatabase: + return VisitRefOfDropDatabase(in, f) + case *DropKey: + return VisitRefOfDropKey(in, f) + case *DropTable: + return VisitRefOfDropTable(in, f) + case *DropView: + return VisitRefOfDropView(in, f) + case *ExistsExpr: + return VisitRefOfExistsExpr(in, f) + case *ExplainStmt: + return VisitRefOfExplainStmt(in, f) + case *ExplainTab: + return VisitRefOfExplainTab(in, f) + case Exprs: + return VisitExprs(in, f) + case *Flush: + return VisitRefOfFlush(in, f) + case *Force: + return VisitRefOfForce(in, f) + case *ForeignKeyDefinition: + return VisitRefOfForeignKeyDefinition(in, f) + case *FuncExpr: + return VisitRefOfFuncExpr(in, f) + case GroupBy: + return VisitGroupBy(in, f) + case *GroupConcatExpr: + return 
VisitRefOfGroupConcatExpr(in, f) + case *IndexDefinition: + return VisitRefOfIndexDefinition(in, f) + case *IndexHints: + return VisitRefOfIndexHints(in, f) + case *IndexInfo: + return VisitRefOfIndexInfo(in, f) + case *Insert: + return VisitRefOfInsert(in, f) + case *IntervalExpr: + return VisitRefOfIntervalExpr(in, f) + case *IsExpr: + return VisitRefOfIsExpr(in, f) + case IsolationLevel: + return VisitIsolationLevel(in, f) + case JoinCondition: + return VisitJoinCondition(in, f) + case *JoinTableExpr: + return VisitRefOfJoinTableExpr(in, f) + case *KeyState: + return VisitRefOfKeyState(in, f) + case *Limit: + return VisitRefOfLimit(in, f) + case ListArg: + return VisitListArg(in, f) + case *Literal: + return VisitRefOfLiteral(in, f) + case *Load: + return VisitRefOfLoad(in, f) + case *LockOption: + return VisitRefOfLockOption(in, f) + case *LockTables: + return VisitRefOfLockTables(in, f) + case *MatchExpr: + return VisitRefOfMatchExpr(in, f) + case *ModifyColumn: + return VisitRefOfModifyColumn(in, f) + case *Nextval: + return VisitRefOfNextval(in, f) + case *NotExpr: + return VisitRefOfNotExpr(in, f) + case *NullVal: + return VisitRefOfNullVal(in, f) + case OnDup: + return VisitOnDup(in, f) + case *OptLike: + return VisitRefOfOptLike(in, f) + case *OrExpr: + return VisitRefOfOrExpr(in, f) + case *Order: + return VisitRefOfOrder(in, f) + case OrderBy: + return VisitOrderBy(in, f) + case *OrderByOption: + return VisitRefOfOrderByOption(in, f) + case *OtherAdmin: + return VisitRefOfOtherAdmin(in, f) + case *OtherRead: + return VisitRefOfOtherRead(in, f) + case *ParenSelect: + return VisitRefOfParenSelect(in, f) + case *ParenTableExpr: + return VisitRefOfParenTableExpr(in, f) + case *PartitionDefinition: + return VisitRefOfPartitionDefinition(in, f) + case *PartitionSpec: + return VisitRefOfPartitionSpec(in, f) + case Partitions: + return VisitPartitions(in, f) + case *RangeCond: + return VisitRefOfRangeCond(in, f) + case ReferenceAction: + return 
VisitReferenceAction(in, f) + case *Release: + return VisitRefOfRelease(in, f) + case *RenameIndex: + return VisitRefOfRenameIndex(in, f) + case *RenameTable: + return VisitRefOfRenameTable(in, f) + case *RenameTableName: + return VisitRefOfRenameTableName(in, f) + case *RevertMigration: + return VisitRefOfRevertMigration(in, f) + case *Rollback: + return VisitRefOfRollback(in, f) + case *SRollback: + return VisitRefOfSRollback(in, f) + case *Savepoint: + return VisitRefOfSavepoint(in, f) + case *Select: + return VisitRefOfSelect(in, f) + case SelectExprs: + return VisitSelectExprs(in, f) + case *SelectInto: + return VisitRefOfSelectInto(in, f) + case *Set: + return VisitRefOfSet(in, f) + case *SetExpr: + return VisitRefOfSetExpr(in, f) + case SetExprs: + return VisitSetExprs(in, f) + case *SetTransaction: + return VisitRefOfSetTransaction(in, f) + case *Show: + return VisitRefOfShow(in, f) + case *ShowBasic: + return VisitRefOfShowBasic(in, f) + case *ShowCreate: + return VisitRefOfShowCreate(in, f) + case *ShowFilter: + return VisitRefOfShowFilter(in, f) + case *ShowLegacy: + return VisitRefOfShowLegacy(in, f) + case *StarExpr: + return VisitRefOfStarExpr(in, f) + case *Stream: + return VisitRefOfStream(in, f) + case *Subquery: + return VisitRefOfSubquery(in, f) + case *SubstrExpr: + return VisitRefOfSubstrExpr(in, f) + case TableExprs: + return VisitTableExprs(in, f) + case TableIdent: + return VisitTableIdent(in, f) + case TableName: + return VisitTableName(in, f) + case TableNames: + return VisitTableNames(in, f) + case TableOptions: + return VisitTableOptions(in, f) + case *TableSpec: + return VisitRefOfTableSpec(in, f) + case *TablespaceOperation: + return VisitRefOfTablespaceOperation(in, f) + case *TimestampFuncExpr: + return VisitRefOfTimestampFuncExpr(in, f) + case *TruncateTable: + return VisitRefOfTruncateTable(in, f) + case *UnaryExpr: + return VisitRefOfUnaryExpr(in, f) + case *Union: + return VisitRefOfUnion(in, f) + case *UnionSelect: + return 
VisitRefOfUnionSelect(in, f) + case *UnlockTables: + return VisitRefOfUnlockTables(in, f) + case *Update: + return VisitRefOfUpdate(in, f) + case *UpdateExpr: + return VisitRefOfUpdateExpr(in, f) + case UpdateExprs: + return VisitUpdateExprs(in, f) + case *Use: + return VisitRefOfUse(in, f) + case *VStream: + return VisitRefOfVStream(in, f) + case ValTuple: + return VisitValTuple(in, f) + case *Validation: + return VisitRefOfValidation(in, f) + case Values: + return VisitValues(in, f) + case *ValuesFuncExpr: + return VisitRefOfValuesFuncExpr(in, f) + case VindexParam: + return VisitVindexParam(in, f) + case *VindexSpec: + return VisitRefOfVindexSpec(in, f) + case *When: + return VisitRefOfWhen(in, f) + case *Where: + return VisitRefOfWhere(in, f) + case *XorExpr: + return VisitRefOfXorExpr(in, f) + default: + // this should never happen + return nil + } +} +func VisitRefOfAddColumns(in *AddColumns, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in.Columns { + if err := VisitRefOfColumnDefinition(el, f); err != nil { + return err + } + } + if err := VisitRefOfColName(in.First, f); err != nil { + return err + } + if err := VisitRefOfColName(in.After, f); err != nil { + return err + } + return nil +} +func VisitRefOfAddConstraintDefinition(in *AddConstraintDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfConstraintDefinition(in.ConstraintDefinition, f); err != nil { + return err + } + return nil +} +func VisitRefOfAddIndexDefinition(in *AddIndexDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfIndexDefinition(in.IndexDefinition, f); err != nil { + return err + } + return nil +} +func VisitRefOfAliasedExpr(in *AliasedExpr, f Visit) error { + if in == nil { + return nil + } + if cont, 
err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + if err := VisitColIdent(in.As, f); err != nil { + return err + } + return nil +} +func VisitRefOfAliasedTableExpr(in *AliasedTableExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSimpleTableExpr(in.Expr, f); err != nil { + return err + } + if err := VisitPartitions(in.Partitions, f); err != nil { + return err + } + if err := VisitTableIdent(in.As, f); err != nil { + return err + } + if err := VisitRefOfIndexHints(in.Hints, f); err != nil { + return err + } + return nil +} +func VisitRefOfAlterCharset(in *AlterCharset, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfAlterColumn(in *AlterColumn, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfColName(in.Column, f); err != nil { + return err + } + if err := VisitExpr(in.DefaultVal, f); err != nil { + return err + } + return nil +} +func VisitRefOfAlterDatabase(in *AlterDatabase, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableIdent(in.DBName, f); err != nil { + return err + } + return nil +} +func VisitRefOfAlterMigration(in *AlterMigration, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfAlterTable(in *AlterTable, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + for _, el := range in.AlterOptions { + if err := VisitAlterOption(el, f); err != nil { + return err + } + } + if err := 
VisitRefOfPartitionSpec(in.PartitionSpec, f); err != nil { + return err + } + return nil +} +func VisitRefOfAlterView(in *AlterView, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.ViewName, f); err != nil { + return err + } + if err := VisitColumns(in.Columns, f); err != nil { + return err + } + if err := VisitSelectStatement(in.Select, f); err != nil { + return err + } + return nil +} +func VisitRefOfAlterVschema(in *AlterVschema, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + if err := VisitRefOfVindexSpec(in.VindexSpec, f); err != nil { + return err + } + for _, el := range in.VindexCols { + if err := VisitColIdent(el, f); err != nil { + return err + } + } + if err := VisitRefOfAutoIncSpec(in.AutoIncSpec, f); err != nil { + return err + } + return nil +} +func VisitRefOfAndExpr(in *AndExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Left, f); err != nil { + return err + } + if err := VisitExpr(in.Right, f); err != nil { + return err + } + return nil +} +func VisitRefOfAutoIncSpec(in *AutoIncSpec, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Column, f); err != nil { + return err + } + if err := VisitTableName(in.Sequence, f); err != nil { + return err + } + return nil +} +func VisitRefOfBegin(in *Begin, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfBinaryExpr(in *BinaryExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Left, f); err != nil { + 
return err + } + if err := VisitExpr(in.Right, f); err != nil { + return err + } + return nil +} +func VisitRefOfCallProc(in *CallProc, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Name, f); err != nil { + return err + } + if err := VisitExprs(in.Params, f); err != nil { + return err + } + return nil +} +func VisitRefOfCaseExpr(in *CaseExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + for _, el := range in.Whens { + if err := VisitRefOfWhen(el, f); err != nil { + return err + } + } + if err := VisitExpr(in.Else, f); err != nil { + return err + } + return nil +} +func VisitRefOfChangeColumn(in *ChangeColumn, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfColName(in.OldColumn, f); err != nil { + return err + } + if err := VisitRefOfColumnDefinition(in.NewColDefinition, f); err != nil { + return err + } + if err := VisitRefOfColName(in.First, f); err != nil { + return err + } + if err := VisitRefOfColName(in.After, f); err != nil { + return err + } + return nil +} +func VisitRefOfCheckConstraintDefinition(in *CheckConstraintDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitColIdent(in ColIdent, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfColName(in *ColName, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := VisitTableName(in.Qualifier, f); err != nil { + return err + } 
+ return nil +} +func VisitRefOfCollateExpr(in *CollateExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitRefOfColumnDefinition(in *ColumnDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + return nil +} +func VisitRefOfColumnType(in *ColumnType, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfLiteral(in.Length, f); err != nil { + return err + } + if err := VisitRefOfLiteral(in.Scale, f); err != nil { + return err + } + return nil +} +func VisitColumns(in Columns, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitColIdent(el, f); err != nil { + return err + } + } + return nil +} +func VisitComments(in Comments, f Visit) error { + _, err := f(in) + return err +} +func VisitRefOfCommit(in *Commit, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfComparisonExpr(in *ComparisonExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Left, f); err != nil { + return err + } + if err := VisitExpr(in.Right, f); err != nil { + return err + } + if err := VisitExpr(in.Escape, f); err != nil { + return err + } + return nil +} +func VisitRefOfConstraintDefinition(in *ConstraintDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := 
VisitConstraintInfo(in.Details, f); err != nil { + return err + } + return nil +} +func VisitRefOfConvertExpr(in *ConvertExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + if err := VisitRefOfConvertType(in.Type, f); err != nil { + return err + } + return nil +} +func VisitRefOfConvertType(in *ConvertType, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfLiteral(in.Length, f); err != nil { + return err + } + if err := VisitRefOfLiteral(in.Scale, f); err != nil { + return err + } + return nil +} +func VisitRefOfConvertUsingExpr(in *ConvertUsingExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitRefOfCreateDatabase(in *CreateDatabase, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + if err := VisitTableIdent(in.DBName, f); err != nil { + return err + } + return nil +} +func VisitRefOfCreateTable(in *CreateTable, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + if err := VisitRefOfTableSpec(in.TableSpec, f); err != nil { + return err + } + if err := VisitRefOfOptLike(in.OptLike, f); err != nil { + return err + } + return nil +} +func VisitRefOfCreateView(in *CreateView, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.ViewName, f); err != nil { + return err + } + if err := VisitColumns(in.Columns, f); err != nil { + 
return err + } + if err := VisitSelectStatement(in.Select, f); err != nil { + return err + } + return nil +} +func VisitRefOfCurTimeFuncExpr(in *CurTimeFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := VisitExpr(in.Fsp, f); err != nil { + return err + } + return nil +} +func VisitRefOfDefault(in *Default, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfDelete(in *Delete, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + if err := VisitTableNames(in.Targets, f); err != nil { + return err + } + if err := VisitTableExprs(in.TableExprs, f); err != nil { + return err + } + if err := VisitPartitions(in.Partitions, f); err != nil { + return err + } + if err := VisitRefOfWhere(in.Where, f); err != nil { + return err + } + if err := VisitOrderBy(in.OrderBy, f); err != nil { + return err + } + if err := VisitRefOfLimit(in.Limit, f); err != nil { + return err + } + return nil +} +func VisitRefOfDerivedTable(in *DerivedTable, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSelectStatement(in.Select, f); err != nil { + return err + } + return nil +} +func VisitRefOfDropColumn(in *DropColumn, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfColName(in.Name, f); err != nil { + return err + } + return nil +} +func VisitRefOfDropDatabase(in *DropDatabase, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + 
return err + } + if err := VisitTableIdent(in.DBName, f); err != nil { + return err + } + return nil +} +func VisitRefOfDropKey(in *DropKey, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + return nil +} +func VisitRefOfDropTable(in *DropTable, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableNames(in.FromTables, f); err != nil { + return err + } + return nil +} +func VisitRefOfDropView(in *DropView, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableNames(in.FromTables, f); err != nil { + return err + } + return nil +} +func VisitRefOfExistsExpr(in *ExistsExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfSubquery(in.Subquery, f); err != nil { + return err + } + return nil +} +func VisitRefOfExplainStmt(in *ExplainStmt, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitStatement(in.Statement, f); err != nil { + return err + } + return nil +} +func VisitRefOfExplainTab(in *ExplainTab, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + return nil +} +func VisitExprs(in Exprs, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitExpr(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfFlush(in *Flush, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := 
VisitTableNames(in.TableNames, f); err != nil { + return err + } + return nil +} +func VisitRefOfForce(in *Force, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfForeignKeyDefinition(in *ForeignKeyDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColumns(in.Source, f); err != nil { + return err + } + if err := VisitTableName(in.ReferencedTable, f); err != nil { + return err + } + if err := VisitColumns(in.ReferencedColumns, f); err != nil { + return err + } + if err := VisitReferenceAction(in.OnDelete, f); err != nil { + return err + } + if err := VisitReferenceAction(in.OnUpdate, f); err != nil { + return err + } + return nil +} +func VisitRefOfFuncExpr(in *FuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableIdent(in.Qualifier, f); err != nil { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := VisitSelectExprs(in.Exprs, f); err != nil { + return err + } + return nil +} +func VisitGroupBy(in GroupBy, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitExpr(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfGroupConcatExpr(in *GroupConcatExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSelectExprs(in.Exprs, f); err != nil { + return err + } + if err := VisitOrderBy(in.OrderBy, f); err != nil { + return err + } + if err := VisitRefOfLimit(in.Limit, f); err != nil { + return err + } + return nil +} +func VisitRefOfIndexDefinition(in *IndexDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); 
err != nil || !cont { + return err + } + if err := VisitRefOfIndexInfo(in.Info, f); err != nil { + return err + } + return nil +} +func VisitRefOfIndexHints(in *IndexHints, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in.Indexes { + if err := VisitColIdent(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfIndexInfo(in *IndexInfo, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := VisitColIdent(in.ConstraintName, f); err != nil { + return err + } + return nil +} +func VisitRefOfInsert(in *Insert, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + if err := VisitPartitions(in.Partitions, f); err != nil { + return err + } + if err := VisitColumns(in.Columns, f); err != nil { + return err + } + if err := VisitInsertRows(in.Rows, f); err != nil { + return err + } + if err := VisitOnDup(in.OnDup, f); err != nil { + return err + } + return nil +} +func VisitRefOfIntervalExpr(in *IntervalExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitRefOfIsExpr(in *IsExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitJoinCondition(in JoinCondition, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.On, f); err != nil { + return err + } + if err := 
VisitColumns(in.Using, f); err != nil { + return err + } + return nil +} +func VisitRefOfJoinTableExpr(in *JoinTableExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableExpr(in.LeftExpr, f); err != nil { + return err + } + if err := VisitTableExpr(in.RightExpr, f); err != nil { + return err + } + if err := VisitJoinCondition(in.Condition, f); err != nil { + return err + } + return nil +} +func VisitRefOfKeyState(in *KeyState, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfLimit(in *Limit, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Offset, f); err != nil { + return err + } + if err := VisitExpr(in.Rowcount, f); err != nil { + return err + } + return nil +} +func VisitListArg(in ListArg, f Visit) error { + _, err := f(in) + return err +} +func VisitRefOfLiteral(in *Literal, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfLoad(in *Load, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfLockOption(in *LockOption, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfLockTables(in *LockTables, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfMatchExpr(in *MatchExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSelectExprs(in.Columns, f); err != nil { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return 
err + } + return nil +} +func VisitRefOfModifyColumn(in *ModifyColumn, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfColumnDefinition(in.NewColDefinition, f); err != nil { + return err + } + if err := VisitRefOfColName(in.First, f); err != nil { + return err + } + if err := VisitRefOfColName(in.After, f); err != nil { + return err + } + return nil +} +func VisitRefOfNextval(in *Nextval, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitRefOfNotExpr(in *NotExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitRefOfNullVal(in *NullVal, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitOnDup(in OnDup, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitRefOfUpdateExpr(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfOptLike(in *OptLike, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.LikeTable, f); err != nil { + return err + } + return nil +} +func VisitRefOfOrExpr(in *OrExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Left, f); err != nil { + return err + } + if err := VisitExpr(in.Right, f); err != nil { + return err + } + return nil +} +func VisitRefOfOrder(in *Order, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || 
!cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitOrderBy(in OrderBy, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitRefOfOrder(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfOrderByOption(in *OrderByOption, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColumns(in.Cols, f); err != nil { + return err + } + return nil +} +func VisitRefOfOtherAdmin(in *OtherAdmin, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfOtherRead(in *OtherRead, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfParenSelect(in *ParenSelect, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSelectStatement(in.Select, f); err != nil { + return err + } + return nil +} +func VisitRefOfParenTableExpr(in *ParenTableExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableExprs(in.Exprs, f); err != nil { + return err + } + return nil +} +func VisitRefOfPartitionDefinition(in *PartitionDefinition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := VisitExpr(in.Limit, f); err != nil { + return err + } + return nil +} +func VisitRefOfPartitionSpec(in *PartitionSpec, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitPartitions(in.Names, f); 
err != nil { + return err + } + if err := VisitRefOfLiteral(in.Number, f); err != nil { + return err + } + if err := VisitTableName(in.TableName, f); err != nil { + return err + } + for _, el := range in.Definitions { + if err := VisitRefOfPartitionDefinition(el, f); err != nil { + return err + } + } + return nil +} +func VisitPartitions(in Partitions, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitColIdent(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfRangeCond(in *RangeCond, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Left, f); err != nil { + return err + } + if err := VisitExpr(in.From, f); err != nil { + return err + } + if err := VisitExpr(in.To, f); err != nil { + return err + } + return nil +} +func VisitRefOfRelease(in *Release, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + return nil +} +func VisitRefOfRenameIndex(in *RenameIndex, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.OldName, f); err != nil { + return err + } + if err := VisitColIdent(in.NewName, f); err != nil { + return err + } + return nil +} +func VisitRefOfRenameTable(in *RenameTable, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfRenameTableName(in *RenameTableName, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + return nil +} +func VisitRefOfRevertMigration(in *RevertMigration, f Visit) 
error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfRollback(in *Rollback, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfSRollback(in *SRollback, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + return nil +} +func VisitRefOfSavepoint(in *Savepoint, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + return nil +} +func VisitRefOfSelect(in *Select, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + if err := VisitSelectExprs(in.SelectExprs, f); err != nil { + return err + } + if err := VisitTableExprs(in.From, f); err != nil { + return err + } + if err := VisitRefOfWhere(in.Where, f); err != nil { + return err + } + if err := VisitGroupBy(in.GroupBy, f); err != nil { + return err + } + if err := VisitRefOfWhere(in.Having, f); err != nil { + return err + } + if err := VisitOrderBy(in.OrderBy, f); err != nil { + return err + } + if err := VisitRefOfLimit(in.Limit, f); err != nil { + return err + } + if err := VisitRefOfSelectInto(in.Into, f); err != nil { + return err + } + return nil +} +func VisitSelectExprs(in SelectExprs, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitSelectExpr(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfSelectInto(in *SelectInto, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || 
!cont { + return err + } + return nil +} +func VisitRefOfSet(in *Set, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + if err := VisitSetExprs(in.Exprs, f); err != nil { + return err + } + return nil +} +func VisitRefOfSetExpr(in *SetExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitSetExprs(in SetExprs, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitRefOfSetExpr(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfSetTransaction(in *SetTransaction, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSQLNode(in.SQLNode, f); err != nil { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + for _, el := range in.Characteristics { + if err := VisitCharacteristic(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfShow(in *Show, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitShowInternal(in.Internal, f); err != nil { + return err + } + return nil +} +func VisitRefOfShowBasic(in *ShowBasic, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Tbl, f); err != nil { + return err + } + if err := VisitTableIdent(in.DbName, f); err != nil { + return err + } + if err := VisitRefOfShowFilter(in.Filter, f); err != nil { + return err + } + return nil +} +func 
VisitRefOfShowCreate(in *ShowCreate, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Op, f); err != nil { + return err + } + return nil +} +func VisitRefOfShowFilter(in *ShowFilter, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Filter, f); err != nil { + return err + } + return nil +} +func VisitRefOfShowLegacy(in *ShowLegacy, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.OnTable, f); err != nil { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + if err := VisitExpr(in.ShowCollationFilterOpt, f); err != nil { + return err + } + return nil +} +func VisitRefOfStarExpr(in *StarExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.TableName, f); err != nil { + return err + } + return nil +} +func VisitRefOfStream(in *Stream, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + if err := VisitSelectExpr(in.SelectExpr, f); err != nil { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + return nil +} +func VisitRefOfSubquery(in *Subquery, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSelectStatement(in.Select, f); err != nil { + return err + } + return nil +} +func VisitRefOfSubstrExpr(in *SubstrExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfColName(in.Name, f); err != nil { + return err + } + if err 
:= VisitRefOfLiteral(in.StrVal, f); err != nil { + return err + } + if err := VisitExpr(in.From, f); err != nil { + return err + } + if err := VisitExpr(in.To, f); err != nil { + return err + } + return nil +} +func VisitTableExprs(in TableExprs, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitTableExpr(el, f); err != nil { + return err + } + } + return nil +} +func VisitTableIdent(in TableIdent, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitTableName(in TableName, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableIdent(in.Name, f); err != nil { + return err + } + if err := VisitTableIdent(in.Qualifier, f); err != nil { + return err + } + return nil +} +func VisitTableNames(in TableNames, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitTableName(el, f); err != nil { + return err + } + } + return nil +} +func VisitTableOptions(in TableOptions, f Visit) error { + _, err := f(in) + return err +} +func VisitRefOfTableSpec(in *TableSpec, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in.Columns { + if err := VisitRefOfColumnDefinition(el, f); err != nil { + return err + } + } + for _, el := range in.Indexes { + if err := VisitRefOfIndexDefinition(el, f); err != nil { + return err + } + } + for _, el := range in.Constraints { + if err := VisitRefOfConstraintDefinition(el, f); err != nil { + return err + } + } + if err := VisitTableOptions(in.Options, f); err != nil { + return err + } + return nil +} +func VisitRefOfTablespaceOperation(in *TablespaceOperation, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { 
+ return err + } + return nil +} +func VisitRefOfTimestampFuncExpr(in *TimestampFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr1, f); err != nil { + return err + } + if err := VisitExpr(in.Expr2, f); err != nil { + return err + } + return nil +} +func VisitRefOfTruncateTable(in *TruncateTable, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + return nil +} +func VisitRefOfUnaryExpr(in *UnaryExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitRefOfUnion(in *Union, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSelectStatement(in.FirstStatement, f); err != nil { + return err + } + for _, el := range in.UnionSelects { + if err := VisitRefOfUnionSelect(el, f); err != nil { + return err + } + } + if err := VisitOrderBy(in.OrderBy, f); err != nil { + return err + } + if err := VisitRefOfLimit(in.Limit, f); err != nil { + return err + } + return nil +} +func VisitRefOfUnionSelect(in *UnionSelect, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitSelectStatement(in.Statement, f); err != nil { + return err + } + return nil +} +func VisitRefOfUnlockTables(in *UnlockTables, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfUpdate(in *Update, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return 
err + } + if err := VisitTableExprs(in.TableExprs, f); err != nil { + return err + } + if err := VisitUpdateExprs(in.Exprs, f); err != nil { + return err + } + if err := VisitRefOfWhere(in.Where, f); err != nil { + return err + } + if err := VisitOrderBy(in.OrderBy, f); err != nil { + return err + } + if err := VisitRefOfLimit(in.Limit, f); err != nil { + return err + } + return nil +} +func VisitRefOfUpdateExpr(in *UpdateExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfColName(in.Name, f); err != nil { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitUpdateExprs(in UpdateExprs, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitRefOfUpdateExpr(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfUse(in *Use, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableIdent(in.DBName, f); err != nil { + return err + } + return nil +} +func VisitRefOfVStream(in *VStream, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitComments(in.Comments, f); err != nil { + return err + } + if err := VisitSelectExpr(in.SelectExpr, f); err != nil { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + if err := VisitRefOfWhere(in.Where, f); err != nil { + return err + } + if err := VisitRefOfLimit(in.Limit, f); err != nil { + return err + } + return nil +} +func VisitValTuple(in ValTuple, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitExpr(el, f); err != nil { + return err + } + } + return nil +} +func 
VisitRefOfValidation(in *Validation, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitValues(in Values, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + for _, el := range in { + if err := VisitValTuple(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfValuesFuncExpr(in *ValuesFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitRefOfColName(in.Name, f); err != nil { + return err + } + return nil +} +func VisitVindexParam(in VindexParam, f Visit) error { + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Key, f); err != nil { + return err + } + return nil +} +func VisitRefOfVindexSpec(in *VindexSpec, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Name, f); err != nil { + return err + } + if err := VisitColIdent(in.Type, f); err != nil { + return err + } + for _, el := range in.Params { + if err := VisitVindexParam(el, f); err != nil { + return err + } + } + return nil +} +func VisitRefOfWhen(in *When, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Cond, f); err != nil { + return err + } + if err := VisitExpr(in.Val, f); err != nil { + return err + } + return nil +} +func VisitRefOfWhere(in *Where, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Expr, f); err != nil { + return err + } + return nil +} +func VisitRefOfXorExpr(in *XorExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Left, 
f); err != nil { + return err + } + if err := VisitExpr(in.Right, f); err != nil { + return err + } + return nil +} +func VisitAlterOption(in AlterOption, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *AddColumns: + return VisitRefOfAddColumns(in, f) + case *AddConstraintDefinition: + return VisitRefOfAddConstraintDefinition(in, f) + case *AddIndexDefinition: + return VisitRefOfAddIndexDefinition(in, f) + case AlgorithmValue: + return VisitAlgorithmValue(in, f) + case *AlterCharset: + return VisitRefOfAlterCharset(in, f) + case *AlterColumn: + return VisitRefOfAlterColumn(in, f) + case *ChangeColumn: + return VisitRefOfChangeColumn(in, f) + case *DropColumn: + return VisitRefOfDropColumn(in, f) + case *DropKey: + return VisitRefOfDropKey(in, f) + case *Force: + return VisitRefOfForce(in, f) + case *KeyState: + return VisitRefOfKeyState(in, f) + case *LockOption: + return VisitRefOfLockOption(in, f) + case *ModifyColumn: + return VisitRefOfModifyColumn(in, f) + case *OrderByOption: + return VisitRefOfOrderByOption(in, f) + case *RenameIndex: + return VisitRefOfRenameIndex(in, f) + case *RenameTableName: + return VisitRefOfRenameTableName(in, f) + case TableOptions: + return VisitTableOptions(in, f) + case *TablespaceOperation: + return VisitRefOfTablespaceOperation(in, f) + case *Validation: + return VisitRefOfValidation(in, f) + default: + // this should never happen + return nil + } +} +func VisitCharacteristic(in Characteristic, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case AccessMode: + return VisitAccessMode(in, f) + case IsolationLevel: + return VisitIsolationLevel(in, f) + default: + // this should never happen + return nil + } +} +func VisitColTuple(in ColTuple, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case ListArg: + return VisitListArg(in, f) + case *Subquery: + return VisitRefOfSubquery(in, f) + case ValTuple: + return VisitValTuple(in, f) 
+ default: + // this should never happen + return nil + } +} +func VisitConstraintInfo(in ConstraintInfo, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *CheckConstraintDefinition: + return VisitRefOfCheckConstraintDefinition(in, f) + case *ForeignKeyDefinition: + return VisitRefOfForeignKeyDefinition(in, f) + default: + // this should never happen + return nil + } +} +func VisitDBDDLStatement(in DBDDLStatement, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterDatabase: + return VisitRefOfAlterDatabase(in, f) + case *CreateDatabase: + return VisitRefOfCreateDatabase(in, f) + case *DropDatabase: + return VisitRefOfDropDatabase(in, f) + default: + // this should never happen + return nil + } +} +func VisitDDLStatement(in DDLStatement, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterTable: + return VisitRefOfAlterTable(in, f) + case *AlterView: + return VisitRefOfAlterView(in, f) + case *CreateTable: + return VisitRefOfCreateTable(in, f) + case *CreateView: + return VisitRefOfCreateView(in, f) + case *DropTable: + return VisitRefOfDropTable(in, f) + case *DropView: + return VisitRefOfDropView(in, f) + case *RenameTable: + return VisitRefOfRenameTable(in, f) + case *TruncateTable: + return VisitRefOfTruncateTable(in, f) + default: + // this should never happen + return nil + } +} +func VisitExplain(in Explain, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *ExplainStmt: + return VisitRefOfExplainStmt(in, f) + case *ExplainTab: + return VisitRefOfExplainTab(in, f) + default: + // this should never happen + return nil + } +} +func VisitExpr(in Expr, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *AndExpr: + return VisitRefOfAndExpr(in, f) + case Argument: + return VisitArgument(in, f) + case *BinaryExpr: + return VisitRefOfBinaryExpr(in, f) + case BoolVal: + return 
VisitBoolVal(in, f) + case *CaseExpr: + return VisitRefOfCaseExpr(in, f) + case *ColName: + return VisitRefOfColName(in, f) + case *CollateExpr: + return VisitRefOfCollateExpr(in, f) + case *ComparisonExpr: + return VisitRefOfComparisonExpr(in, f) + case *ConvertExpr: + return VisitRefOfConvertExpr(in, f) + case *ConvertUsingExpr: + return VisitRefOfConvertUsingExpr(in, f) + case *CurTimeFuncExpr: + return VisitRefOfCurTimeFuncExpr(in, f) + case *Default: + return VisitRefOfDefault(in, f) + case *ExistsExpr: + return VisitRefOfExistsExpr(in, f) + case *FuncExpr: + return VisitRefOfFuncExpr(in, f) + case *GroupConcatExpr: + return VisitRefOfGroupConcatExpr(in, f) + case *IntervalExpr: + return VisitRefOfIntervalExpr(in, f) + case *IsExpr: + return VisitRefOfIsExpr(in, f) + case ListArg: + return VisitListArg(in, f) + case *Literal: + return VisitRefOfLiteral(in, f) + case *MatchExpr: + return VisitRefOfMatchExpr(in, f) + case *NotExpr: + return VisitRefOfNotExpr(in, f) + case *NullVal: + return VisitRefOfNullVal(in, f) + case *OrExpr: + return VisitRefOfOrExpr(in, f) + case *RangeCond: + return VisitRefOfRangeCond(in, f) + case *Subquery: + return VisitRefOfSubquery(in, f) + case *SubstrExpr: + return VisitRefOfSubstrExpr(in, f) + case *TimestampFuncExpr: + return VisitRefOfTimestampFuncExpr(in, f) + case *UnaryExpr: + return VisitRefOfUnaryExpr(in, f) + case ValTuple: + return VisitValTuple(in, f) + case *ValuesFuncExpr: + return VisitRefOfValuesFuncExpr(in, f) + case *XorExpr: + return VisitRefOfXorExpr(in, f) + default: + // this should never happen + return nil + } +} +func VisitInsertRows(in InsertRows, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *ParenSelect: + return VisitRefOfParenSelect(in, f) + case *Select: + return VisitRefOfSelect(in, f) + case *Union: + return VisitRefOfUnion(in, f) + case Values: + return VisitValues(in, f) + default: + // this should never happen + return nil + } +} +func VisitSelectExpr(in 
SelectExpr, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *AliasedExpr: + return VisitRefOfAliasedExpr(in, f) + case *Nextval: + return VisitRefOfNextval(in, f) + case *StarExpr: + return VisitRefOfStarExpr(in, f) + default: + // this should never happen + return nil + } +} +func VisitSelectStatement(in SelectStatement, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *ParenSelect: + return VisitRefOfParenSelect(in, f) + case *Select: + return VisitRefOfSelect(in, f) + case *Union: + return VisitRefOfUnion(in, f) + default: + // this should never happen + return nil + } +} +func VisitShowInternal(in ShowInternal, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *ShowBasic: + return VisitRefOfShowBasic(in, f) + case *ShowCreate: + return VisitRefOfShowCreate(in, f) + case *ShowLegacy: + return VisitRefOfShowLegacy(in, f) + default: + // this should never happen + return nil + } +} +func VisitSimpleTableExpr(in SimpleTableExpr, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *DerivedTable: + return VisitRefOfDerivedTable(in, f) + case TableName: + return VisitTableName(in, f) + default: + // this should never happen + return nil + } +} +func VisitStatement(in Statement, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterDatabase: + return VisitRefOfAlterDatabase(in, f) + case *AlterMigration: + return VisitRefOfAlterMigration(in, f) + case *AlterTable: + return VisitRefOfAlterTable(in, f) + case *AlterView: + return VisitRefOfAlterView(in, f) + case *AlterVschema: + return VisitRefOfAlterVschema(in, f) + case *Begin: + return VisitRefOfBegin(in, f) + case *CallProc: + return VisitRefOfCallProc(in, f) + case *Commit: + return VisitRefOfCommit(in, f) + case *CreateDatabase: + return VisitRefOfCreateDatabase(in, f) + case *CreateTable: + return VisitRefOfCreateTable(in, f) + case 
*CreateView: + return VisitRefOfCreateView(in, f) + case *Delete: + return VisitRefOfDelete(in, f) + case *DropDatabase: + return VisitRefOfDropDatabase(in, f) + case *DropTable: + return VisitRefOfDropTable(in, f) + case *DropView: + return VisitRefOfDropView(in, f) + case *ExplainStmt: + return VisitRefOfExplainStmt(in, f) + case *ExplainTab: + return VisitRefOfExplainTab(in, f) + case *Flush: + return VisitRefOfFlush(in, f) + case *Insert: + return VisitRefOfInsert(in, f) + case *Load: + return VisitRefOfLoad(in, f) + case *LockTables: + return VisitRefOfLockTables(in, f) + case *OtherAdmin: + return VisitRefOfOtherAdmin(in, f) + case *OtherRead: + return VisitRefOfOtherRead(in, f) + case *ParenSelect: + return VisitRefOfParenSelect(in, f) + case *Release: + return VisitRefOfRelease(in, f) + case *RenameTable: + return VisitRefOfRenameTable(in, f) + case *RevertMigration: + return VisitRefOfRevertMigration(in, f) + case *Rollback: + return VisitRefOfRollback(in, f) + case *SRollback: + return VisitRefOfSRollback(in, f) + case *Savepoint: + return VisitRefOfSavepoint(in, f) + case *Select: + return VisitRefOfSelect(in, f) + case *Set: + return VisitRefOfSet(in, f) + case *SetTransaction: + return VisitRefOfSetTransaction(in, f) + case *Show: + return VisitRefOfShow(in, f) + case *Stream: + return VisitRefOfStream(in, f) + case *TruncateTable: + return VisitRefOfTruncateTable(in, f) + case *Union: + return VisitRefOfUnion(in, f) + case *UnlockTables: + return VisitRefOfUnlockTables(in, f) + case *Update: + return VisitRefOfUpdate(in, f) + case *Use: + return VisitRefOfUse(in, f) + case *VStream: + return VisitRefOfVStream(in, f) + default: + // this should never happen + return nil + } +} +func VisitTableExpr(in TableExpr, f Visit) error { + if in == nil { + return nil + } + switch in := in.(type) { + case *AliasedTableExpr: + return VisitRefOfAliasedTableExpr(in, f) + case *JoinTableExpr: + return VisitRefOfJoinTableExpr(in, f) + case *ParenTableExpr: + return 
VisitRefOfParenTableExpr(in, f) + default: + // this should never happen + return nil + } +} +func VisitAccessMode(in AccessMode, f Visit) error { + _, err := f(in) + return err +} +func VisitAlgorithmValue(in AlgorithmValue, f Visit) error { + _, err := f(in) + return err +} +func VisitArgument(in Argument, f Visit) error { + _, err := f(in) + return err +} +func VisitBoolVal(in BoolVal, f Visit) error { + _, err := f(in) + return err +} +func VisitIsolationLevel(in IsolationLevel, f Visit) error { + _, err := f(in) + return err +} +func VisitReferenceAction(in ReferenceAction, f Visit) error { + _, err := f(in) + return err +} +func VisitRefOfColIdent(in *ColIdent, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfJoinCondition(in *JoinCondition, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.On, f); err != nil { + return err + } + if err := VisitColumns(in.Using, f); err != nil { + return err + } + return nil +} +func VisitRefOfTableIdent(in *TableIdent, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} +func VisitRefOfTableName(in *TableName, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableIdent(in.Name, f); err != nil { + return err + } + if err := VisitTableIdent(in.Qualifier, f); err != nil { + return err + } + return nil +} +func VisitRefOfVindexParam(in *VindexParam, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitColIdent(in.Key, f); err != nil { + return err + } + return nil +} diff --git a/go/vt/sqlparser/cached_size.go b/go/vt/sqlparser/cached_size.go new file mode 100644 index 00000000000..3f2eb013846 --- /dev/null 
+++ b/go/vt/sqlparser/cached_size.go @@ -0,0 +1,2351 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package sqlparser + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *AddColumns) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Columns []*vitess.io/vitess/go/vt/sqlparser.ColumnDefinition + { + size += int64(cap(cached.Columns)) * int64(8) + for _, elem := range cached.Columns { + size += elem.CachedSize(true) + } + } + // field First *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.First.CachedSize(true) + // field After *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.After.CachedSize(true) + return size +} +func (cached *AddConstraintDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field ConstraintDefinition *vitess.io/vitess/go/vt/sqlparser.ConstraintDefinition + size += cached.ConstraintDefinition.CachedSize(true) + return size +} +func (cached *AddIndexDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field IndexDefinition *vitess.io/vitess/go/vt/sqlparser.IndexDefinition + size += cached.IndexDefinition.CachedSize(true) + return size +} +func (cached 
*AliasedExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field As vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.As.CachedSize(false) + return size +} +func (cached *AliasedTableExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.SimpleTableExpr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Partitions vitess.io/vitess/go/vt/sqlparser.Partitions + { + size += int64(cap(cached.Partitions)) * int64(40) + for _, elem := range cached.Partitions { + size += elem.CachedSize(false) + } + } + // field As vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.As.CachedSize(false) + // field Hints *vitess.io/vitess/go/vt/sqlparser.IndexHints + size += cached.Hints.CachedSize(true) + return size +} +func (cached *AlterCharset) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field CharacterSet string + size += int64(len(cached.CharacterSet)) + // field Collate string + size += int64(len(cached.Collate)) + return size +} +func (cached *AlterColumn) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Column *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.Column.CachedSize(true) + // field DefaultVal vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.DefaultVal.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *AlterDatabase) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + 
size += int64(49) + } + // field DBName vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.DBName.CachedSize(false) + // field AlterOptions []vitess.io/vitess/go/vt/sqlparser.CollateAndCharset + { + size += int64(cap(cached.AlterOptions)) * int64(24) + for _, elem := range cached.AlterOptions { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *AlterMigration) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field UUID string + size += int64(len(cached.UUID)) + return size +} +func (cached *AlterTable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(65) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + // field AlterOptions []vitess.io/vitess/go/vt/sqlparser.AlterOption + { + size += int64(cap(cached.AlterOptions)) * int64(16) + for _, elem := range cached.AlterOptions { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field PartitionSpec *vitess.io/vitess/go/vt/sqlparser.PartitionSpec + size += cached.PartitionSpec.CachedSize(true) + return size +} +func (cached *AlterView) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field ViewName vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.ViewName.CachedSize(false) + // field Algorithm string + size += int64(len(cached.Algorithm)) + // field Definer string + size += int64(len(cached.Definer)) + // field Security string + size += int64(len(cached.Security)) + // field Columns vitess.io/vitess/go/vt/sqlparser.Columns + { + size += int64(cap(cached.Columns)) * int64(40) + for _, elem := range cached.Columns { + size += elem.CachedSize(false) + } + } + // field Select vitess.io/vitess/go/vt/sqlparser.SelectStatement + if cc, ok := 
cached.Select.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field CheckOption string + size += int64(len(cached.CheckOption)) + return size +} +func (cached *AlterVschema) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + // field VindexSpec *vitess.io/vitess/go/vt/sqlparser.VindexSpec + size += cached.VindexSpec.CachedSize(true) + // field VindexCols []vitess.io/vitess/go/vt/sqlparser.ColIdent + { + size += int64(cap(cached.VindexCols)) * int64(40) + for _, elem := range cached.VindexCols { + size += elem.CachedSize(false) + } + } + // field AutoIncSpec *vitess.io/vitess/go/vt/sqlparser.AutoIncSpec + size += cached.AutoIncSpec.CachedSize(true) + return size +} +func (cached *AndExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Left vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Right vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *AutoIncSpec) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(72) + } + // field Column vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Column.CachedSize(false) + // field Sequence vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Sequence.CachedSize(false) + return size +} +func (cached *BinaryExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Left vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field 
Right vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *BindVarNeeds) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(73) + } + // field NeedFunctionResult []string + { + size += int64(cap(cached.NeedFunctionResult)) * int64(16) + for _, elem := range cached.NeedFunctionResult { + size += int64(len(elem)) + } + } + // field NeedSystemVariable []string + { + size += int64(cap(cached.NeedSystemVariable)) * int64(16) + for _, elem := range cached.NeedSystemVariable { + size += int64(len(elem)) + } + } + // field NeedUserDefinedVariables []string + { + size += int64(cap(cached.NeedUserDefinedVariables)) * int64(16) + for _, elem := range cached.NeedUserDefinedVariables { + size += int64(len(elem)) + } + } + return size +} +func (cached *CallProc) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Name vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Name.CachedSize(false) + // field Params vitess.io/vitess/go/vt/sqlparser.Exprs + { + size += int64(cap(cached.Params)) * int64(16) + for _, elem := range cached.Params { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *CaseExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Whens []*vitess.io/vitess/go/vt/sqlparser.When + { + size += int64(cap(cached.Whens)) * int64(8) + for _, elem := range cached.Whens { + size += elem.CachedSize(true) + } + } + // field Else vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Else.(cachedObject); ok { + size += 
cc.CachedSize(true) + } + return size +} +func (cached *ChangeColumn) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field OldColumn *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.OldColumn.CachedSize(true) + // field NewColDefinition *vitess.io/vitess/go/vt/sqlparser.ColumnDefinition + size += cached.NewColDefinition.CachedSize(true) + // field First *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.First.CachedSize(true) + // field After *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.After.CachedSize(true) + return size +} +func (cached *CheckConstraintDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(17) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ColIdent) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field val string + size += int64(len(cached.val)) + // field lowered string + size += int64(len(cached.lowered)) + return size +} +func (cached *ColName) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(88) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Qualifier vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Qualifier.CachedSize(false) + return size +} +func (cached *CollateAndCharset) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Value string + size += int64(len(cached.Value)) + return size +} +func (cached *CollateExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := 
int64(0) + if alloc { + size += int64(32) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Charset string + size += int64(len(cached.Charset)) + return size +} +func (cached *ColumnDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(144) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Type vitess.io/vitess/go/vt/sqlparser.ColumnType + size += cached.Type.CachedSize(false) + return size +} +func (cached *ColumnType) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(104) + } + // field Type string + size += int64(len(cached.Type)) + // field Options *vitess.io/vitess/go/vt/sqlparser.ColumnTypeOptions + size += cached.Options.CachedSize(true) + // field Length *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Length.CachedSize(true) + // field Scale *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Scale.CachedSize(true) + // field Charset string + size += int64(len(cached.Charset)) + // field Collate string + size += int64(len(cached.Collate)) + // field EnumValues []string + { + size += int64(cap(cached.EnumValues)) * int64(16) + for _, elem := range cached.EnumValues { + size += int64(len(elem)) + } + } + return size +} +func (cached *ColumnTypeOptions) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Null *bool + size += int64(1) + // field Default vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Default.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field OnUpdate vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.OnUpdate.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Comment 
*vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Comment.CachedSize(true) + return size +} +func (cached *ComparisonExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Left vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Right vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Escape vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Escape.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ConstraintDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Details vitess.io/vitess/go/vt/sqlparser.ConstraintInfo + if cc, ok := cached.Details.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ConvertExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Type *vitess.io/vitess/go/vt/sqlparser.ConvertType + size += cached.Type.CachedSize(true) + return size +} +func (cached *ConvertType) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Type string + size += int64(len(cached.Type)) + // field Length *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Length.CachedSize(true) + // field Scale *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Scale.CachedSize(true) + // field Charset string + size += 
int64(len(cached.Charset)) + return size +} +func (cached *ConvertUsingExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Type string + size += int64(len(cached.Type)) + return size +} +func (cached *CreateDatabase) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(73) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field DBName vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.DBName.CachedSize(false) + // field CreateOptions []vitess.io/vitess/go/vt/sqlparser.CollateAndCharset + { + size += int64(cap(cached.CreateOptions)) * int64(24) + for _, elem := range cached.CreateOptions { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *CreateTable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(65) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + // field TableSpec *vitess.io/vitess/go/vt/sqlparser.TableSpec + size += cached.TableSpec.CachedSize(true) + // field OptLike *vitess.io/vitess/go/vt/sqlparser.OptLike + size += cached.OptLike.CachedSize(true) + return size +} +func (cached *CreateView) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(137) + } + // field ViewName vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.ViewName.CachedSize(false) + // field Algorithm string + size += int64(len(cached.Algorithm)) + // field Definer string + size += int64(len(cached.Definer)) + 
// field Security string + size += int64(len(cached.Security)) + // field Columns vitess.io/vitess/go/vt/sqlparser.Columns + { + size += int64(cap(cached.Columns)) * int64(40) + for _, elem := range cached.Columns { + size += elem.CachedSize(false) + } + } + // field Select vitess.io/vitess/go/vt/sqlparser.SelectStatement + if cc, ok := cached.Select.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field CheckOption string + size += int64(len(cached.CheckOption)) + return size +} +func (cached *CurTimeFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Fsp vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Fsp.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Default) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field ColName string + size += int64(len(cached.ColName)) + return size +} +func (cached *Delete) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(144) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field Targets vitess.io/vitess/go/vt/sqlparser.TableNames + { + size += int64(cap(cached.Targets)) * int64(32) + for _, elem := range cached.Targets { + size += elem.CachedSize(false) + } + } + // field TableExprs vitess.io/vitess/go/vt/sqlparser.TableExprs + { + size += int64(cap(cached.TableExprs)) * int64(16) + for _, elem := range cached.TableExprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field Partitions vitess.io/vitess/go/vt/sqlparser.Partitions + 
{ + size += int64(cap(cached.Partitions)) * int64(40) + for _, elem := range cached.Partitions { + size += elem.CachedSize(false) + } + } + // field Where *vitess.io/vitess/go/vt/sqlparser.Where + size += cached.Where.CachedSize(true) + // field OrderBy vitess.io/vitess/go/vt/sqlparser.OrderBy + { + size += int64(cap(cached.OrderBy)) * int64(8) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(true) + } + } + // field Limit *vitess.io/vitess/go/vt/sqlparser.Limit + size += cached.Limit.CachedSize(true) + return size +} +func (cached *DerivedTable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Select vitess.io/vitess/go/vt/sqlparser.SelectStatement + if cc, ok := cached.Select.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *DropColumn) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field Name *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.Name.CachedSize(true) + return size +} +func (cached *DropDatabase) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(41) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field DBName vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.DBName.CachedSize(false) + return size +} +func (cached *DropKey) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + return size +} +func (cached *DropTable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + 
if alloc { + size += int64(33) + } + // field FromTables vitess.io/vitess/go/vt/sqlparser.TableNames + { + size += int64(cap(cached.FromTables)) * int64(32) + for _, elem := range cached.FromTables { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *DropView) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(25) + } + // field FromTables vitess.io/vitess/go/vt/sqlparser.TableNames + { + size += int64(cap(cached.FromTables)) * int64(32) + for _, elem := range cached.FromTables { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *ExistsExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field Subquery *vitess.io/vitess/go/vt/sqlparser.Subquery + size += cached.Subquery.CachedSize(true) + return size +} +func (cached *ExplainStmt) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Statement vitess.io/vitess/go/vt/sqlparser.Statement + if cc, ok := cached.Statement.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ExplainTab) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + // field Wild string + size += int64(len(cached.Wild)) + return size +} +func (cached *Flush) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(58) + } + // field FlushOptions []string + { + size += int64(cap(cached.FlushOptions)) * int64(16) + for _, elem := range cached.FlushOptions { + size += int64(len(elem)) + } + } + // field TableNames vitess.io/vitess/go/vt/sqlparser.TableNames + { + size += 
int64(cap(cached.TableNames)) * int64(32) + for _, elem := range cached.TableNames { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *ForeignKeyDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(96) + } + // field Source vitess.io/vitess/go/vt/sqlparser.Columns + { + size += int64(cap(cached.Source)) * int64(40) + for _, elem := range cached.Source { + size += elem.CachedSize(false) + } + } + // field ReferencedTable vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.ReferencedTable.CachedSize(false) + // field ReferencedColumns vitess.io/vitess/go/vt/sqlparser.Columns + { + size += int64(cap(cached.ReferencedColumns)) * int64(40) + for _, elem := range cached.ReferencedColumns { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *FuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(88) + } + // field Qualifier vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.Qualifier.CachedSize(false) + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Exprs vitess.io/vitess/go/vt/sqlparser.SelectExprs + { + size += int64(cap(cached.Exprs)) * int64(16) + for _, elem := range cached.Exprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *GroupConcatExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field Exprs vitess.io/vitess/go/vt/sqlparser.SelectExprs + { + size += int64(cap(cached.Exprs)) * int64(16) + for _, elem := range cached.Exprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field OrderBy vitess.io/vitess/go/vt/sqlparser.OrderBy + { + size += int64(cap(cached.OrderBy)) * int64(8) + for _, elem 
:= range cached.OrderBy { + size += elem.CachedSize(true) + } + } + // field Separator string + size += int64(len(cached.Separator)) + // field Limit *vitess.io/vitess/go/vt/sqlparser.Limit + size += cached.Limit.CachedSize(true) + return size +} +func (cached *IndexColumn) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(49) + } + // field Column vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Column.CachedSize(false) + // field Length *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Length.CachedSize(true) + return size +} +func (cached *IndexDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Info *vitess.io/vitess/go/vt/sqlparser.IndexInfo + size += cached.Info.CachedSize(true) + // field Columns []*vitess.io/vitess/go/vt/sqlparser.IndexColumn + { + size += int64(cap(cached.Columns)) * int64(8) + for _, elem := range cached.Columns { + size += elem.CachedSize(true) + } + } + // field Options []*vitess.io/vitess/go/vt/sqlparser.IndexOption + { + size += int64(cap(cached.Options)) * int64(8) + for _, elem := range cached.Options { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *IndexHints) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Indexes []vitess.io/vitess/go/vt/sqlparser.ColIdent + { + size += int64(cap(cached.Indexes)) * int64(40) + for _, elem := range cached.Indexes { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *IndexInfo) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(100) + } + // field Type string + size += int64(len(cached.Type)) + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field 
ConstraintName vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.ConstraintName.CachedSize(false) + return size +} +func (cached *IndexOption) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Name string + size += int64(len(cached.Name)) + // field Value *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Value.CachedSize(true) + // field String string + size += int64(len(cached.String)) + return size +} +func (cached *Insert) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(160) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + // field Partitions vitess.io/vitess/go/vt/sqlparser.Partitions + { + size += int64(cap(cached.Partitions)) * int64(40) + for _, elem := range cached.Partitions { + size += elem.CachedSize(false) + } + } + // field Columns vitess.io/vitess/go/vt/sqlparser.Columns + { + size += int64(cap(cached.Columns)) * int64(40) + for _, elem := range cached.Columns { + size += elem.CachedSize(false) + } + } + // field Rows vitess.io/vitess/go/vt/sqlparser.InsertRows + if cc, ok := cached.Rows.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field OnDup vitess.io/vitess/go/vt/sqlparser.OnDup + { + size += int64(cap(cached.OnDup)) * int64(8) + for _, elem := range cached.OnDup { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *IntervalExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + 
} + // field Unit string + size += int64(len(cached.Unit)) + return size +} +func (cached *IsExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *JoinCondition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field On vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.On.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Using vitess.io/vitess/go/vt/sqlparser.Columns + { + size += int64(cap(cached.Using)) * int64(40) + for _, elem := range cached.Using { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *JoinTableExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field LeftExpr vitess.io/vitess/go/vt/sqlparser.TableExpr + if cc, ok := cached.LeftExpr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field RightExpr vitess.io/vitess/go/vt/sqlparser.TableExpr + if cc, ok := cached.RightExpr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Condition vitess.io/vitess/go/vt/sqlparser.JoinCondition + size += cached.Condition.CachedSize(false) + return size +} +func (cached *KeyState) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(1) + } + return size +} +func (cached *Limit) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Offset vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Offset.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Rowcount vitess.io/vitess/go/vt/sqlparser.Expr + if cc, 
ok := cached.Rowcount.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Literal) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Val string + size += int64(len(cached.Val)) + return size +} +func (cached *LockOption) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(1) + } + return size +} +func (cached *LockTables) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Tables vitess.io/vitess/go/vt/sqlparser.TableAndLockTypes + { + size += int64(cap(cached.Tables)) * int64(8) + for _, elem := range cached.Tables { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *MatchExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(41) + } + // field Columns vitess.io/vitess/go/vt/sqlparser.SelectExprs + { + size += int64(cap(cached.Columns)) * int64(16) + for _, elem := range cached.Columns { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ModifyColumn) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field NewColDefinition *vitess.io/vitess/go/vt/sqlparser.ColumnDefinition + size += cached.NewColDefinition.CachedSize(true) + // field First *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.First.CachedSize(true) + // field After *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.After.CachedSize(true) + return size +} +func (cached *Nextval) CachedSize(alloc bool) int64 { + if cached == nil { + 
return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *NotExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *OptLike) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field LikeTable vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.LikeTable.CachedSize(false) + return size +} +func (cached *OrExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Left vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Right vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Order) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(17) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *OrderByOption) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Cols vitess.io/vitess/go/vt/sqlparser.Columns + { + size += int64(cap(cached.Cols)) * int64(40) + for _, elem := range cached.Cols { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *ParenSelect) CachedSize(alloc bool) int64 { + if cached 
== nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Select vitess.io/vitess/go/vt/sqlparser.SelectStatement + if cc, ok := cached.Select.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ParenTableExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Exprs vitess.io/vitess/go/vt/sqlparser.TableExprs + { + size += int64(cap(cached.Exprs)) * int64(16) + for _, elem := range cached.Exprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *ParsedQuery) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Query string + size += int64(len(cached.Query)) + // field bindLocations []vitess.io/vitess/go/vt/sqlparser.bindLocation + { + size += int64(cap(cached.bindLocations)) * int64(16) + } + return size +} +func (cached *PartitionDefinition) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(57) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Limit vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Limit.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *PartitionSpec) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(112) + } + // field Names vitess.io/vitess/go/vt/sqlparser.Partitions + { + size += int64(cap(cached.Names)) * int64(40) + for _, elem := range cached.Names { + size += elem.CachedSize(false) + } + } + // field Number *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Number.CachedSize(true) + // field TableName vitess.io/vitess/go/vt/sqlparser.TableName + size += 
cached.TableName.CachedSize(false) + // field Definitions []*vitess.io/vitess/go/vt/sqlparser.PartitionDefinition + { + size += int64(cap(cached.Definitions)) * int64(8) + for _, elem := range cached.Definitions { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *RangeCond) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Left vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field From vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.From.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field To vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.To.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Release) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + return size +} +func (cached *RenameIndex) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field OldName vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.OldName.CachedSize(false) + // field NewName vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.NewName.CachedSize(false) + return size +} +func (cached *RenameTable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field TablePairs []*vitess.io/vitess/go/vt/sqlparser.RenameTablePair + { + size += int64(cap(cached.TablePairs)) * int64(8) + for _, elem := range cached.TablePairs { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *RenameTableName) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size 
:= int64(0) + if alloc { + size += int64(32) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + return size +} +func (cached *RenameTablePair) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field FromTable vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.FromTable.CachedSize(false) + // field ToTable vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.ToTable.CachedSize(false) + return size +} +func (cached *RevertMigration) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field UUID string + size += int64(len(cached.UUID)) + return size +} +func (cached *SRollback) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + return size +} +func (cached *Savepoint) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + return size +} +func (cached *Select) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(176) + } + // field Cache *bool + size += int64(1) + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field SelectExprs vitess.io/vitess/go/vt/sqlparser.SelectExprs + { + size += int64(cap(cached.SelectExprs)) * int64(16) + for _, elem := range cached.SelectExprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field From 
vitess.io/vitess/go/vt/sqlparser.TableExprs + { + size += int64(cap(cached.From)) * int64(16) + for _, elem := range cached.From { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field Where *vitess.io/vitess/go/vt/sqlparser.Where + size += cached.Where.CachedSize(true) + // field GroupBy vitess.io/vitess/go/vt/sqlparser.GroupBy + { + size += int64(cap(cached.GroupBy)) * int64(16) + for _, elem := range cached.GroupBy { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field Having *vitess.io/vitess/go/vt/sqlparser.Where + size += cached.Having.CachedSize(true) + // field OrderBy vitess.io/vitess/go/vt/sqlparser.OrderBy + { + size += int64(cap(cached.OrderBy)) * int64(8) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(true) + } + } + // field Limit *vitess.io/vitess/go/vt/sqlparser.Limit + size += cached.Limit.CachedSize(true) + // field Into *vitess.io/vitess/go/vt/sqlparser.SelectInto + size += cached.Into.CachedSize(true) + return size +} +func (cached *SelectInto) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(104) + } + // field FileName string + size += int64(len(cached.FileName)) + // field Charset string + size += int64(len(cached.Charset)) + // field FormatOption string + size += int64(len(cached.FormatOption)) + // field ExportOption string + size += int64(len(cached.ExportOption)) + // field Manifest string + size += int64(len(cached.Manifest)) + // field Overwrite string + size += int64(len(cached.Overwrite)) + return size +} +func (cached *Set) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field Exprs 
vitess.io/vitess/go/vt/sqlparser.SetExprs + { + size += int64(cap(cached.Exprs)) * int64(8) + for _, elem := range cached.Exprs { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *SetExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *SetTransaction) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(72) + } + // field SQLNode vitess.io/vitess/go/vt/sqlparser.SQLNode + if cc, ok := cached.SQLNode.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field Characteristics []vitess.io/vitess/go/vt/sqlparser.Characteristic + { + size += int64(cap(cached.Characteristics)) * int64(16) + for _, elem := range cached.Characteristics { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *Show) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Internal vitess.io/vitess/go/vt/sqlparser.ShowInternal + if cc, ok := cached.Internal.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ShowBasic) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Tbl vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Tbl.CachedSize(false) + // field DbName 
vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.DbName.CachedSize(false) + // field Filter *vitess.io/vitess/go/vt/sqlparser.ShowFilter + size += cached.Filter.CachedSize(true) + return size +} +func (cached *ShowCreate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Op vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Op.CachedSize(false) + return size +} +func (cached *ShowFilter) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Like string + size += int64(len(cached.Like)) + // field Filter vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Filter.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ShowLegacy) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(128) + } + // field Extended string + size += int64(len(cached.Extended)) + // field Type string + size += int64(len(cached.Type)) + // field OnTable vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.OnTable.CachedSize(false) + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + // field ShowTablesOpt *vitess.io/vitess/go/vt/sqlparser.ShowTablesOpt + size += cached.ShowTablesOpt.CachedSize(true) + // field ShowCollationFilterOpt vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.ShowCollationFilterOpt.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ShowTablesOpt) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Full string + size += int64(len(cached.Full)) + // field DbName string + size += int64(len(cached.DbName)) + // field Filter *vitess.io/vitess/go/vt/sqlparser.ShowFilter + size += 
cached.Filter.CachedSize(true) + return size +} +func (cached *StarExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field TableName vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.TableName.CachedSize(false) + return size +} +func (cached *Stream) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(72) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field SelectExpr vitess.io/vitess/go/vt/sqlparser.SelectExpr + if cc, ok := cached.SelectExpr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + return size +} +func (cached *Subquery) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Select vitess.io/vitess/go/vt/sqlparser.SelectStatement + if cc, ok := cached.Select.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *SubstrExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Name *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.Name.CachedSize(true) + // field StrVal *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.StrVal.CachedSize(true) + // field From vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.From.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field To vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.To.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *TableAndLockType) CachedSize(alloc bool) int64 { + if cached == nil { + 
return int64(0) + } + size := int64(0) + if alloc { + size += int64(17) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableExpr + if cc, ok := cached.Table.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *TableIdent) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field v string + size += int64(len(cached.v)) + return size +} +func (cached *TableName) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Name vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.Name.CachedSize(false) + // field Qualifier vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.Qualifier.CachedSize(false) + return size +} +func (cached *TableOption) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Name string + size += int64(len(cached.Name)) + // field Value *vitess.io/vitess/go/vt/sqlparser.Literal + size += cached.Value.CachedSize(true) + // field String string + size += int64(len(cached.String)) + // field Tables vitess.io/vitess/go/vt/sqlparser.TableNames + { + size += int64(cap(cached.Tables)) * int64(32) + for _, elem := range cached.Tables { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *TableSpec) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(96) + } + // field Columns []*vitess.io/vitess/go/vt/sqlparser.ColumnDefinition + { + size += int64(cap(cached.Columns)) * int64(8) + for _, elem := range cached.Columns { + size += elem.CachedSize(true) + } + } + // field Indexes []*vitess.io/vitess/go/vt/sqlparser.IndexDefinition + { + size += int64(cap(cached.Indexes)) * int64(8) + for _, elem := range cached.Indexes { + size += elem.CachedSize(true) + } + } + // 
field Constraints []*vitess.io/vitess/go/vt/sqlparser.ConstraintDefinition + { + size += int64(cap(cached.Constraints)) * int64(8) + for _, elem := range cached.Constraints { + size += elem.CachedSize(true) + } + } + // field Options vitess.io/vitess/go/vt/sqlparser.TableOptions + { + size += int64(cap(cached.Options)) * int64(8) + for _, elem := range cached.Options { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *TablespaceOperation) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(1) + } + return size +} +func (cached *TimestampFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Name string + size += int64(len(cached.Name)) + // field Expr1 vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr1.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Expr2 vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr2.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Unit string + size += int64(len(cached.Unit)) + return size +} +func (cached *TruncateTable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + return size +} +func (cached *UnaryExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Union) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(73) + } + // field FirstStatement vitess.io/vitess/go/vt/sqlparser.SelectStatement + 
if cc, ok := cached.FirstStatement.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field UnionSelects []*vitess.io/vitess/go/vt/sqlparser.UnionSelect + { + size += int64(cap(cached.UnionSelects)) * int64(8) + for _, elem := range cached.UnionSelects { + size += elem.CachedSize(true) + } + } + // field OrderBy vitess.io/vitess/go/vt/sqlparser.OrderBy + { + size += int64(cap(cached.OrderBy)) * int64(8) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(true) + } + } + // field Limit *vitess.io/vitess/go/vt/sqlparser.Limit + size += cached.Limit.CachedSize(true) + return size +} +func (cached *UnionSelect) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Statement vitess.io/vitess/go/vt/sqlparser.SelectStatement + if cc, ok := cached.Statement.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Update) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(120) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field TableExprs vitess.io/vitess/go/vt/sqlparser.TableExprs + { + size += int64(cap(cached.TableExprs)) * int64(16) + for _, elem := range cached.TableExprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field Exprs vitess.io/vitess/go/vt/sqlparser.UpdateExprs + { + size += int64(cap(cached.Exprs)) * int64(8) + for _, elem := range cached.Exprs { + size += elem.CachedSize(true) + } + } + // field Where *vitess.io/vitess/go/vt/sqlparser.Where + size += cached.Where.CachedSize(true) + // field OrderBy vitess.io/vitess/go/vt/sqlparser.OrderBy + { + size += int64(cap(cached.OrderBy)) * int64(8) + for _, elem := range cached.OrderBy { + size += 
elem.CachedSize(true) + } + } + // field Limit *vitess.io/vitess/go/vt/sqlparser.Limit + size += cached.Limit.CachedSize(true) + return size +} +func (cached *UpdateExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Name *vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.Name.CachedSize(true) + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Use) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field DBName vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.DBName.CachedSize(false) + return size +} +func (cached *VStream) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(88) + } + // field Comments vitess.io/vitess/go/vt/sqlparser.Comments + { + size += int64(cap(cached.Comments)) * int64(16) + for _, elem := range cached.Comments { + size += int64(len(elem)) + } + } + // field SelectExpr vitess.io/vitess/go/vt/sqlparser.SelectExpr + if cc, ok := cached.SelectExpr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + // field Where *vitess.io/vitess/go/vt/sqlparser.Where + size += cached.Where.CachedSize(true) + // field Limit *vitess.io/vitess/go/vt/sqlparser.Limit + size += cached.Limit.CachedSize(true) + return size +} +func (cached *Validation) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(1) + } + return size +} +func (cached *ValuesFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field Name 
*vitess.io/vitess/go/vt/sqlparser.ColName + size += cached.Name.CachedSize(true) + return size +} +func (cached *VindexParam) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Key vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Key.CachedSize(false) + // field Val string + size += int64(len(cached.Val)) + return size +} +func (cached *VindexSpec) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(104) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + // field Type vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Type.CachedSize(false) + // field Params []vitess.io/vitess/go/vt/sqlparser.VindexParam + { + size += int64(cap(cached.Params)) * int64(56) + for _, elem := range cached.Params { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *When) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Cond vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Cond.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Val vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Val.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Where) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Expr vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *XorExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Left vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size 
+= cc.CachedSize(true) + } + // field Right vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} diff --git a/go/vt/sqlparser/comments.go b/go/vt/sqlparser/comments.go index 1b5d91d297b..94ad9936085 100644 --- a/go/vt/sqlparser/comments.go +++ b/go/vt/sqlparser/comments.go @@ -181,6 +181,9 @@ func ExtractMysqlComment(sql string) (string, string) { if endOfVersionIndex < 0 { return "", "" } + if endOfVersionIndex < 5 { + endOfVersionIndex = 0 + } version := sql[0:endOfVersionIndex] innerSQL := strings.TrimFunc(sql[endOfVersionIndex:], unicode.IsSpace) diff --git a/go/vt/sqlparser/constants.go b/go/vt/sqlparser/constants.go index bbea16d83df..152e728df75 100644 --- a/go/vt/sqlparser/constants.go +++ b/go/vt/sqlparser/constants.go @@ -16,8 +16,10 @@ limitations under the License. package sqlparser +// String constants to be used in ast. const ( // Select.Distinct + AllStr = "all " DistinctStr = "distinct " StraightJoinHint = "straight_join " SQLCalcFoundRowsStr = "sql_calc_found_rows " @@ -209,16 +211,33 @@ const ( LowPriorityWriteStr = "low_priority write" // ShowCommand Types - CharsetStr = " charset" - CollationStr = " collation" - DatabaseStr = " databases" - FunctionStr = " function status" - PrivilegeStr = " privileges" - ProcedureStr = " procedure status" - StatusGlobalStr = " global status" - StatusSessionStr = " status" - VariableGlobalStr = " global variables" - VariableSessionStr = " variables" + CharsetStr = " charset" + CollationStr = " collation" + ColumnStr = " columns" + CreateDbStr = " create database" + CreateEStr = " create event" + CreateFStr = " create function" + CreateProcStr = " create procedure" + CreateTblStr = " create table" + CreateTrStr = " create trigger" + CreateVStr = " create view" + DatabaseStr = " databases" + FunctionCStr = " function code" + FunctionStr = " function status" + IndexStr = " indexes" + OpenTableStr = " open tables" + PrivilegeStr = " 
privileges" + ProcedureCStr = " procedure code" + ProcedureStr = " procedure status" + StatusGlobalStr = " global status" + StatusSessionStr = " status" + TableStr = " tables" + TableStatusStr = " table status" + TriggerStr = " triggers" + VariableGlobalStr = " global variables" + VariableSessionStr = " variables" + KeyspaceStr = " keyspaces" + VitessMigrationsStr = " vitess_migrations" // DropKeyType strings PrimaryKeyTypeStr = "primary key" @@ -259,7 +278,6 @@ const ( DropDDLAction RenameDDLAction TruncateDDLAction - FlushDDLAction CreateVindexDDLAction DropVindexDDLAction AddVschemaTableDDLAction @@ -268,6 +286,7 @@ const ( DropColVindexDDLAction AddSequenceDDLAction AddAutoIncDDLAction + RevertDDLAction ) // Constants for Enum Type - Scope @@ -451,14 +470,31 @@ const ( UnknownCommandType ShowCommandType = iota Charset Collation + Column + CreateDb + CreateE + CreateF + CreateProc + CreateTbl + CreateTr + CreateV Database + FunctionC Function + Index + OpenTable Privilege + ProcedureC Procedure StatusGlobal StatusSession + Table + TableStatus + Trigger VariableGlobal VariableSession + VitessMigrations + Keyspace ) // DropKeyType constants @@ -475,3 +511,10 @@ const ( SharedType ExclusiveType ) + +const ( + RetryMigrationType AlterMigrationType = iota + CompleteMigrationType + CancelMigrationType + CancelAllMigrationType +) diff --git a/go/vt/sqlparser/expression_converter.go b/go/vt/sqlparser/expression_converter.go index d1f7720d949..21297cca3ff 100644 --- a/go/vt/sqlparser/expression_converter.go +++ b/go/vt/sqlparser/expression_converter.go @@ -33,11 +33,11 @@ func Convert(e Expr) (evalengine.Expr, error) { case *Literal: switch node.Type { case IntVal: - return evalengine.NewLiteralIntFromBytes(node.Val) + return evalengine.NewLiteralIntFromBytes(node.Bytes()) case FloatVal: - return evalengine.NewLiteralFloat(node.Val) + return evalengine.NewLiteralFloat(node.Bytes()) case StrVal: - return evalengine.NewLiteralString(node.Val), nil + return 
evalengine.NewLiteralString(node.Bytes()), nil } case BoolVal: if node { diff --git a/go/vt/sqlparser/goyacc/goyacc.go b/go/vt/sqlparser/goyacc/goyacc.go new file mode 100644 index 00000000000..567a2240664 --- /dev/null +++ b/go/vt/sqlparser/goyacc/goyacc.go @@ -0,0 +1,3765 @@ +/* +Derived from Inferno's utils/iyacc/yacc.c +http://code.google.com/p/inferno-os/source/browse/utils/iyacc/yacc.c + +This copyright NOTICE applies to all files in this directory and +subdirectories, unless another copyright notice appears in a given +file or subdirectory. If you take substantial code from this software to use in +other programs, you must somehow include with it an appropriate +copyright notice that includes the copyright notice and the other +notices below. It is fine (and often tidier) to do that in a separate +file such as NOTICE, LICENCE or COPYING. + + Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. + Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) + Portions Copyright © 1997-1999 Vita Nuova Limited + Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) + Portions Copyright © 2004,2006 Bruce Ellis + Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) + Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others + Portions Copyright © 2009 The Go Authors. All rights reserved. + Portions Copyright © 2021 The Vitess Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +package main + +// yacc +// major difference is lack of stem ("y" variable) +// + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "regexp" + "sort" + "strconv" + "strings" + "unicode" +) + +// the following are adjustable +// according to memory size +const ( + ACTSIZE = 120000 + NSTATES = 8000 + TEMPSIZE = 8000 + + SYMINC = 50 // increase for non-term or term + RULEINC = 50 // increase for max rule length prodptr[i] + PRODINC = 100 // increase for productions prodptr + WSETINC = 50 // increase for working sets wsets + STATEINC = 200 // increase for states statemem + + PRIVATE = 0xE000 // unicode private use + + // relationships which must hold: + // TEMPSIZE >= NTERMS + NNONTERM + 1; + // TEMPSIZE >= NSTATES; + // + + NTBASE = 010000 + ERRCODE = 8190 + ACCEPTCODE = 8191 + YYLEXUNK = 3 + TOKSTART = 4 //index of first defined token +) + +// no, left, right, binary assoc. 
+const ( + LASC = iota + 1 + RASC + BASC +) + +// flags for state generation +const ( + DONE = iota + MUSTDO + MUSTLOOKAHEAD +) + +// flags for a rule having an action, and being reduced +const ( + ACTFLAG = 1 << (iota + 2) + REDFLAG +) + +// output parser flags +const yyFlag = -1000 + +// parse tokens +const ( + IDENTIFIER = PRIVATE + iota + MARK + TERM + LEFT + RIGHT + BINARY + PREC + LCURLY + IDENTCOLON + NUMBER + START + TYPEDEF + TYPENAME + STRUCT + UNION + ERROR +) + +const ENDFILE = 0 +const EMPTY = 1 +const WHOKNOWS = 0 +const OK = 1 +const NOMORE = -1000 + +// macros for getting associativity and precedence levels +func ASSOC(i int) int { return i & 3 } + +func PLEVEL(i int) int { return (i >> 4) & 077 } + +func TYPE(i int) int { return (i >> 10) & 077 } + +// macros for setting associativity and precedence levels +func SETASC(i, j int) int { return i | j } + +func SETPLEV(i, j int) int { return i | (j << 4) } + +func SETTYPE(i, j int) int { return i | (j << 10) } + +// I/O descriptors +var finput *bufio.Reader // input file +var stderr *bufio.Writer +var ftable *bufio.Writer // y.go file +var fcode = &bytes.Buffer{} // saved code +var ftypes = &bytes.Buffer{} // saved type definitions +var foutput *bufio.Writer // y.output file + +var writtenImports bool // output file has recorded an import of "fmt" + +var oflag string // -o [y.go] - y.go file +var vflag string // -v [y.output] - y.output file +var lflag bool // -l - disable line directives +var prefix string // name prefix for identifiers, default yy +var allowFastAppend bool + +func init() { + flag.StringVar(&oflag, "o", "y.go", "parser output") + flag.StringVar(&prefix, "p", "yy", "name prefix to use in generated code") + flag.StringVar(&vflag, "v", "y.output", "create parsing tables") + flag.BoolVar(&lflag, "l", false, "disable line directives") + flag.BoolVar(&allowFastAppend, "fast-append", false, "enable fast-append optimization") +} + +var initialstacksize = 16 + +// communication variables 
between various I/O routines +var infile string // input file name +var numbval int // value of an input number +var tokname string // input token name, slop for runes and 0 +var tokflag = false + +// structure declarations +type Lkset []int + +type Pitem struct { + prod []int + off int // offset within the production + first int // first term or non-term in item + prodno int // production number for sorting +} + +type Item struct { + pitem Pitem + look Lkset +} + +type Symb struct { + name string + noconst bool + value int +} + +type Wset struct { + pitem Pitem + flag int + ws Lkset +} + +// storage of types +var ntypes int // number of types defined +var typeset = make(map[int]string) // pointers to type tags + +// token information + +var ntokens = 0 // number of tokens +var tokset []Symb +var toklev []int // vector with the precedence of the terminals + +// nonterminal information + +var nnonter = -1 // the number of nonterminals +var nontrst []Symb +var start int // start symbol + +// state information + +var nstate = 0 // number of states +var pstate = make([]int, NSTATES+2) // index into statemem to the descriptions of the states +var statemem []Item +var tystate = make([]int, NSTATES) // contains type information about the states +var tstates []int // states generated by terminal gotos +var ntstates []int // states generated by nonterminal gotos +var mstates = make([]int, NSTATES) // chain of overflows of term/nonterm generation lists +var lastred int // number of last reduction of a state +var defact = make([]int, NSTATES) // default actions of states + +// lookahead set information + +var nolook = 0 // flag to turn off lookahead computations +var tbitset = 0 // size of lookahead sets +var clset Lkset // temporary storage for lookahead computations + +// working set information + +var wsets []Wset +var cwp int + +// storage for action table + +var amem []int // action table storage +var memp int // next free action table position +var indgo = make([]int, 
NSTATES) // index to the stored goto table + +// temporary vector, indexable by states, terms, or ntokens + +var temp1 = make([]int, TEMPSIZE) // temporary storage, indexed by terms + ntokens or states +var lineno = 1 // current input line number +var fatfl = 1 // if on, error is fatal +var nerrors = 0 // number of errors + +// assigned token type values + +var extval = 0 + +// grammar rule information + +var nprod = 1 // number of productions +var prdptr [][]int // pointers to descriptions of productions +var levprd []int // precedence levels for the productions +var rlines []int // line number for this rule + +// statistics collection variables + +var zzgoent = 0 +var zzgobest = 0 +var zzacent = 0 +var zzexcp = 0 +var zzclose = 0 +var zzrrconf = 0 +var zzsrconf = 0 +var zzstate = 0 + +// optimizer arrays + +var yypgo [][]int +var optst [][]int +var ggreed []int +var pgo []int + +var maxspr int // maximum spread of any entry +var maxoff int // maximum offset into a array +var maxa int + +// storage for information about the nonterminals + +var pres [][][]int // vector of pointers to productions yielding each nonterminal +var pfirst []Lkset +var pempty []int // vector of nonterminals nontrivially deriving e + +// random stuff picked out from between functions + +var indebug = 0 // debugging flag for cpfir +var pidebug = 0 // debugging flag for putitem +var gsdebug = 0 // debugging flag for stagen +var cldebug = 0 // debugging flag for closure +var pkdebug = 0 // debugging flag for apack +var g2debug = 0 // debugging for go2gen +var adb = 0 // debugging for callopt + +type Resrv struct { + name string + value int +} + +var resrv = []Resrv{ + {"binary", BINARY}, + {"left", LEFT}, + {"nonassoc", BINARY}, + {"prec", PREC}, + {"right", RIGHT}, + {"start", START}, + {"term", TERM}, + {"token", TERM}, + {"type", TYPEDEF}, + {"union", UNION}, + {"struct", STRUCT}, + {"error", ERROR}, +} + +type Error struct { + lineno int + tokens []string + msg string +} + +var errors 
[]Error + +type Row struct { + actions []int + defaultAction int +} + +var stateTable []Row + +var zznewstate = 0 + +const EOF = -1 + +func main() { + + setup() // initialize and read productions + + tbitset = (ntokens + 32) / 32 + cpres() // make table of which productions yield a given nonterminal + cempty() // make a table of which nonterminals can match the empty string + cpfir() // make a table of firsts of nonterminals + + stagen() // generate the states + + yypgo = make([][]int, nnonter+1) + optst = make([][]int, nstate) + output() // write the states and the tables + go2out() + + hideprod() + summary() + + callopt() + + typeinfo() + others() + + exit(0) +} + +func setup() { + var j, ty int + + stderr = bufio.NewWriter(os.Stderr) + foutput = nil + + flag.Parse() + if flag.NArg() != 1 { + usage() + } + if initialstacksize < 1 { + // never set so cannot happen + fmt.Fprintf(stderr, "yacc: stack size too small\n") + usage() + } + yaccpar = strings.Replace(yaccpartext, "$$", prefix, -1) + openup() + + fmt.Fprintf(ftable, "// Code generated by goyacc %s. DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) + + defin(0, "$end") + extval = PRIVATE // tokens start in unicode 'private use' + defin(0, "error") + defin(1, "$accept") + defin(0, "$unk") + i := 0 + + t := gettok() + +outer: + for { + switch t { + default: + errorf("syntax error tok=%v", t-PRIVATE) + + case MARK, ENDFILE: + break outer + + case ';': + // Do nothing. 
+ + case START: + t = gettok() + if t != IDENTIFIER { + errorf("bad %%start construction") + } + start = chfind(1, tokname) + + case ERROR: + lno := lineno + var tokens []string + for { + t := gettok() + if t == ':' { + break + } + if t != IDENTIFIER && t != IDENTCOLON { + errorf("bad syntax in %%error") + } + tokens = append(tokens, tokname) + if t == IDENTCOLON { + break + } + } + if gettok() != IDENTIFIER { + errorf("bad syntax in %%error") + } + errors = append(errors, Error{lno, tokens, tokname}) + + case TYPEDEF: + t = gettok() + if t != TYPENAME { + errorf("bad syntax in %%type") + } + ty = numbval + for { + t = gettok() + switch t { + case IDENTIFIER: + t = chfind(1, tokname) + if t < NTBASE { + j = TYPE(toklev[t]) + if j != 0 && j != ty { + errorf("type redeclaration of token %s", + tokset[t].name) + } else { + toklev[t] = SETTYPE(toklev[t], ty) + } + } else { + j = nontrst[t-NTBASE].value + if j != 0 && j != ty { + errorf("type redeclaration of nonterminal %v", + nontrst[t-NTBASE].name) + } else { + nontrst[t-NTBASE].value = ty + } + } + continue + + case ',': + continue + } + break + } + continue + + case UNION: + parsetypes(true) + + case STRUCT: + parsetypes(false) + + case LEFT, BINARY, RIGHT, TERM: + // nonzero means new prec. and assoc. + lev := t - TERM + if lev != 0 { + i++ + } + ty = 0 + + // get identifiers so defined + t = gettok() + + // there is a type defined + if t == TYPENAME { + ty = numbval + t = gettok() + } + for { + switch t { + case ',': + t = gettok() + continue + + case ';': + // Do nothing. 
+ + case IDENTIFIER: + j = chfind(0, tokname) + if j >= NTBASE { + errorf("%v defined earlier as nonterminal", tokname) + } + if lev != 0 { + if ASSOC(toklev[j]) != 0 { + errorf("redeclaration of precedence of %v", tokname) + } + toklev[j] = SETASC(toklev[j], lev) + toklev[j] = SETPLEV(toklev[j], i) + } + if ty != 0 { + if TYPE(toklev[j]) != 0 { + errorf("redeclaration of type of %v", tokname) + } + toklev[j] = SETTYPE(toklev[j], ty) + } + t = gettok() + if t == NUMBER { + tokset[j].value = numbval + t = gettok() + } + + continue + } + break + } + continue + + case LCURLY: + cpycode() + } + t = gettok() + } + + if t == ENDFILE { + errorf("unexpected EOF before %%") + } + + fmt.Fprintf(fcode, "switch %snt {\n", prefix) + + moreprod() + prdptr[0] = []int{NTBASE, start, 1, 0} + + nprod = 1 + curprod := make([]int, RULEINC) + t = gettok() + if t != IDENTCOLON { + errorf("bad syntax on first rule") + } + + if start == 0 { + prdptr[0][1] = chfind(1, tokname) + } + + // read rules + // put into prdptr array in the format + // target + // followed by id's of terminals and non-terminals + // followed by -nprod + + for t != MARK && t != ENDFILE { + mem := 0 + + // process a rule + rlines[nprod] = lineno + ruleline := lineno + if t == '|' { + curprod[mem] = prdptr[nprod-1][0] + mem++ + } else if t == IDENTCOLON { + curprod[mem] = chfind(1, tokname) + if curprod[mem] < NTBASE { + lerrorf(ruleline, "token illegal on LHS of grammar rule") + } + mem++ + } else { + lerrorf(ruleline, "illegal rule: missing semicolon or | ?") + } + + // read rule body + t = gettok() + for { + for t == IDENTIFIER { + curprod[mem] = chfind(1, tokname) + if curprod[mem] < NTBASE { + levprd[nprod] = toklev[curprod[mem]] + } + mem++ + if mem >= len(curprod) { + ncurprod := make([]int, mem+RULEINC) + copy(ncurprod, curprod) + curprod = ncurprod + } + t = gettok() + } + if t == PREC { + if gettok() != IDENTIFIER { + lerrorf(ruleline, "illegal %%prec syntax") + } + j = chfind(2, tokname) + if j >= NTBASE { 
+ lerrorf(ruleline, "nonterminal "+nontrst[j-NTBASE].name+" illegal after %%prec") + } + levprd[nprod] = toklev[j] + t = gettok() + } + if t != '=' { + break + } + levprd[nprod] |= ACTFLAG + fmt.Fprintf(fcode, "\n\tcase %v:", nprod) + fmt.Fprintf(fcode, "\n\t\t%sDollar = %sS[%spt-%v:%spt+1]", prefix, prefix, prefix, mem-1, prefix) + + var act bytes.Buffer + var unionType string + cpyact(&act, curprod, mem, &unionType) + + if unionType != "" { + fmt.Fprintf(fcode, "\n\t\tvar %sLOCAL %s", prefix, unionType) + } + fcode.Write(act.Bytes()) + if unionType != "" { + fmt.Fprintf(fcode, "\n\t\t%sVAL.union = %sLOCAL", prefix, prefix) + } + + // action within rule... + t = gettok() + if t == IDENTIFIER { + // make it a nonterminal + j = chfind(1, fmt.Sprintf("$$%v", nprod)) + + // + // the current rule will become rule number nprod+1 + // enter null production for action + // + prdptr[nprod] = make([]int, 2) + prdptr[nprod][0] = j + prdptr[nprod][1] = -nprod + + // update the production information + nprod++ + moreprod() + levprd[nprod] = levprd[nprod-1] & ^ACTFLAG + levprd[nprod-1] = ACTFLAG + rlines[nprod] = lineno + + // make the action appear in the original rule + curprod[mem] = j + mem++ + if mem >= len(curprod) { + ncurprod := make([]int, mem+RULEINC) + copy(ncurprod, curprod) + curprod = ncurprod + } + } + } + + for t == ';' { + t = gettok() + } + curprod[mem] = -nprod + mem++ + + // check that default action is reasonable + if ntypes != 0 && (levprd[nprod]&ACTFLAG) == 0 && + nontrst[curprod[0]-NTBASE].value != 0 { + // no explicit action, LHS has value + tempty := curprod[1] + if tempty < 0 { + lerrorf(ruleline, "must return a value, since LHS has a type") + } + if tempty >= NTBASE { + tempty = nontrst[tempty-NTBASE].value + } else { + tempty = TYPE(toklev[tempty]) + } + if tempty != nontrst[curprod[0]-NTBASE].value { + lerrorf(ruleline, "default action causes potential type clash") + } + } + moreprod() + prdptr[nprod] = make([]int, mem) + copy(prdptr[nprod], 
curprod) + nprod++ + moreprod() + levprd[nprod] = 0 + } + + if TEMPSIZE < ntokens+nnonter+1 { + errorf("too many tokens (%d) or non-terminals (%d)", ntokens, nnonter) + } + + // + // end of all rules + // dump out the prefix code + // + + fmt.Fprintf(fcode, "\n\t}") + + // put out non-literal terminals + for i := TOKSTART; i <= ntokens; i++ { + // non-literals + if !tokset[i].noconst { + fmt.Fprintf(ftable, "const %v = %v\n", tokset[i].name, tokset[i].value) + } + } + + // put out names of tokens + ftable.WriteRune('\n') + fmt.Fprintf(ftable, "var %sToknames = [...]string{\n", prefix) + for i := 1; i <= ntokens; i++ { + fmt.Fprintf(ftable, "\t%q,\n", tokset[i].name) + } + fmt.Fprintf(ftable, "}\n") + + // put out names of states. + // commented out to avoid a huge table just for debugging. + // re-enable to have the names in the binary. + ftable.WriteRune('\n') + fmt.Fprintf(ftable, "var %sStatenames = [...]string{\n", prefix) + // for i:=TOKSTART; i<=ntokens; i++ { + // fmt.Fprintf(ftable, "\t%q,\n", tokset[i].name); + // } + fmt.Fprintf(ftable, "}\n") + + ftable.WriteRune('\n') + fmt.Fprintf(ftable, "const %sEofCode = 1\n", prefix) + fmt.Fprintf(ftable, "const %sErrCode = 2\n", prefix) + fmt.Fprintf(ftable, "const %sInitialStackSize = %v\n", prefix, initialstacksize) + + // + // copy any postfix code + // + if t == MARK { + if !lflag { + fmt.Fprintf(ftable, "\n//line %v:%v\n", infile, lineno) + } + for { + c := getrune(finput) + if c == EOF { + break + } + ftable.WriteRune(c) + } + } +} + +// +// allocate enough room to hold another production +// +func moreprod() { + n := len(prdptr) + if nprod >= n { + nn := n + PRODINC + aprod := make([][]int, nn) + alevprd := make([]int, nn) + arlines := make([]int, nn) + + copy(aprod, prdptr) + copy(alevprd, levprd) + copy(arlines, rlines) + + prdptr = aprod + levprd = alevprd + rlines = arlines + } +} + +// +// define s to be a terminal if nt==0 +// or a nonterminal if nt==1 +// +func defin(nt int, s string) int { + val := 
0 + if nt != 0 { + nnonter++ + if nnonter >= len(nontrst) { + anontrst := make([]Symb, nnonter+SYMINC) + copy(anontrst, nontrst) + nontrst = anontrst + } + nontrst[nnonter] = Symb{name: s} + return NTBASE + nnonter + } + + // must be a token + ntokens++ + if ntokens >= len(tokset) { + nn := ntokens + SYMINC + atokset := make([]Symb, nn) + atoklev := make([]int, nn) + + copy(atoklev, toklev) + copy(atokset, tokset) + + tokset = atokset + toklev = atoklev + } + tokset[ntokens].name = s + toklev[ntokens] = 0 + + // establish value for token + // single character literal + if s[0] == '\'' || s[0] == '"' { + q, err := strconv.Unquote(s) + if err != nil { + errorf("invalid token: %s", err) + } + rq := []rune(q) + if len(rq) != 1 { + errorf("character token too long: %s", s) + } + val = int(rq[0]) + if val == 0 { + errorf("token value 0 is illegal") + } + tokset[ntokens].noconst = true + } else { + val = extval + extval++ + if s[0] == '$' { + tokset[ntokens].noconst = true + } + } + + tokset[ntokens].value = val + return ntokens +} + +var peekline = 0 + +func gettok() int { + var i int + var match, c rune + + tokname = "" + for { + lineno += peekline + peekline = 0 + c = getrune(finput) + for c == ' ' || c == '\n' || c == '\t' || c == '\v' || c == '\r' { + if c == '\n' { + lineno++ + } + c = getrune(finput) + } + + // skip comment -- fix + if c != '/' { + break + } + lineno += skipcom() + } + + switch c { + case EOF: + if tokflag { + fmt.Printf(">>> ENDFILE %v\n", lineno) + } + return ENDFILE + + case '{': + ungetrune(finput, c) + if tokflag { + fmt.Printf(">>> ={ %v\n", lineno) + } + return '=' + + case '<': + // get, and look up, a type name (union member name) + c = getrune(finput) + for c != '>' && c != EOF && c != '\n' { + tokname += string(c) + c = getrune(finput) + } + + if c != '>' { + errorf("unterminated < ... 
> clause") + } + + for i = 1; i <= ntypes; i++ { + if typeset[i] == tokname { + numbval = i + if tokflag { + fmt.Printf(">>> TYPENAME old <%v> %v\n", tokname, lineno) + } + return TYPENAME + } + } + ntypes++ + numbval = ntypes + typeset[numbval] = tokname + if tokflag { + fmt.Printf(">>> TYPENAME new <%v> %v\n", tokname, lineno) + } + return TYPENAME + + case '"', '\'': + match = c + tokname = string(c) + for { + c = getrune(finput) + if c == '\n' || c == EOF { + errorf("illegal or missing ' or \"") + } + if c == '\\' { + tokname += string('\\') + c = getrune(finput) + } else if c == match { + if tokflag { + fmt.Printf(">>> IDENTIFIER \"%v\" %v\n", tokname, lineno) + } + tokname += string(c) + return IDENTIFIER + } + tokname += string(c) + } + + case '%': + c = getrune(finput) + switch c { + case '%': + if tokflag { + fmt.Printf(">>> MARK %%%% %v\n", lineno) + } + return MARK + case '=': + if tokflag { + fmt.Printf(">>> PREC %%= %v\n", lineno) + } + return PREC + case '{': + if tokflag { + fmt.Printf(">>> LCURLY %%{ %v\n", lineno) + } + return LCURLY + } + + getword(c) + // find a reserved word + for i := range resrv { + if tokname == resrv[i].name { + if tokflag { + fmt.Printf(">>> %%%v %v %v\n", tokname, + resrv[i].value-PRIVATE, lineno) + } + return resrv[i].value + } + } + errorf("invalid escape, or illegal reserved word: %v", tokname) + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + numbval = int(c - '0') + for { + c = getrune(finput) + if !isdigit(c) { + break + } + numbval = numbval*10 + int(c-'0') + } + ungetrune(finput, c) + if tokflag { + fmt.Printf(">>> NUMBER %v %v\n", numbval, lineno) + } + return NUMBER + + default: + if isword(c) || c == '.' 
|| c == '$' { + getword(c) + break + } + if tokflag { + fmt.Printf(">>> OPERATOR %v %v\n", string(c), lineno) + } + return int(c) + } + + // look ahead to distinguish IDENTIFIER from IDENTCOLON + c = getrune(finput) + for c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\r' || c == '/' { + if c == '\n' { + peekline++ + } + // look for comments + if c == '/' { + peekline += skipcom() + } + c = getrune(finput) + } + if c == ':' { + if tokflag { + fmt.Printf(">>> IDENTCOLON %v: %v\n", tokname, lineno) + } + return IDENTCOLON + } + + ungetrune(finput, c) + if tokflag { + fmt.Printf(">>> IDENTIFIER %v %v\n", tokname, lineno) + } + return IDENTIFIER +} + +func getword(c rune) { + tokname = "" + for isword(c) || isdigit(c) || c == '.' || c == '$' { + tokname += string(c) + c = getrune(finput) + } + ungetrune(finput, c) +} + +// +// determine the type of a symbol +// +func fdtype(t int) (int, string) { + var v int + var s string + + if t >= NTBASE { + v = nontrst[t-NTBASE].value + s = nontrst[t-NTBASE].name + } else { + v = TYPE(toklev[t]) + s = tokset[t].name + } + return v, s +} + +func chfind(t int, s string) int { + if s[0] == '"' || s[0] == '\'' { + t = 0 + } + for i := 0; i <= ntokens; i++ { + if s == tokset[i].name { + return i + } + } + for i := 0; i <= nnonter; i++ { + if s == nontrst[i].name { + return NTBASE + i + } + } + + // cannot find name + if t > 1 { + errorf("%v should have been defined earlier", s) + } + return defin(t, s) +} + +const ( + startUnion = iota + skippingLeadingBlanks + readingMember + skippingLaterBlanks + readingType +) + +type gotypeinfo struct { + typename string + union bool +} + +var gotypes = make(map[string]*gotypeinfo) + +func typeinfo() { + if !lflag { + fmt.Fprintf(ftable, "\n//line %v:%v\n", infile, lineno) + } + fmt.Fprintf(ftable, "type %sSymType struct {", prefix) + for _, tt := range gotypes { + if tt.union { + fmt.Fprintf(ftable, "\n\tunion interface{}") + break + } + } + ftable.Write(ftypes.Bytes()) + 
fmt.Fprintf(ftable, "\n\tyys int") + fmt.Fprintf(ftable, "\n}\n\n") + + var sortedTypes []string + for member, tt := range gotypes { + if tt.union { + sortedTypes = append(sortedTypes, member) + } + } + sort.Strings(sortedTypes) + + for _, member := range sortedTypes { + tt := gotypes[member] + fmt.Fprintf(ftable, "\nfunc (st *%sSymType) %sUnion() %s {\n", prefix, member, tt.typename) + fmt.Fprintf(ftable, "\tv, _ := st.union.(%s)\n", tt.typename) + fmt.Fprintf(ftable, "\treturn v\n") + fmt.Fprintf(ftable, "}\n") + } +} + +// +// copy the union declaration to the output, and the define file if present +// +func parsetypes(union bool) { + var member, typ bytes.Buffer + state := startUnion + +out: + for { + c := getrune(finput) + if c == EOF { + errorf("EOF encountered while processing %%union") + } + switch c { + case '\n': + lineno++ + if state == readingType { + gotypes[member.String()] = &gotypeinfo{ + typename: typ.String(), + union: union, + } + if !union { + fmt.Fprintf(ftypes, "\n\t%s %s", member.Bytes(), typ.Bytes()) + } + member.Reset() + typ.Reset() + } + state = skippingLeadingBlanks + default: + switch state { + case skippingLeadingBlanks: + if c == ' ' || c == '\t' { + continue + } + if c == '}' { + break out + } + state = readingMember + member.WriteRune(c) + case readingMember: + if c == ' ' || c == '\t' { + state = skippingLaterBlanks + continue + } + member.WriteRune(c) + case skippingLaterBlanks: + if c == ' ' || c == '\t' { + continue + } + state = readingType + typ.WriteRune(c) + case readingType: + typ.WriteRune(c) + } + } + } +} + +// +// saves code between %{ and %} +// adds an import for __fmt__ the first time +// +func cpycode() { + lno := lineno + + c := getrune(finput) + if c == '\n' { + c = getrune(finput) + lineno++ + } + if !lflag { + fmt.Fprintf(ftable, "\n//line %v:%v\n", infile, lineno) + } + // accumulate until %} + code := make([]rune, 0, 1024) + for c != EOF { + if c == '%' { + c = getrune(finput) + if c == '}' { + emitcode(code, 
//
// emits code saved up from between %{ and %}
// called by cpycode
// adds an import for __yyfmt__ after the package clause
//
func emitcode(code []rune, lineno int) {
	// Emit the user's prologue line by line; the first time a line that
	// looks like a "package X" clause is seen, inject the import block the
	// generated parser needs ("fmt" and "unsafe", aliased with the __yy
	// prefix so they cannot collide with user identifiers).
	for i, line := range lines(code) {
		writecode(line)
		if !writtenImports && isPackageClause(line) {
			fmt.Fprintln(ftable, `import (`)
			fmt.Fprintln(ftable, `__yyfmt__ "fmt"`)
			fmt.Fprintln(ftable, `__yyunsafe__ "unsafe"`)
			fmt.Fprintln(ftable, `)`)
			// Unless -l was given, restore the //line directive so compiler
			// errors in the copied code point back at the original .y file.
			if !lflag {
				fmt.Fprintf(ftable, "//line %v:%v\n\t\t", infile, lineno+i)
			}
			// only inject the imports once per output file
			writtenImports = true
		}
	}
}
//
// break code into lines, each slice keeping its trailing '\n'.
// A final fragment with no newline is returned as an unterminated line.
//
func lines(code []rune) [][]rune {
	result := make([][]rune, 0, 100)
	start := 0
	for idx, r := range code {
		if r == '\n' {
			result = append(result, code[start:idx+1])
			start = idx + 1
		}
	}
	if start < len(code) {
		result = append(result, code[start:])
	}
	return result
}
information for %s", typeset[tok]) + } + if !ti.union { + fmt.Fprintf(fcode, "%sVAL.%s", prefix, typeset[tok]) + return + } + + var buf bytes.Buffer + lvalue := false + fastAppend := false + +loop: + for { + c := getrune(finput) + + switch c { + case ' ', '\t': + buf.WriteRune(c) + + case '=': + lvalue = true + if allowFastAppend && *unionType == "" { + peek, err := finput.Peek(16) + if err != nil { + errorf("failed to scan forward: %v", err) + } + match := fastAppendRe.Find(peek) + if len(match) > 0 { + fastAppend = true + for range match { + _ = getrune(finput) + } + } else { + buf.WriteRune(c) + } + break loop + } + + buf.WriteRune(c) + break loop + + default: + buf.WriteRune(c) + break loop + } + } + + if fastAppend { + fmt.Fprintf(fcode, "\t%sSLICE := (*%s)(%sIaddr(%sVAL.union))\n", prefix, ti.typename, prefix, prefix) + fmt.Fprintf(fcode, "\t*%sSLICE = append(*%sSLICE, ", prefix, prefix) + } else if lvalue { + fmt.Fprintf(fcode, "%sLOCAL", prefix) + *unionType = ti.typename + } else if *unionType == "" { + fmt.Fprintf(fcode, "%sVAL.%sUnion()", prefix, typeset[tok]) + } else { + fmt.Fprintf(fcode, "%sLOCAL", prefix) + } + fcode.Write(buf.Bytes()) +} + +// +// copy action to the next ; or closing } +// +func cpyact(fcode *bytes.Buffer, curprod []int, max int, unionType *string) { + if !lflag { + fmt.Fprintf(fcode, "\n//line %v:%v", infile, lineno) + } + fmt.Fprint(fcode, "\n\t\t") + + lno := lineno + brac := 0 + +loop: + for { + c := getrune(finput) + + swt: + switch c { + case ';': + if brac == 0 { + fcode.WriteRune(c) + return + } + + case '{': + brac++ + + case '$': + s := 1 + tok := -1 + c = getrune(finput) + + // type description + if c == '<' { + ungetrune(finput, c) + if gettok() != TYPENAME { + errorf("bad syntax on $ clause") + } + tok = numbval + c = getrune(finput) + } + if c == '$' { + cpyyvalaccess(fcode, curprod, tok, unionType) + continue loop + } + if c == '-' { + s = -s + c = getrune(finput) + } + j := 0 + if isdigit(c) { + for isdigit(c) { + j 
= j*10 + int(c-'0') + c = getrune(finput) + } + ungetrune(finput, c) + j = j * s + if j >= max { + errorf("Illegal use of $%v", j) + } + } else if isword(c) || c == '.' { + // look for $name + ungetrune(finput, c) + if gettok() != IDENTIFIER { + errorf("$ must be followed by an identifier") + } + tokn := chfind(2, tokname) + fnd := -1 + c = getrune(finput) + if c != '@' { + ungetrune(finput, c) + } else if gettok() != NUMBER { + errorf("@ must be followed by number") + } else { + fnd = numbval + } + for j = 1; j < max; j++ { + if tokn == curprod[j] { + fnd-- + if fnd <= 0 { + break + } + } + } + if j >= max { + errorf("$name or $name@number not found") + } + } else { + fcode.WriteRune('$') + if s < 0 { + fcode.WriteRune('-') + } + ungetrune(finput, c) + continue loop + } + fmt.Fprintf(fcode, "%sDollar[%v]", prefix, j) + + // put out the proper tag + if ntypes != 0 { + if j <= 0 && tok < 0 { + errorf("must specify type of $%v", j) + } + if tok < 0 { + tok, _ = fdtype(curprod[j]) + } + ti, ok := gotypes[typeset[tok]] + if !ok { + errorf("missing Go type information for %s", typeset[tok]) + } + if ti.union { + fmt.Fprintf(fcode, ".%sUnion()", typeset[tok]) + } else { + fmt.Fprintf(fcode, ".%s", typeset[tok]) + } + } + continue loop + + case '}': + brac-- + if brac != 0 { + break + } + fcode.WriteRune(c) + return + + case '/': + nc := getrune(finput) + if nc != '/' && nc != '*' { + ungetrune(finput, nc) + break + } + // a comment + fcode.WriteRune(c) + fcode.WriteRune(nc) + c = getrune(finput) + for c != EOF { + switch { + case c == '\n': + lineno++ + if nc == '/' { // end of // comment + break swt + } + case c == '*' && nc == '*': // end of /* comment? 
//
// set elements 0 through n-1 of v to c
//
func aryfil(v []int, n, c int) {
	for i := range v[:n] {
		v[i] = c
	}
}
%v+NTBASE\n", j, prdptr[j][0]-NTBASE) + } + } + + fatfl = 0 // make undefined symbols nonfatal + for i := 0; i <= nnonter; i++ { + n := 0 + c := i + NTBASE + for j := 0; j < nprod; j++ { + if prdptr[j][0] == c { + curres[n] = prdptr[j][1:] + n++ + } + } + if n == 0 { + errorf("nonterminal %v not defined", nontrst[i].name) + continue + } + pres[i] = make([][]int, n) + copy(pres[i], curres) + } + fatfl = 1 + if nerrors != 0 { + summary() + exit(1) + } +} + +// +// mark nonterminals which derive the empty string +// also, look for nonterminals which don't derive any token strings +// +func cempty() { + var i, p, np int + var prd []int + + pempty = make([]int, nnonter+1) + + // first, use the array pempty to detect productions that can never be reduced + // set pempty to WHONOWS + aryfil(pempty, nnonter+1, WHOKNOWS) + + // now, look at productions, marking nonterminals which derive something +more: + for { + for i = 0; i < nprod; i++ { + prd = prdptr[i] + if pempty[prd[0]-NTBASE] != 0 { + continue + } + np = len(prd) - 1 + for p = 1; p < np; p++ { + if prd[p] >= NTBASE && pempty[prd[p]-NTBASE] == WHOKNOWS { + break + } + } + // production can be derived + if p == np { + pempty[prd[0]-NTBASE] = OK + continue more + } + } + break + } + + // now, look at the nonterminals, to see if they are all OK + for i = 0; i <= nnonter; i++ { + // the added production rises or falls as the start symbol ... 
+ if i == 0 { + continue + } + if pempty[i] != OK { + fatfl = 0 + errorf("nonterminal " + nontrst[i].name + " never derives any token string") + } + } + + if nerrors != 0 { + summary() + exit(1) + } + + // now, compute the pempty array, to see which nonterminals derive the empty string + // set pempty to WHOKNOWS + aryfil(pempty, nnonter+1, WHOKNOWS) + + // loop as long as we keep finding empty nonterminals + +again: + for { + next: + for i = 1; i < nprod; i++ { + // not known to be empty + prd = prdptr[i] + if pempty[prd[0]-NTBASE] != WHOKNOWS { + continue + } + np = len(prd) - 1 + for p = 1; p < np; p++ { + if prd[p] < NTBASE || pempty[prd[p]-NTBASE] != EMPTY { + continue next + } + } + + // we have a nontrivially empty nonterminal + pempty[prd[0]-NTBASE] = EMPTY + + // got one ... try for another + continue again + } + return + } +} + +// +// compute an array with the first of nonterminals +// +func cpfir() { + var s, n, p, np, ch, i int + var curres [][]int + var prd []int + + wsets = make([]Wset, nnonter+WSETINC) + pfirst = make([]Lkset, nnonter+1) + for i = 0; i <= nnonter; i++ { + wsets[i].ws = mkset() + pfirst[i] = mkset() + curres = pres[i] + n = len(curres) + + // initially fill the sets + for s = 0; s < n; s++ { + prd = curres[s] + np = len(prd) - 1 + for p = 0; p < np; p++ { + ch = prd[p] + if ch < NTBASE { + setbit(pfirst[i], ch) + break + } + if pempty[ch-NTBASE] == 0 { + break + } + } + } + } + + // now, reflect transitivity + changes := 1 + for changes != 0 { + changes = 0 + for i = 0; i <= nnonter; i++ { + curres = pres[i] + n = len(curres) + for s = 0; s < n; s++ { + prd = curres[s] + np = len(prd) - 1 + for p = 0; p < np; p++ { + ch = prd[p] - NTBASE + if ch < 0 { + break + } + changes |= setunion(pfirst[i], pfirst[ch]) + if pempty[ch] == 0 { + break + } + } + } + } + } + + if indebug == 0 { + return + } + if foutput != nil { + for i = 0; i <= nnonter; i++ { + fmt.Fprintf(foutput, "\n%v: %v %v\n", + nontrst[i].name, pfirst[i], pempty[i]) + } + } 
+} + +// +// generate the states +// +func stagen() { + // initialize + nstate = 0 + tstates = make([]int, ntokens+1) // states generated by terminal gotos + ntstates = make([]int, nnonter+1) // states generated by nonterminal gotos + amem = make([]int, ACTSIZE) + memp = 0 + + clset = mkset() + pstate[0] = 0 + pstate[1] = 0 + aryfil(clset, tbitset, 0) + putitem(Pitem{prdptr[0], 0, 0, 0}, clset) + tystate[0] = MUSTDO + nstate = 1 + pstate[2] = pstate[1] + + // + // now, the main state generation loop + // first pass generates all of the states + // later passes fix up lookahead + // could be sped up a lot by remembering + // results of the first pass rather than recomputing + // + first := 1 + for more := 1; more != 0; first = 0 { + more = 0 + for i := 0; i < nstate; i++ { + if tystate[i] != MUSTDO { + continue + } + + tystate[i] = DONE + aryfil(temp1, nnonter+1, 0) + + // take state i, close it, and do gotos + closure(i) + + // generate goto's + for p := 0; p < cwp; p++ { + pi := wsets[p] + if pi.flag != 0 { + continue + } + wsets[p].flag = 1 + c := pi.pitem.first + if c <= 1 { + if pstate[i+1]-pstate[i] <= p { + tystate[i] = MUSTLOOKAHEAD + } + continue + } + + // do a goto on c + putitem(wsets[p].pitem, wsets[p].ws) + for q := p + 1; q < cwp; q++ { + // this item contributes to the goto + if c == wsets[q].pitem.first { + putitem(wsets[q].pitem, wsets[q].ws) + wsets[q].flag = 1 + } + } + + if c < NTBASE { + state(c) // register new state + } else { + temp1[c-NTBASE] = state(c) + } + } + + if gsdebug != 0 && foutput != nil { + fmt.Fprintf(foutput, "%v: ", i) + for j := 0; j <= nnonter; j++ { + if temp1[j] != 0 { + fmt.Fprintf(foutput, "%v %v,", nontrst[j].name, temp1[j]) + } + } + fmt.Fprintf(foutput, "\n") + } + + if first != 0 { + indgo[i] = apack(temp1[1:], nnonter-1) - 1 + } + + more++ + } + } +} + +// +// generate the closure of state i +// +func closure(i int) { + zzclose++ + + // first, copy kernel of state i to wsets + cwp = 0 + q := pstate[i+1] + for p := 
pstate[i]; p < q; p++ { + wsets[cwp].pitem = statemem[p].pitem + wsets[cwp].flag = 1 // this item must get closed + copy(wsets[cwp].ws, statemem[p].look) + cwp++ + } + + // now, go through the loop, closing each item + work := 1 + for work != 0 { + work = 0 + for u := 0; u < cwp; u++ { + if wsets[u].flag == 0 { + continue + } + + // dot is before c + c := wsets[u].pitem.first + if c < NTBASE { + wsets[u].flag = 0 + // only interesting case is where . is before nonterminal + continue + } + + // compute the lookahead + aryfil(clset, tbitset, 0) + + // find items involving c + for v := u; v < cwp; v++ { + if wsets[v].flag != 1 || wsets[v].pitem.first != c { + continue + } + pi := wsets[v].pitem.prod + ipi := wsets[v].pitem.off + 1 + + wsets[v].flag = 0 + if nolook != 0 { + continue + } + + ch := pi[ipi] + ipi++ + for ch > 0 { + // terminal symbol + if ch < NTBASE { + setbit(clset, ch) + break + } + + // nonterminal symbol + setunion(clset, pfirst[ch-NTBASE]) + if pempty[ch-NTBASE] == 0 { + break + } + ch = pi[ipi] + ipi++ + } + if ch <= 0 { + setunion(clset, wsets[v].ws) + } + } + + // + // now loop over productions derived from c + // + curres := pres[c-NTBASE] + n := len(curres) + + nexts: + // initially fill the sets + for s := 0; s < n; s++ { + prd := curres[s] + + // + // put these items into the closure + // is the item there + // + for v := 0; v < cwp; v++ { + // yes, it is there + if wsets[v].pitem.off == 0 && + aryeq(wsets[v].pitem.prod, prd) != 0 { + if nolook == 0 && + setunion(wsets[v].ws, clset) != 0 { + wsets[v].flag = 1 + work = 1 + } + continue nexts + } + } + + // not there; make a new entry + if cwp >= len(wsets) { + awsets := make([]Wset, cwp+WSETINC) + copy(awsets, wsets) + wsets = awsets + } + wsets[cwp].pitem = Pitem{prd, 0, prd[0], -prd[len(prd)-1]} + wsets[cwp].flag = 1 + wsets[cwp].ws = mkset() + if nolook == 0 { + work = 1 + copy(wsets[cwp].ws, clset) + } + cwp++ + } + } + } + + // have computed closure; flags are reset; return + if cldebug 
!= 0 && foutput != nil { + fmt.Fprintf(foutput, "\nState %v, nolook = %v\n", i, nolook) + for u := 0; u < cwp; u++ { + if wsets[u].flag != 0 { + fmt.Fprintf(foutput, "flag set\n") + } + wsets[u].flag = 0 + fmt.Fprintf(foutput, "\t%v", writem(wsets[u].pitem)) + prlook(wsets[u].ws) + fmt.Fprintf(foutput, "\n") + } + } +} + +// +// sorts last state,and sees if it equals earlier ones. returns state number +// +func state(c int) int { + zzstate++ + p1 := pstate[nstate] + p2 := pstate[nstate+1] + if p1 == p2 { + return 0 // null state + } + + // sort the items + var k, l int + for k = p1 + 1; k < p2; k++ { // make k the biggest + for l = k; l > p1; l-- { + if statemem[l].pitem.prodno < statemem[l-1].pitem.prodno || + statemem[l].pitem.prodno == statemem[l-1].pitem.prodno && + statemem[l].pitem.off < statemem[l-1].pitem.off { + s := statemem[l] + statemem[l] = statemem[l-1] + statemem[l-1] = s + } else { + break + } + } + } + + size1 := p2 - p1 // size of state + + var i int + if c >= NTBASE { + i = ntstates[c-NTBASE] + } else { + i = tstates[c] + } + +look: + for ; i != 0; i = mstates[i] { + // get ith state + q1 := pstate[i] + q2 := pstate[i+1] + size2 := q2 - q1 + if size1 != size2 { + continue + } + k = p1 + for l = q1; l < q2; l++ { + if aryeq(statemem[l].pitem.prod, statemem[k].pitem.prod) == 0 || + statemem[l].pitem.off != statemem[k].pitem.off { + continue look + } + k++ + } + + // found it + pstate[nstate+1] = pstate[nstate] // delete last state + + // fix up lookaheads + if nolook != 0 { + return i + } + k = p1 + for l = q1; l < q2; l++ { + if setunion(statemem[l].look, statemem[k].look) != 0 { + tystate[i] = MUSTDO + } + k++ + } + return i + } + + // state is new + zznewstate++ + if nolook != 0 { + errorf("yacc state/nolook error") + } + pstate[nstate+2] = p2 + if nstate+1 >= NSTATES { + errorf("too many states") + } + if c >= NTBASE { + mstates[nstate] = ntstates[c-NTBASE] + ntstates[c-NTBASE] = nstate + } else { + mstates[nstate] = tstates[c] + tstates[c] = 
// putitem appends item p, advanced one symbol past its dot, to the
// kernel of the state currently under construction (state nstate),
// growing statemem as needed. Unless nolook is set, a private copy of
// the lookahead set is stored alongside the item.
func putitem(p Pitem, set Lkset) {
	// advance the dot and cache the symbol that now follows it
	p.off++
	p.first = p.prod[p.off]

	if pidebug != 0 && foutput != nil {
		fmt.Fprintf(foutput, "putitem(%v), state %v\n", writem(p), nstate)
	}
	// pstate[nstate+1] is the next free slot in statemem
	j := pstate[nstate+1]
	if j >= len(statemem) {
		asm := make([]Item, j+STATEINC)
		copy(asm, statemem)
		statemem = asm
	}
	statemem[j].pitem = p
	if nolook == 0 {
		// copy the lookahead set so later mutation of the caller's
		// set cannot alias into the stored state
		s := mkset()
		copy(s, set)
		statemem[j].look = s
	}
	j++
	pstate[nstate+1] = j
}
+ // eliminate leading and trailing 0's + // + off := 0 + pp := 0 + for ; pp <= n && p[pp] == 0; pp++ { + off-- + } + + // no actions + if pp > n { + return 0 + } + for ; n > pp && p[n] == 0; n-- { + } + p = p[pp : n+1] + + // now, find a place for the elements from p to q, inclusive + r := len(amem) - len(p) + +nextk: + for rr := 0; rr <= r; rr++ { + qq := rr + for pp = 0; pp < len(p); pp++ { + if p[pp] != 0 { + if p[pp] != amem[qq] && amem[qq] != 0 { + continue nextk + } + } + qq++ + } + + // we have found an acceptable k + if pkdebug != 0 && foutput != nil { + fmt.Fprintf(foutput, "off = %v, k = %v\n", off+rr, rr) + } + qq = rr + for pp = 0; pp < len(p); pp++ { + if p[pp] != 0 { + if qq > memp { + memp = qq + } + amem[qq] = p[pp] + } + qq++ + } + if pkdebug != 0 && foutput != nil { + for pp = 0; pp <= memp; pp += 10 { + fmt.Fprintf(foutput, "\n") + for qq = pp; qq <= pp+9; qq++ { + fmt.Fprintf(foutput, "%v ", amem[qq]) + } + fmt.Fprintf(foutput, "\n") + } + } + return off + rr + } + errorf("no space in action table") + return 0 +} + +// +// print the output for the states +// +func output() { + var c, u, v int + + if !lflag { + fmt.Fprintf(ftable, "\n//line yacctab:1") + } + fmt.Fprintf(ftable, "\nvar %sExca = [...]int{\n", prefix) + + if len(errors) > 0 { + stateTable = make([]Row, nstate) + } + + noset := mkset() + + // output the stuff for state i + for i := 0; i < nstate; i++ { + nolook = 0 + if tystate[i] != MUSTLOOKAHEAD { + nolook = 1 + } + closure(i) + + // output actions + nolook = 1 + aryfil(temp1, ntokens+nnonter+1, 0) + for u = 0; u < cwp; u++ { + c = wsets[u].pitem.first + if c > 1 && c < NTBASE && temp1[c] == 0 { + for v = u; v < cwp; v++ { + if c == wsets[v].pitem.first { + putitem(wsets[v].pitem, noset) + } + } + temp1[c] = state(c) + } else if c > NTBASE { + c -= NTBASE + if temp1[c+ntokens] == 0 { + temp1[c+ntokens] = amem[indgo[i]+c] + } + } + } + if i == 1 { + temp1[1] = ACCEPTCODE + } + + // now, we have the shifts; look at the reductions + 
//
// decide a shift/reduce conflict by precedence.
// r is a rule number, t a token number
// the conflict is in state s
// temp1[t] is changed to reflect the action
//
func precftn(r, t, s int) {
	var action int

	lp := levprd[r] // packed precedence/assoc of the reduction rule
	lt := toklev[t] // packed precedence/assoc of the shifted token
	if PLEVEL(lt) == 0 || PLEVEL(lp) == 0 {
		// at least one side has no declared precedence: report the
		// conflict and keep the default resolution (the shift already
		// recorded in temp1[t])
		if foutput != nil {
			fmt.Fprintf(foutput,
				"\n%v: shift/reduce conflict (shift %v(%v), red'n %v(%v)) on %v",
				s, temp1[t], PLEVEL(lt), r, PLEVEL(lp), symnam(t))
		}
		zzsrconf++
		return
	}
	// equal precedence: the token's associativity decides; otherwise
	// the higher precedence wins (RASC = shift, LASC = reduce)
	if PLEVEL(lt) == PLEVEL(lp) {
		action = ASSOC(lt)
	} else if PLEVEL(lt) > PLEVEL(lp) {
		action = RASC // shift
	} else {
		action = LASC
	} // reduce
	// RASC deliberately has no case: the shift entry in temp1[t] stands
	switch action {
	case BASC: // error action
		temp1[t] = ERRCODE
	case LASC: // reduce
		temp1[t] = -r
	}
}
+ count++ + } + } + if count > ntimes { + lastred = tred + ntimes = count + } + } + + // + // for error recovery, arrange that, if there is a shift on the + // error recovery token, `error', that the default be the error action + // + if temp1[2] > 0 { + lastred = 0 + } + + // clear out entries in temp1 which equal lastred + // count entries in optst table + n := 0 + for p = 0; p <= ntokens; p++ { + p1 = temp1[p] + if p1+lastred == 0 { + temp1[p] = 0 + p1 = 0 + } + if p1 > 0 && p1 != ACCEPTCODE && p1 != ERRCODE { + n++ + } + } + + wrstate(i) + defact[i] = lastred + flag := 0 + os := make([]int, n*2) + n = 0 + for p = 0; p <= ntokens; p++ { + p1 = temp1[p] + if p1 != 0 { + if p1 < 0 { + p1 = -p1 + } else if p1 == ACCEPTCODE { + p1 = -1 + } else if p1 == ERRCODE { + p1 = 0 + } else { + os[n] = p + n++ + os[n] = p1 + n++ + zzacent++ + continue + } + if flag == 0 { + fmt.Fprintf(ftable, "\t-1, %v,\n", i) + } + flag++ + fmt.Fprintf(ftable, "\t%v, %v,\n", p, p1) + zzexcp++ + } + } + if flag != 0 { + defact[i] = -2 + fmt.Fprintf(ftable, "\t-2, %v,\n", lastred) + } + optst[i] = os +} + +// +// writes state i +// +func wrstate(i int) { + var j0, j1, u int + var pp, qq int + + if len(errors) > 0 { + actions := append([]int(nil), temp1...) 
+ defaultAction := ERRCODE + if lastred != 0 { + defaultAction = -lastred + } + stateTable[i] = Row{actions, defaultAction} + } + + if foutput == nil { + return + } + fmt.Fprintf(foutput, "\nstate %v\n", i) + qq = pstate[i+1] + for pp = pstate[i]; pp < qq; pp++ { + fmt.Fprintf(foutput, "\t%v\n", writem(statemem[pp].pitem)) + } + if tystate[i] == MUSTLOOKAHEAD { + // print out empty productions in closure + for u = pstate[i+1] - pstate[i]; u < cwp; u++ { + if wsets[u].pitem.first < 0 { + fmt.Fprintf(foutput, "\t%v\n", writem(wsets[u].pitem)) + } + } + } + + // check for state equal to another + for j0 = 0; j0 <= ntokens; j0++ { + j1 = temp1[j0] + if j1 != 0 { + fmt.Fprintf(foutput, "\n\t%v ", symnam(j0)) + + // shift, error, or accept + if j1 > 0 { + if j1 == ACCEPTCODE { + fmt.Fprintf(foutput, "accept") + } else if j1 == ERRCODE { + fmt.Fprintf(foutput, "error") + } else { + fmt.Fprintf(foutput, "shift %v", j1) + } + } else { + fmt.Fprintf(foutput, "reduce %v (src line %v)", -j1, rlines[-j1]) + } + } + } + + // output the final production + if lastred != 0 { + fmt.Fprintf(foutput, "\n\t. reduce %v (src line %v)\n\n", + lastred, rlines[lastred]) + } else { + fmt.Fprintf(foutput, "\n\t. 
error\n\n") + } + + // now, output nonterminal actions + j1 = ntokens + for j0 = 1; j0 <= nnonter; j0++ { + j1++ + if temp1[j1] != 0 { + fmt.Fprintf(foutput, "\t%v goto %v\n", symnam(j0+NTBASE), temp1[j1]) + } + } +} + +// +// output the gotos for the nontermninals +// +func go2out() { + for i := 1; i <= nnonter; i++ { + go2gen(i) + + // find the best one to make default + best := -1 + times := 0 + + // is j the most frequent + for j := 0; j < nstate; j++ { + if tystate[j] == 0 { + continue + } + if tystate[j] == best { + continue + } + + // is tystate[j] the most frequent + count := 0 + cbest := tystate[j] + for k := j; k < nstate; k++ { + if tystate[k] == cbest { + count++ + } + } + if count > times { + best = cbest + times = count + } + } + + // best is now the default entry + zzgobest += times - 1 + n := 0 + for j := 0; j < nstate; j++ { + if tystate[j] != 0 && tystate[j] != best { + n++ + } + } + goent := make([]int, 2*n+1) + n = 0 + for j := 0; j < nstate; j++ { + if tystate[j] != 0 && tystate[j] != best { + goent[n] = j + n++ + goent[n] = tystate[j] + n++ + zzgoent++ + } + } + + // now, the default + if best == -1 { + best = 0 + } + + zzgoent++ + goent[n] = best + yypgo[i] = goent + } +} + +// +// output the gotos for nonterminal c +// +func go2gen(c int) { + var i, cc, p, q int + + // first, find nonterminals with gotos on c + aryfil(temp1, nnonter+1, 0) + temp1[c] = 1 + work := 1 + for work != 0 { + work = 0 + for i = 0; i < nprod; i++ { + // cc is a nonterminal with a goto on c + cc = prdptr[i][1] - NTBASE + if cc >= 0 && temp1[cc] != 0 { + // thus, the left side of production i does too + cc = prdptr[i][0] - NTBASE + if temp1[cc] == 0 { + work = 1 + temp1[cc] = 1 + } + } + } + } + + // now, we have temp1[c] = 1 if a goto on c in closure of cc + if g2debug != 0 && foutput != nil { + fmt.Fprintf(foutput, "%v: gotos on ", nontrst[c].name) + for i = 0; i <= nnonter; i++ { + if temp1[i] != 0 { + fmt.Fprintf(foutput, "%v ", nontrst[i].name) + } + } + 
fmt.Fprintf(foutput, "\n") + } + + // now, go through and put gotos into tystate + aryfil(tystate, nstate, 0) + for i = 0; i < nstate; i++ { + q = pstate[i+1] + for p = pstate[i]; p < q; p++ { + cc = statemem[p].pitem.first + if cc >= NTBASE { + // goto on c is possible + if temp1[cc-NTBASE] != 0 { + tystate[i] = amem[indgo[i]+c] + break + } + } + } + } +} + +// +// in order to free up the mem and amem arrays for the optimizer, +// and still be able to output yyr1, etc., after the sizes of +// the action array is known, we hide the nonterminals +// derived by productions in levprd. +// +func hideprod() { + nred := 0 + levprd[0] = 0 + for i := 1; i < nprod; i++ { + if (levprd[i] & REDFLAG) == 0 { + if foutput != nil { + fmt.Fprintf(foutput, "Rule not reduced: %v\n", + writem(Pitem{prdptr[i], 0, 0, i})) + } + fmt.Printf("rule %v never reduced\n", writem(Pitem{prdptr[i], 0, 0, i})) + nred++ + } + levprd[i] = prdptr[i][0] - NTBASE + } + if nred != 0 { + fmt.Printf("%v rules never reduced\n", nred) + } +} + +func callopt() { + var j, k, p, q, i int + var v []int + + pgo = make([]int, nnonter+1) + pgo[0] = 0 + maxoff = 0 + maxspr = 0 + for i = 0; i < nstate; i++ { + k = 32000 + j = 0 + v = optst[i] + q = len(v) + for p = 0; p < q; p += 2 { + if v[p] > j { + j = v[p] + } + if v[p] < k { + k = v[p] + } + } + + // nontrivial situation + if k <= j { + // j is now the range + // j -= k; // call scj + if k > maxoff { + maxoff = k + } + } + tystate[i] = q + 2*j + if j > maxspr { + maxspr = j + } + } + + // initialize ggreed table + ggreed = make([]int, nnonter+1) + for i = 1; i <= nnonter; i++ { + ggreed[i] = 1 + j = 0 + + // minimum entry index is always 0 + v = yypgo[i] + q = len(v) - 1 + for p = 0; p < q; p += 2 { + ggreed[i] += 2 + if v[p] > j { + j = v[p] + } + } + ggreed[i] = ggreed[i] + 2*j + if j > maxoff { + maxoff = j + } + } + + // now, prepare to put the shift actions into the amem array + for i = 0; i < ACTSIZE; i++ { + amem[i] = 0 + } + maxa = 0 + for i = 0; i < 
nstate; i++ { + if tystate[i] == 0 && adb > 1 { + fmt.Fprintf(ftable, "State %v: null\n", i) + } + indgo[i] = yyFlag + } + + i = nxti() + for i != NOMORE { + if i >= 0 { + stin(i) + } else { + gin(-i) + } + i = nxti() + } + + // print amem array + if adb > 2 { + for p = 0; p <= maxa; p += 10 { + fmt.Fprintf(ftable, "%v ", p) + for i = 0; i < 10; i++ { + fmt.Fprintf(ftable, "%v ", amem[p+i]) + } + ftable.WriteRune('\n') + } + } + + aoutput() + osummary() +} + +// +// finds the next i +// +func nxti() int { + max := 0 + maxi := 0 + for i := 1; i <= nnonter; i++ { + if ggreed[i] >= max { + max = ggreed[i] + maxi = -i + } + } + for i := 0; i < nstate; i++ { + if tystate[i] >= max { + max = tystate[i] + maxi = i + } + } + if max == 0 { + return NOMORE + } + return maxi +} + +func gin(i int) { + var s int + + // enter gotos on nonterminal i into array amem + ggreed[i] = 0 + + q := yypgo[i] + nq := len(q) - 1 + + // now, find amem place for it +nextgp: + for p := 0; p < ACTSIZE; p++ { + if amem[p] != 0 { + continue + } + for r := 0; r < nq; r += 2 { + s = p + q[r] + 1 + if s > maxa { + maxa = s + if maxa >= ACTSIZE { + errorf("a array overflow") + } + } + if amem[s] != 0 { + continue nextgp + } + } + + // we have found amem spot + amem[p] = q[nq] + if p > maxa { + maxa = p + } + for r := 0; r < nq; r += 2 { + s = p + q[r] + 1 + amem[s] = q[r+1] + } + pgo[i] = p + if adb > 1 { + fmt.Fprintf(ftable, "Nonterminal %v, entry at %v\n", i, pgo[i]) + } + return + } + errorf("cannot place goto %v\n", i) +} + +func stin(i int) { + var s int + + tystate[i] = 0 + + // enter state i into the amem array + q := optst[i] + nq := len(q) + +nextn: + // find an acceptable place + for n := -maxoff; n < ACTSIZE; n++ { + flag := 0 + for r := 0; r < nq; r += 2 { + s = q[r] + n + if s < 0 || s > ACTSIZE { + continue nextn + } + if amem[s] == 0 { + flag++ + } else if amem[s] != q[r+1] { + continue nextn + } + } + + // check the position equals another only if the states are identical + for j := 
//
// this version is for limbo
// write out the optimized parser: the packed action array plus the two
// index arrays the generated parser uses to address it.
//
func aoutput() {
	ftable.WriteRune('\n')
	// total number of entries in the packed action array
	fmt.Fprintf(ftable, "const %sLast = %v\n", prefix, maxa+1)
	arout("Act", amem, maxa+1)   // packed shift/goto entries
	arout("Pact", indgo, nstate) // per-state offset into Act
	arout("Pgo", pgo, nnonter+1) // per-nonterminal offset into Act
}
i = 0; i <= c; i++ { + if temp1[i] == 0 { + temp1[i] = YYLEXUNK + } + } + arout("Tok1", temp1, c+1) + + // table 2 has PRIVATE-PRIVATE+256 + aryfil(temp1, 256, 0) + c = 0 + for i = 1; i <= ntokens; i++ { + j = tokset[i].value - PRIVATE + if j >= 0 && j < 256 { + if temp1[j] != 0 { + fmt.Print("yacc bug -- cannot have 2 different Ts with same value\n") + fmt.Printf(" %s and %s\n", tokset[i].name, tokset[temp1[j]].name) + nerrors++ + } + temp1[j] = i + if j > c { + c = j + } + } + } + arout("Tok2", temp1, c+1) + + // table 3 has everything else + ftable.WriteRune('\n') + fmt.Fprintf(ftable, "var %sTok3 = [...]int{\n\t", prefix) + c = 0 + for i = 1; i <= ntokens; i++ { + j = tokset[i].value + if j >= 0 && j < 256 { + continue + } + if j >= PRIVATE && j < 256+PRIVATE { + continue + } + + if c%5 != 0 { + ftable.WriteRune(' ') + } + fmt.Fprintf(ftable, "%d, %d,", j, i) + c++ + if c%5 == 0 { + fmt.Fprint(ftable, "\n\t") + } + } + if c%5 != 0 { + ftable.WriteRune(' ') + } + fmt.Fprintf(ftable, "%d,\n}\n", 0) + + // Custom error messages. 
+ fmt.Fprintf(ftable, "\n") + fmt.Fprintf(ftable, "var %sErrorMessages = [...]struct {\n", prefix) + fmt.Fprintf(ftable, "\tstate int\n") + fmt.Fprintf(ftable, "\ttoken int\n") + fmt.Fprintf(ftable, "\tmsg string\n") + fmt.Fprintf(ftable, "}{\n") + for _, error := range errors { + lineno = error.lineno + state, token := runMachine(error.tokens) + fmt.Fprintf(ftable, "\t{%v, %v, %s},\n", state, token, error.msg) + } + fmt.Fprintf(ftable, "}\n") + + // copy parser text + ch := getrune(finput) + for ch != EOF { + ftable.WriteRune(ch) + ch = getrune(finput) + } + + // copy yaccpar + if !lflag { + fmt.Fprintf(ftable, "\n//line yaccpar:1\n") + } + + parts := strings.SplitN(yaccpar, prefix+"run()", 2) + fmt.Fprintf(ftable, "%v", parts[0]) + ftable.Write(fcode.Bytes()) + fmt.Fprintf(ftable, "%v", parts[1]) +} + +func runMachine(tokens []string) (state, token int) { + var stack []int + i := 0 + token = -1 + +Loop: + if token < 0 { + token = chfind(2, tokens[i]) + i++ + } + + row := stateTable[state] + + c := token + if token >= NTBASE { + c = token - NTBASE + ntokens + } + action := row.actions[c] + if action == 0 { + action = row.defaultAction + } + + switch { + case action == ACCEPTCODE: + errorf("tokens are accepted") + return + case action == ERRCODE: + if token >= NTBASE { + errorf("error at non-terminal token %s", symnam(token)) + } + return + case action > 0: + // Shift to state action. + stack = append(stack, state) + state = action + token = -1 + goto Loop + default: + // Reduce by production -action. 
+ prod := prdptr[-action] + if rhsLen := len(prod) - 2; rhsLen > 0 { + n := len(stack) - rhsLen + state = stack[n] + stack = stack[:n] + } + if token >= 0 { + i-- + } + token = prod[0] + goto Loop + } +} + +func arout(s string, v []int, n int) { + s = prefix + s + ftable.WriteRune('\n') + fmt.Fprintf(ftable, "var %v = [...]int{", s) + for i := 0; i < n; i++ { + if i%10 == 0 { + fmt.Fprintf(ftable, "\n\t") + } else { + ftable.WriteRune(' ') + } + fmt.Fprintf(ftable, "%d,", v[i]) + } + fmt.Fprintf(ftable, "\n}\n") +} + +// +// output the summary on y.output +// +func summary() { + if foutput != nil { + fmt.Fprintf(foutput, "\n%v terminals, %v nonterminals\n", ntokens, nnonter+1) + fmt.Fprintf(foutput, "%v grammar rules, %v/%v states\n", nprod, nstate, NSTATES) + fmt.Fprintf(foutput, "%v shift/reduce, %v reduce/reduce conflicts reported\n", zzsrconf, zzrrconf) + fmt.Fprintf(foutput, "%v working sets used\n", len(wsets)) + fmt.Fprintf(foutput, "memory: parser %v/%v\n", memp, ACTSIZE) + fmt.Fprintf(foutput, "%v extra closures\n", zzclose-2*nstate) + fmt.Fprintf(foutput, "%v shift entries, %v exceptions\n", zzacent, zzexcp) + fmt.Fprintf(foutput, "%v goto entries\n", zzgoent) + fmt.Fprintf(foutput, "%v entries saved by goto default\n", zzgobest) + } + if zzsrconf != 0 || zzrrconf != 0 { + fmt.Printf("\nconflicts: ") + if zzsrconf != 0 { + fmt.Printf("%v shift/reduce", zzsrconf) + } + if zzsrconf != 0 && zzrrconf != 0 { + fmt.Printf(", ") + } + if zzrrconf != 0 { + fmt.Printf("%v reduce/reduce", zzrrconf) + } + fmt.Printf("\n") + } +} + +// +// write optimizer summary +// +func osummary() { + if foutput == nil { + return + } + i := 0 + for p := maxa; p >= 0; p-- { + if amem[p] == 0 { + i++ + } + } + + fmt.Fprintf(foutput, "Optimizer space used: output %v/%v\n", maxa+1, ACTSIZE) + fmt.Fprintf(foutput, "%v table entries, %v zero\n", maxa+1, i) + fmt.Fprintf(foutput, "maximum spread: %v, maximum offset: %v\n", maxspr, maxoff) +} + +// +// copies and protects "'s in q +// 
//
// copies and protects "'s in q: every double quote in the result is
// preceded by a backslash, so the string can be embedded in a quoted
// Go literal.
//
func chcopy(q string) string {
	var out strings.Builder
	start := 0
	for idx := 0; idx < len(q); idx++ {
		if q[idx] == '"' {
			// flush everything up to the quote, then insert the escape;
			// the quote itself is emitted with the next flush
			out.WriteString(q[start:idx])
			out.WriteByte('\\')
			start = idx
		}
	}
	out.WriteString(q[start:])
	return out.String()
}
string) *bufio.Reader { + fi, err := os.Open(s) + if err != nil { + errorf("error opening %v: %v", s, err) + } + //fmt.Printf("open %v\n", s); + return bufio.NewReader(fi) +} + +func create(s string) *bufio.Writer { + fo, err := os.Create(s) + if err != nil { + errorf("error creating %v: %v", s, err) + } + //fmt.Printf("create %v mode %v\n", s); + return bufio.NewWriter(fo) +} + +// +// write out error comment +// +func lerrorf(lineno int, s string, v ...interface{}) { + nerrors++ + fmt.Fprintf(stderr, s, v...) + fmt.Fprintf(stderr, ": %v:%v\n", infile, lineno) + if fatfl != 0 { + summary() + exit(1) + } +} + +func errorf(s string, v ...interface{}) { + lerrorf(lineno, s, v...) +} + +func exit(status int) { + if ftable != nil { + ftable.Flush() + ftable = nil + gofmt() + } + if foutput != nil { + foutput.Flush() + foutput = nil + } + if stderr != nil { + stderr.Flush() + stderr = nil + } + os.Exit(status) +} + +func gofmt() { + src, err := ioutil.ReadFile(oflag) + if err != nil { + return + } + src, err = format.Source(src) + if err != nil { + return + } + ioutil.WriteFile(oflag, src, 0666) +} + +var yaccpar string // will be processed version of yaccpartext: s/$$/prefix/g +var yaccpartext = ` +/* parser for yacc output */ + +func $$Iaddr(v interface{}) __yyunsafe__.Pointer { + type h struct { + t __yyunsafe__.Pointer + p __yyunsafe__.Pointer + } + return (*h)(__yyunsafe__.Pointer(&v)).p +} + +var ( + $$Debug = 0 + $$ErrorVerbose = false +) + +type $$Lexer interface { + Lex(lval *$$SymType) int + Error(s string) +} + +type $$Parser interface { + Parse($$Lexer) int + Lookahead() int +} + +type $$ParserImpl struct { + lval $$SymType + stack [$$InitialStackSize]$$SymType + char int +} + +func (p *$$ParserImpl) Lookahead() int { + return p.char +} + +func $$NewParser() $$Parser { + return &$$ParserImpl{} +} + +const $$Flag = -1000 + +func $$Tokname(c int) string { + if c >= 1 && c-1 < len($$Toknames) { + if $$Toknames[c-1] != "" { + return $$Toknames[c-1] + } + } + 
return __yyfmt__.Sprintf("tok-%v", c) +} + +func $$Statname(s int) string { + if s >= 0 && s < len($$Statenames) { + if $$Statenames[s] != "" { + return $$Statenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func $$ErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !$$ErrorVerbose { + return "syntax error" + } + + for _, e := range $$ErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + $$Tokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := $$Pact[state] + for tok := TOKSTART; tok-1 < len($$Toknames); tok++ { + if n := base + tok; n >= 0 && n < $$Last && $$Chk[$$Act[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if $$Def[state] == -2 { + i := 0 + for $$Exca[i] != -1 || $$Exca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; $$Exca[i] >= 0; i += 2 { + tok := $$Exca[i] + if tok < TOKSTART || $$Exca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. 
+ if $$Exca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += $$Tokname(tok) + } + return res +} + +func $$lex1(lex $$Lexer, lval *$$SymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = $$Tok1[0] + goto out + } + if char < len($$Tok1) { + token = $$Tok1[char] + goto out + } + if char >= $$Private { + if char < $$Private+len($$Tok2) { + token = $$Tok2[char-$$Private] + goto out + } + } + for i := 0; i < len($$Tok3); i += 2 { + token = $$Tok3[i+0] + if token == char { + token = $$Tok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = $$Tok2[1] /* unknown char */ + } + if $$Debug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", $$Tokname(token), uint(char)) + } + return char, token +} + +func $$Parse($$lex $$Lexer) int { + return $$NewParser().Parse($$lex) +} + +func ($$rcvr *$$ParserImpl) Parse($$lex $$Lexer) int { + var $$n int + var $$VAL $$SymType + var $$Dollar []$$SymType + _ = $$Dollar // silence set and not used + $$S := $$rcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + $$state := 0 + $$rcvr.char = -1 + $$token := -1 // $$rcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ $$state = -1 + $$rcvr.char = -1 + $$token = -1 + }() + $$p := -1 + goto $$stack + +ret0: + return 0 + +ret1: + return 1 + +$$stack: + /* put a state and value onto the stack */ + if $$Debug >= 4 { + __yyfmt__.Printf("char %v in %v\n", $$Tokname($$token), $$Statname($$state)) + } + + $$p++ + if $$p >= len($$S) { + nyys := make([]$$SymType, len($$S)*2) + copy(nyys, $$S) + $$S = nyys + } + $$S[$$p] = $$VAL + $$S[$$p].yys = $$state + +$$newstate: + $$n = $$Pact[$$state] + if $$n <= $$Flag { + goto $$default /* simple state */ + } + if $$rcvr.char < 0 { + $$rcvr.char, $$token = $$lex1($$lex, &$$rcvr.lval) + } + $$n += $$token + if $$n < 0 || $$n >= $$Last { + goto $$default + } + $$n = $$Act[$$n] + if $$Chk[$$n] == $$token { /* valid shift */ + $$rcvr.char = -1 + $$token = -1 + $$VAL = $$rcvr.lval + $$state = $$n + if Errflag > 0 { + Errflag-- + } + goto $$stack + } + +$$default: + /* default state action */ + $$n = $$Def[$$state] + if $$n == -2 { + if $$rcvr.char < 0 { + $$rcvr.char, $$token = $$lex1($$lex, &$$rcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if $$Exca[xi+0] == -1 && $$Exca[xi+1] == $$state { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + $$n = $$Exca[xi+0] + if $$n < 0 || $$n == $$token { + break + } + } + $$n = $$Exca[xi+1] + if $$n < 0 { + goto ret0 + } + } + if $$n == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + $$lex.Error($$ErrorMessage($$state, $$token)) + Nerrs++ + if $$Debug >= 1 { + __yyfmt__.Printf("%s", $$Statname($$state)) + __yyfmt__.Printf(" saw %s\n", $$Tokname($$token)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... 
try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for $$p >= 0 { + $$n = $$Pact[$$S[$$p].yys] + $$ErrCode + if $$n >= 0 && $$n < $$Last { + $$state = $$Act[$$n] /* simulate a shift of "error" */ + if $$Chk[$$state] == $$ErrCode { + goto $$stack + } + } + + /* the current p has no shift on "error", pop stack */ + if $$Debug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", $$S[$$p].yys) + } + $$p-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if $$Debug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", $$Tokname($$token)) + } + if $$token == $$EofCode { + goto ret1 + } + $$rcvr.char = -1 + $$token = -1 + goto $$newstate /* try again in the same state */ + } + } + + /* reduction by production $$n */ + if $$Debug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", $$n, $$Statname($$state)) + } + + $$nt := $$n + $$pt := $$p + _ = $$pt // guard against "declared and not used" + + $$p -= $$R2[$$n] + // $$p is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if $$p+1 >= len($$S) { + nyys := make([]$$SymType, len($$S)*2) + copy(nyys, $$S) + $$S = nyys + } + $$VAL = $$S[$$p+1] + + /* consult goto table to find next state */ + $$n = $$R1[$$n] + $$g := $$Pgo[$$n] + $$j := $$g + $$S[$$p].yys + 1 + + if $$j >= $$Last { + $$state = $$Act[$$g] + } else { + $$state = $$Act[$$j] + if $$Chk[$$state] != -$$n { + $$state = $$Act[$$g] + } + } + // dummy call; replaced with literal code + $$run() + goto $$stack /* stack new state and value */ +} +` diff --git a/go/vt/sqlparser/keywords.go b/go/vt/sqlparser/keywords.go new file mode 100644 index 00000000000..253daed0390 --- /dev/null +++ b/go/vt/sqlparser/keywords.go @@ -0,0 +1,641 @@ +package sqlparser + +import ( + "fmt" + "sort" + "strings" +) + +type keyword struct { + name string + id int +} + +func (k *keyword) match(input []byte) bool { + if len(input) != len(k.name) { + return false + } + for i, c := range input { + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if k.name[i] != c { + return false + } + } + return true +} + +func (k *keyword) matchStr(input string) bool { + return keywordASCIIMatch(input, k.name) +} + +func keywordASCIIMatch(input string, expected string) bool { + if len(input) != len(expected) { + return false + } + for i := 0; i < len(input); i++ { + c := input[i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if expected[i] != c { + return false + } + } + return true +} + +// keywords is a table of mysql keywords that fall into two categories: +// 1) keywords considered reserved by MySQL +// 2) keywords for us to handle specially in sql.y +// +// Those marked as UNUSED are likely reserved keywords. We add them here so that +// when rewriting queries we can properly backtick quote them so they don't cause issues +// +// NOTE: If you add new keywords, add them also to the reserved_keywords or +// non_reserved_keywords grammar in sql.y -- this will allow the keyword to be used +// in identifiers. 
See the docs for each grammar to determine which one to put it into. +var keywords = []keyword{ + {"accessible", UNUSED}, + {"action", ACTION}, + {"add", ADD}, + {"after", AFTER}, + {"against", AGAINST}, + {"algorithm", ALGORITHM}, + {"all", ALL}, + {"alter", ALTER}, + {"analyze", ANALYZE}, + {"and", AND}, + {"as", AS}, + {"asc", ASC}, + {"asensitive", UNUSED}, + {"auto_increment", AUTO_INCREMENT}, + {"avg_row_length", AVG_ROW_LENGTH}, + {"before", UNUSED}, + {"begin", BEGIN}, + {"between", BETWEEN}, + {"bigint", BIGINT}, + {"binary", BINARY}, + {"_binary", UNDERSCORE_BINARY}, + {"_utf8mb4", UNDERSCORE_UTF8MB4}, + {"_utf8", UNDERSCORE_UTF8}, + {"_latin1", UNDERSCORE_LATIN1}, + {"bit", BIT}, + {"blob", BLOB}, + {"bool", BOOL}, + {"boolean", BOOLEAN}, + {"both", UNUSED}, + {"by", BY}, + {"call", CALL}, + {"cancel", CANCEL}, + {"cascade", CASCADE}, + {"cascaded", CASCADED}, + {"case", CASE}, + {"cast", CAST}, + {"channel", CHANNEL}, + {"change", CHANGE}, + {"char", CHAR}, + {"character", CHARACTER}, + {"charset", CHARSET}, + {"check", CHECK}, + {"checksum", CHECKSUM}, + {"coalesce", COALESCE}, + {"code", CODE}, + {"collate", COLLATE}, + {"collation", COLLATION}, + {"column", COLUMN}, + {"columns", COLUMNS}, + {"comment", COMMENT_KEYWORD}, + {"committed", COMMITTED}, + {"commit", COMMIT}, + {"compact", COMPACT}, + {"complete", COMPLETE}, + {"compressed", COMPRESSED}, + {"compression", COMPRESSION}, + {"condition", UNUSED}, + {"connection", CONNECTION}, + {"constraint", CONSTRAINT}, + {"continue", UNUSED}, + {"convert", CONVERT}, + {"copy", COPY}, + {"substr", SUBSTR}, + {"substring", SUBSTRING}, + {"create", CREATE}, + {"cross", CROSS}, + {"csv", CSV}, + {"current_date", CURRENT_DATE}, + {"current_time", CURRENT_TIME}, + {"current_timestamp", CURRENT_TIMESTAMP}, + {"current_user", CURRENT_USER}, + {"cursor", UNUSED}, + {"data", DATA}, + {"database", DATABASE}, + {"databases", DATABASES}, + {"day_hour", UNUSED}, + {"day_microsecond", UNUSED}, + {"day_minute", UNUSED}, + 
{"day_second", UNUSED}, + {"date", DATE}, + {"datetime", DATETIME}, + {"dec", UNUSED}, + {"decimal", DECIMAL}, + {"declare", UNUSED}, + {"default", DEFAULT}, + {"definer", DEFINER}, + {"delay_key_write", DELAY_KEY_WRITE}, + {"delayed", UNUSED}, + {"delete", DELETE}, + {"desc", DESC}, + {"describe", DESCRIBE}, + {"deterministic", UNUSED}, + {"directory", DIRECTORY}, + {"disable", DISABLE}, + {"discard", DISCARD}, + {"disk", DISK}, + {"distinct", DISTINCT}, + {"distinctrow", DISTINCTROW}, + {"div", DIV}, + {"double", DOUBLE}, + {"do", DO}, + {"drop", DROP}, + {"dumpfile", DUMPFILE}, + {"duplicate", DUPLICATE}, + {"dynamic", DYNAMIC}, + {"each", UNUSED}, + {"else", ELSE}, + {"elseif", UNUSED}, + {"enable", ENABLE}, + {"enclosed", ENCLOSED}, + {"encryption", ENCRYPTION}, + {"end", END}, + {"enforced", ENFORCED}, + {"engine", ENGINE}, + {"engines", ENGINES}, + {"enum", ENUM}, + {"error", ERROR}, + {"escape", ESCAPE}, + {"escaped", ESCAPED}, + {"event", EVENT}, + {"exchange", EXCHANGE}, + {"exclusive", EXCLUSIVE}, + {"exists", EXISTS}, + {"exit", UNUSED}, + {"explain", EXPLAIN}, + {"expansion", EXPANSION}, + {"export", EXPORT}, + {"extended", EXTENDED}, + {"false", FALSE}, + {"fetch", UNUSED}, + {"fields", FIELDS}, + {"first", FIRST}, + {"fixed", FIXED}, + {"float", FLOAT_TYPE}, + {"float4", UNUSED}, + {"float8", UNUSED}, + {"flush", FLUSH}, + {"for", FOR}, + {"force", FORCE}, + {"foreign", FOREIGN}, + {"format", FORMAT}, + {"from", FROM}, + {"full", FULL}, + {"fulltext", FULLTEXT}, + {"function", FUNCTION}, + {"general", GENERAL}, + {"generated", UNUSED}, + {"geometry", GEOMETRY}, + {"geometrycollection", GEOMETRYCOLLECTION}, + {"get", UNUSED}, + {"global", GLOBAL}, + {"grant", UNUSED}, + {"group", GROUP}, + {"group_concat", GROUP_CONCAT}, + {"having", HAVING}, + {"header", HEADER}, + {"high_priority", UNUSED}, + {"hosts", HOSTS}, + {"hour_microsecond", UNUSED}, + {"hour_minute", UNUSED}, + {"hour_second", UNUSED}, + {"if", IF}, + {"ignore", IGNORE}, + {"import", 
IMPORT}, + {"in", IN}, + {"index", INDEX}, + {"indexes", INDEXES}, + {"infile", UNUSED}, + {"inout", UNUSED}, + {"inner", INNER}, + {"inplace", INPLACE}, + {"insensitive", UNUSED}, + {"insert", INSERT}, + {"insert_method", INSERT_METHOD}, + {"int", INT}, + {"int1", UNUSED}, + {"int2", UNUSED}, + {"int3", UNUSED}, + {"int4", UNUSED}, + {"int8", UNUSED}, + {"integer", INTEGER}, + {"interval", INTERVAL}, + {"into", INTO}, + {"io_after_gtids", UNUSED}, + {"is", IS}, + {"isolation", ISOLATION}, + {"iterate", UNUSED}, + {"invoker", INVOKER}, + {"join", JOIN}, + {"json", JSON}, + {"key", KEY}, + {"keys", KEYS}, + {"keyspaces", KEYSPACES}, + {"key_block_size", KEY_BLOCK_SIZE}, + {"kill", UNUSED}, + {"last", LAST}, + {"language", LANGUAGE}, + {"last_insert_id", LAST_INSERT_ID}, + {"leading", UNUSED}, + {"leave", UNUSED}, + {"left", LEFT}, + {"less", LESS}, + {"level", LEVEL}, + {"like", LIKE}, + {"limit", LIMIT}, + {"linear", UNUSED}, + {"lines", LINES}, + {"linestring", LINESTRING}, + {"load", LOAD}, + {"local", LOCAL}, + {"localtime", LOCALTIME}, + {"localtimestamp", LOCALTIMESTAMP}, + {"lock", LOCK}, + {"logs", LOGS}, + {"long", UNUSED}, + {"longblob", LONGBLOB}, + {"longtext", LONGTEXT}, + {"loop", UNUSED}, + {"low_priority", LOW_PRIORITY}, + {"manifest", MANIFEST}, + {"master_bind", UNUSED}, + {"match", MATCH}, + {"max_rows", MAX_ROWS}, + {"maxvalue", MAXVALUE}, + {"mediumblob", MEDIUMBLOB}, + {"mediumint", MEDIUMINT}, + {"mediumtext", MEDIUMTEXT}, + {"memory", MEMORY}, + {"merge", MERGE}, + {"middleint", UNUSED}, + {"min_rows", MIN_ROWS}, + {"minute_microsecond", UNUSED}, + {"minute_second", UNUSED}, + {"mod", MOD}, + {"mode", MODE}, + {"modify", MODIFY}, + {"modifies", UNUSED}, + {"multilinestring", MULTILINESTRING}, + {"multipoint", MULTIPOINT}, + {"multipolygon", MULTIPOLYGON}, + {"name", NAME}, + {"names", NAMES}, + {"natural", NATURAL}, + {"nchar", NCHAR}, + {"next", NEXT}, + {"no", NO}, + {"none", NONE}, + {"not", NOT}, + {"no_write_to_binlog", 
NO_WRITE_TO_BINLOG}, + {"null", NULL}, + {"numeric", NUMERIC}, + {"off", OFF}, + {"offset", OFFSET}, + {"on", ON}, + {"only", ONLY}, + {"open", OPEN}, + {"optimize", OPTIMIZE}, + {"optimizer_costs", OPTIMIZER_COSTS}, + {"option", OPTION}, + {"optionally", OPTIONALLY}, + {"or", OR}, + {"order", ORDER}, + {"out", UNUSED}, + {"outer", OUTER}, + {"outfile", OUTFILE}, + {"overwrite", OVERWRITE}, + {"pack_keys", PACK_KEYS}, + {"parser", PARSER}, + {"partition", PARTITION}, + {"partitioning", PARTITIONING}, + {"password", PASSWORD}, + {"plugins", PLUGINS}, + {"point", POINT}, + {"polygon", POLYGON}, + {"precision", UNUSED}, + {"primary", PRIMARY}, + {"privileges", PRIVILEGES}, + {"processlist", PROCESSLIST}, + {"procedure", PROCEDURE}, + {"query", QUERY}, + {"range", UNUSED}, + {"read", READ}, + {"reads", UNUSED}, + {"read_write", UNUSED}, + {"real", REAL}, + {"rebuild", REBUILD}, + {"redundant", REDUNDANT}, + {"references", REFERENCES}, + {"regexp", REGEXP}, + {"relay", RELAY}, + {"release", RELEASE}, + {"remove", REMOVE}, + {"rename", RENAME}, + {"reorganize", REORGANIZE}, + {"repair", REPAIR}, + {"repeat", UNUSED}, + {"repeatable", REPEATABLE}, + {"replace", REPLACE}, + {"require", UNUSED}, + {"resignal", UNUSED}, + {"restrict", RESTRICT}, + {"return", UNUSED}, + {"retry", RETRY}, + {"revert", REVERT}, + {"revoke", UNUSED}, + {"right", RIGHT}, + {"rlike", REGEXP}, + {"rollback", ROLLBACK}, + {"row_format", ROW_FORMAT}, + {"s3", S3}, + {"savepoint", SAVEPOINT}, + {"schema", SCHEMA}, + {"schemas", SCHEMAS}, + {"second_microsecond", UNUSED}, + {"security", SECURITY}, + {"select", SELECT}, + {"sensitive", UNUSED}, + {"separator", SEPARATOR}, + {"sequence", SEQUENCE}, + {"serializable", SERIALIZABLE}, + {"session", SESSION}, + {"set", SET}, + {"share", SHARE}, + {"shared", SHARED}, + {"show", SHOW}, + {"signal", UNUSED}, + {"signed", SIGNED}, + {"slow", SLOW}, + {"smallint", SMALLINT}, + {"spatial", SPATIAL}, + {"specific", UNUSED}, + {"sql", SQL}, + {"sqlexception", 
UNUSED}, + {"sqlstate", UNUSED}, + {"sqlwarning", UNUSED}, + {"sql_big_result", UNUSED}, + {"sql_cache", SQL_CACHE}, + {"sql_calc_found_rows", SQL_CALC_FOUND_ROWS}, + {"sql_no_cache", SQL_NO_CACHE}, + {"sql_small_result", UNUSED}, + {"ssl", UNUSED}, + {"start", START}, + {"starting", STARTING}, + {"stats_auto_recalc", STATS_AUTO_RECALC}, + {"stats_persistent", STATS_PERSISTENT}, + {"stats_sample_pages", STATS_SAMPLE_PAGES}, + {"status", STATUS}, + {"storage", STORAGE}, + {"stored", UNUSED}, + {"straight_join", STRAIGHT_JOIN}, + {"stream", STREAM}, + {"vstream", VSTREAM}, + {"table", TABLE}, + {"tables", TABLES}, + {"tablespace", TABLESPACE}, + {"temporary", TEMPORARY}, + {"temptable", TEMPTABLE}, + {"terminated", TERMINATED}, + {"text", TEXT}, + {"than", THAN}, + {"then", THEN}, + {"time", TIME}, + {"timestamp", TIMESTAMP}, + {"timestampadd", TIMESTAMPADD}, + {"timestampdiff", TIMESTAMPDIFF}, + {"tinyblob", TINYBLOB}, + {"tinyint", TINYINT}, + {"tinytext", TINYTEXT}, + {"to", TO}, + {"trailing", UNUSED}, + {"transaction", TRANSACTION}, + {"tree", TREE}, + {"traditional", TRADITIONAL}, + {"trigger", TRIGGER}, + {"triggers", TRIGGERS}, + {"true", TRUE}, + {"truncate", TRUNCATE}, + {"uncommitted", UNCOMMITTED}, + {"undefined", UNDEFINED}, + {"undo", UNUSED}, + {"union", UNION}, + {"unique", UNIQUE}, + {"unlock", UNLOCK}, + {"unsigned", UNSIGNED}, + {"update", UPDATE}, + {"upgrade", UPGRADE}, + {"usage", UNUSED}, + {"use", USE}, + {"user", USER}, + {"user_resources", USER_RESOURCES}, + {"using", USING}, + {"utc_date", UTC_DATE}, + {"utc_time", UTC_TIME}, + {"utc_timestamp", UTC_TIMESTAMP}, + {"validation", VALIDATION}, + {"values", VALUES}, + {"variables", VARIABLES}, + {"varbinary", VARBINARY}, + {"varchar", VARCHAR}, + {"varcharacter", UNUSED}, + {"varying", UNUSED}, + {"virtual", UNUSED}, + {"vindex", VINDEX}, + {"vindexes", VINDEXES}, + {"view", VIEW}, + {"vitess", VITESS}, + {"vitess_keyspaces", VITESS_KEYSPACES}, + {"vitess_metadata", VITESS_METADATA}, + 
{"vitess_shards", VITESS_SHARDS}, + {"vitess_tablets", VITESS_TABLETS}, + {"vitess_migration", VITESS_MIGRATION}, + {"vitess_migrations", VITESS_MIGRATIONS}, + {"vschema", VSCHEMA}, + {"warnings", WARNINGS}, + {"when", WHEN}, + {"where", WHERE}, + {"while", UNUSED}, + {"with", WITH}, + {"without", WITHOUT}, + {"work", WORK}, + {"write", WRITE}, + {"xor", XOR}, + {"year", YEAR}, + {"year_month", UNUSED}, + {"zerofill", ZEROFILL}, +} + +// keywordStrings contains the reverse mapping of token to keyword strings +var keywordStrings = map[int]string{} + +// keywordLookupTable is a perfect hash map that maps **case insensitive** keyword names to their ids +var keywordLookupTable *perfectTable + +func init() { + for _, kw := range keywords { + if kw.id == UNUSED { + continue + } + if kw.name != strings.ToLower(kw.name) { + panic(fmt.Sprintf("keyword %q must be lowercase in table", kw.name)) + } + keywordStrings[kw.id] = kw.name + } + + keywordLookupTable = buildKeywordTable(keywords) +} + +// KeywordString returns the string corresponding to the given keyword +func KeywordString(id int) string { + str, ok := keywordStrings[id] + if !ok { + return "" + } + return str +} + +type perfectTable struct { + keys []keyword + level0 []uint32 // power of 2 size + level0Mask int // len(Level0) - 1 + level1 []uint32 // power of 2 size >= len(keys) + level1Mask int // len(Level1) - 1 +} + +const offset64 = uint64(14695981039346656037) +const prime64 = uint64(1099511628211) + +func fnv1aI(h uint64, s []byte) uint64 { + for _, c := range s { + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + h = (h ^ uint64(c)) * prime64 + } + return h +} + +func fnv1aIstr(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + c := s[i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + h = (h ^ uint64(c)) * prime64 + } + return h +} + +// buildKeywordTable generates a perfect hash map for all the keywords using the "Hash, displace, and compress" +// algorithm described in 
http://cmph.sourceforge.net/papers/esa09.pdf. +func buildKeywordTable(keywords []keyword) *perfectTable { + type indexBucket struct { + n int + vals []int + } + + nextPow2 := func(n int) int { + for i := 1; ; i *= 2 { + if i >= n { + return i + } + } + } + + var ( + level0 = make([]uint32, nextPow2(len(keywords)/4)) + level0Mask = len(level0) - 1 + level1 = make([]uint32, nextPow2(len(keywords))) + level1Mask = len(level1) - 1 + sparseBuckets = make([][]int, len(level0)) + zeroSeed = offset64 + ) + for i, kw := range keywords { + n := int(fnv1aIstr(zeroSeed, kw.name)) & level0Mask + sparseBuckets[n] = append(sparseBuckets[n], i) + } + var buckets []indexBucket + for n, vals := range sparseBuckets { + if len(vals) > 0 { + buckets = append(buckets, indexBucket{n, vals}) + } + } + sort.Slice(buckets, func(i, j int) bool { + return len(buckets[i].vals) > len(buckets[j].vals) + }) + + occ := make([]bool, len(level1)) + var tmpOcc []int + for _, bucket := range buckets { + var seed uint64 + trySeed: + tmpOcc = tmpOcc[:0] + for _, i := range bucket.vals { + n := int(fnv1aIstr(seed, keywords[i].name)) & level1Mask + if occ[n] { + for _, n := range tmpOcc { + occ[n] = false + } + seed++ + goto trySeed + } + occ[n] = true + tmpOcc = append(tmpOcc, n) + level1[n] = uint32(i) + } + level0[bucket.n] = uint32(seed) + } + + return &perfectTable{ + keys: keywords, + level0: level0, + level0Mask: level0Mask, + level1: level1, + level1Mask: level1Mask, + } +} + +// Lookup looks up the given keyword on the perfect map for keywords. 
+// The provided bytes are not modified and are compared **case insensitively** +func (t *perfectTable) Lookup(keyword []byte) (int, bool) { + i0 := int(fnv1aI(offset64, keyword)) & t.level0Mask + seed := t.level0[i0] + i1 := int(fnv1aI(uint64(seed), keyword)) & t.level1Mask + cell := &t.keys[int(t.level1[i1])] + if cell.match(keyword) { + return cell.id, true + } + return 0, false +} + +// LookupString looks up the given keyword on the perfect map for keywords. +// The provided string is compared **case insensitively** +func (t *perfectTable) LookupString(keyword string) (int, bool) { + i0 := int(fnv1aIstr(offset64, keyword)) & t.level0Mask + seed := t.level0[i0] + i1 := int(fnv1aIstr(uint64(seed), keyword)) & t.level1Mask + cell := &t.keys[int(t.level1[i1])] + if cell.matchStr(keyword) { + return cell.id, true + } + return 0, false +} diff --git a/go/vt/sqlparser/keywords_test.go b/go/vt/sqlparser/keywords_test.go new file mode 100644 index 00000000000..903ef18a87f --- /dev/null +++ b/go/vt/sqlparser/keywords_test.go @@ -0,0 +1,15 @@ +package sqlparser + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestKeywordTable(t *testing.T) { + for _, kw := range keywords { + lookup, ok := keywordLookupTable.LookupString(kw.name) + require.Truef(t, ok, "keyword %q failed to match", kw.name) + require.Equalf(t, lookup, kw.id, "keyword %q matched to %d (expected %d)", kw.name, lookup, kw.id) + } +} diff --git a/go/vt/sqlparser/normalizer.go b/go/vt/sqlparser/normalizer.go index ea5f7c3ee08..e4626a57b6d 100644 --- a/go/vt/sqlparser/normalizer.go +++ b/go/vt/sqlparser/normalizer.go @@ -24,6 +24,9 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) +// BindVars is a set of reserved bind variables from a SQL statement +type BindVars map[string]struct{} + // Normalize changes the statement to use bind values, and // updates the bind vars to those values. The supplied prefix // is used to generate the bind var names. 
The function ensures @@ -31,9 +34,10 @@ import ( // Within Select constructs, bind vars are deduped. This allows // us to identify vindex equality. Otherwise, every value is // treated as distinct. -func Normalize(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) { - nz := newNormalizer(stmt, bindVars, prefix) - Rewrite(stmt, nz.WalkStatement, nil) +func Normalize(stmt Statement, known BindVars, bindVars map[string]*querypb.BindVariable, prefix string) error { + nz := newNormalizer(known, bindVars, prefix) + _ = Rewrite(stmt, nz.WalkStatement, nil) + return nz.err } type normalizer struct { @@ -42,13 +46,14 @@ type normalizer struct { reserved map[string]struct{} counter int vals map[string]string + err error } -func newNormalizer(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) *normalizer { +func newNormalizer(reserved map[string]struct{}, bindVars map[string]*querypb.BindVariable, prefix string) *normalizer { return &normalizer{ bindVars: bindVars, prefix: prefix, - reserved: GetBindvars(stmt), + reserved: reserved, counter: 1, vals: make(map[string]string), } @@ -63,7 +68,7 @@ func (nz *normalizer) WalkStatement(cursor *Cursor) bool { case *Set, *Show, *Begin, *Commit, *Rollback, *Savepoint, *SetTransaction, DDLStatement, *SRollback, *Release, *OtherAdmin, *OtherRead: return false case *Select: - Rewrite(node, nz.WalkSelect, nil) + _ = Rewrite(node, nz.WalkSelect, nil) // Don't continue return false case *Literal: @@ -77,7 +82,7 @@ func (nz *normalizer) WalkStatement(cursor *Cursor) bool { case *ConvertType: // we should not rewrite the type description return false } - return true + return nz.err == nil // only continue if we haven't found any errors } // WalkSelect normalizes the AST in Select mode. 
@@ -98,7 +103,7 @@ func (nz *normalizer) WalkSelect(cursor *Cursor) bool { // we should not rewrite the type description return false } - return true + return nz.err == nil // only continue if we haven't found any errors } func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) { @@ -123,9 +128,9 @@ func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) { // Prefixing strings with "'" ensures that a string // and number that have the same representation don't // collide. - key = "'" + string(node.Val) + key = "'" + node.Val } else { - key = string(node.Val) + key = node.Val } bvname, ok := nz.vals[key] if !ok { @@ -136,7 +141,7 @@ func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) { } // Modify the AST node to a bindvar. - cursor.Replace(NewArgument([]byte(":" + bvname))) + cursor.Replace(NewArgument(":" + bvname)) } // convertLiteral converts an Literal without the dedup. @@ -149,7 +154,7 @@ func (nz *normalizer) convertLiteral(node *Literal, cursor *Cursor) { bvname := nz.newName() nz.bindVars[bvname] = bval - cursor.Replace(NewArgument([]byte(":" + bvname))) + cursor.Replace(NewArgument(":" + bvname)) } // convertComparison attempts to convert IN clauses to @@ -192,11 +197,11 @@ func (nz *normalizer) sqlToBindvar(node SQLNode) *querypb.BindVariable { var err error switch node.Type { case StrVal: - v, err = sqltypes.NewValue(sqltypes.VarBinary, node.Val) + v, err = sqltypes.NewValue(sqltypes.VarBinary, node.Bytes()) case IntVal: - v, err = sqltypes.NewValue(sqltypes.Int64, node.Val) + v, err = sqltypes.NewValue(sqltypes.Int64, node.Bytes()) case FloatVal: - v, err = sqltypes.NewValue(sqltypes.Float64, node.Val) + v, err = sqltypes.NewValue(sqltypes.Float64, node.Bytes()) default: return nil } @@ -220,8 +225,6 @@ func (nz *normalizer) newName() string { } // GetBindvars returns a map of the bind vars referenced in the statement. -// TODO(sougou); This function gets called again from vtgate/planbuilder. 
-// Ideally, this should be done only once. func GetBindvars(stmt Statement) map[string]struct{} { bindvars := make(map[string]struct{}) _ = Walk(func(node SQLNode) (kontinue bool, err error) { diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index c28d3c61ba5..09bee85773f 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -21,6 +21,8 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -228,8 +230,10 @@ func TestNormalize(t *testing.T) { t.Error(err) continue } + known := GetBindvars(stmt) bv := make(map[string]*querypb.BindVariable) - Normalize(stmt, bv, prefix) + require.NoError(t, + Normalize(stmt, known, bv, prefix)) outstmt := String(stmt) if outstmt != tc.outstmt { t.Errorf("Query:\n%s:\n%s, want\n%s", tc.in, outstmt, tc.outstmt) @@ -266,11 +270,85 @@ BenchmarkNormalize-8 500000 3620 ns/op 1461 B/op */ func BenchmarkNormalize(b *testing.B) { sql := "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" - ast, err := Parse(sql) + ast, reservedVars, err := Parse2(sql) if err != nil { b.Fatal(err) } for i := 0; i < b.N; i++ { - Normalize(ast, map[string]*querypb.BindVariable{}, "") + require.NoError(b, + Normalize(ast, reservedVars, map[string]*querypb.BindVariable{}, "")) + } +} + +func BenchmarkNormalizeTraces(b *testing.B) { + for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { + b.Run(trace, func(b *testing.B) { + queries := loadQueries(b, trace) + if len(queries) > 10000 { + queries = queries[:10000] + } + + parsed := make([]Statement, 0, len(queries)) + reservedVars := make([]BindVars, 0, len(queries)) + for _, q := range queries { + pp, kb, err := Parse2(q) + if err != nil { + b.Fatal(err) + } + parsed = append(parsed, pp) + reservedVars = append(reservedVars, kb) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for 
i, query := range parsed { + _ = Normalize(query, reservedVars[i], map[string]*querypb.BindVariable{}, "") + } + } + }) + } +} + +func BenchmarkNormalizeVTGate(b *testing.B) { + const keyspace = "main_keyspace" + + queries := loadQueries(b, "lobsters.sql.gz") + if len(queries) > 10000 { + queries = queries[:10000] + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, sql := range queries { + stmt, reservedVars, err := Parse2(sql) + if err != nil { + b.Fatal(err) + } + + query := sql + statement := stmt + bindVarNeeds := &BindVarNeeds{} + bindVars := make(map[string]*querypb.BindVariable) + _ = IgnoreMaxMaxMemoryRowsDirective(stmt) + + // Normalize if possible and retry. + if CanNormalize(stmt) || MustRewriteAST(stmt) { + result, err := PrepareAST(stmt, reservedVars, bindVars, "vtg", true, keyspace) + if err != nil { + b.Fatal(err) + } + statement = result.AST + bindVarNeeds = result.BindVarNeeds + query = String(statement) + } + + _ = query + _ = statement + _ = bindVarNeeds + } } } diff --git a/go/vt/sqlparser/parse_next_test.go b/go/vt/sqlparser/parse_next_test.go index 9f4e9c486d9..493afa4a698 100644 --- a/go/vt/sqlparser/parse_next_test.go +++ b/go/vt/sqlparser/parse_next_test.go @@ -32,7 +32,7 @@ func TestParseNextValid(t *testing.T) { sql.WriteRune(';') } - tokens := NewTokenizer(&sql) + tokens := NewStringTokenizer(sql.String()) for i, tcase := range validSQL { input := tcase.input + ";" want := tcase.output diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index bda582166d7..96a23114341 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -19,9 +19,12 @@ package sqlparser import ( "bufio" "bytes" + "compress/gzip" "fmt" + "io" "math/rand" "os" + "path" "strings" "sync" "testing" @@ -65,6 +68,14 @@ var ( input: "select a from t", }, { input: "select $ from t", + }, { + // shift/reduce conflict on CHARSET, should throw an error on shifting which will be ignored as it is a DDL + 
input: "alter database charset = 'utf16';", + output: "alter database", + partialDDL: true, + }, { + input: "alter database charset charset = 'utf16'", + output: "alter database `charset` character set 'utf16'", }, { input: "select a.b as a$b from $test$", }, { @@ -77,7 +88,7 @@ var ( input: "select 1 from t # aa\n", output: "select 1 from t", }, { - input: "select 1 --aa\nfrom t", + input: "select 1 -- aa\nfrom t", output: "select 1 from t", }, { input: "select 1 #aa\nfrom t", @@ -447,7 +458,8 @@ var ( input: "select /* % no space */ 1 from t where a = b%c", output: "select /* % no space */ 1 from t where a = b % c", }, { - input: "select /* u+ */ 1 from t where a = +b", + input: "select /* u+ */ 1 from t where a = +b", + output: "select /* u+ */ 1 from t where a = b", }, { input: "select /* u- */ 1 from t where a = -b", }, { @@ -607,7 +619,8 @@ var ( input: "select /* binary unary */ a- -b from t", output: "select /* binary unary */ a - -b from t", }, { - input: "select /* - - */ - -b from t", + input: "select /* - - */ - -b from t", + output: "select /* - - */ b from t", }, { input: "select /* binary binary */ binary binary b from t", }, { @@ -732,10 +745,10 @@ var ( }, { input: "insert /* bool expression on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = a > d", }, { - input: "insert into user(username, `status`) values ('Chuck', default(`status`))", + input: "insert into `user`(username, `status`) values ('Chuck', default(`status`))", }, { input: "insert into user(format, tree, vitess) values ('Chuck', 42, 'Barry')", - output: "insert into user(`format`, `tree`, `vitess`) values ('Chuck', 42, 'Barry')", + output: "insert into `user`(`format`, `tree`, `vitess`) values ('Chuck', 42, 'Barry')", }, { input: "insert into customer () values ()", output: "insert into customer values ()", @@ -840,6 +853,9 @@ var ( }, { input: "set character set 'utf8'", output: "set charset 'utf8'", + }, { + input: "set s = 1--4", + output: "set s = 1 - -4", 
}, { input: "set character set \"utf8\"", output: "set charset 'utf8'", @@ -901,6 +917,12 @@ var ( input: "set @variable = 42", }, { input: "set @period.variable = 42", + }, { + input: "set S= +++-++-+(4+1)", + output: "set S = 4 + 1", + }, { + input: "set S= +- - - - -(4+1)", + output: "set S = -(4 + 1)", }, { input: "alter table a add foo int first v", output: "alter table a add column foo int first v", @@ -996,6 +1018,8 @@ var ( }, { input: "alter table a partition by range (id) (partition p0 values less than (10), partition p1 values less than (maxvalue))", output: "alter table a", + }, { + input: "alter table `Post With Space` drop foreign key `Post With Space_ibfk_1`", }, { input: "alter table a add column (id int, id2 char(23))", }, { @@ -1021,7 +1045,7 @@ var ( output: "alter table a add constraint b unique key c (id)", }, { input: "alter table a add constraint check (id)", - output: "alter table a add check (id) enforced", + output: "alter table a add check (id)", }, { input: "alter table a add id int", output: "alter table a add column id int", @@ -1051,6 +1075,12 @@ var ( }, { input: "alter table a drop id", output: "alter table a drop column id", + }, { + input: "ALTER TABLE `product115s` CHANGE `part_number` `part_number` varchar(255) DEFAULT '0' NOT NULL", + output: "alter table product115s change column part_number part_number varchar(255) not null default '0'", + }, { + input: "ALTER TABLE distributors ADD CONSTRAINT zipchk CHECK (char_length(zipcode) = 5)", + output: "alter table distributors add constraint zipchk check (char_length(zipcode) = 5)", }, { input: "alter database character set geostd8", }, { @@ -1078,6 +1108,12 @@ var ( output: "alter database d collate 'utf8_bin' character set geostd8 character set geostd8", }, { input: "create table a", + }, { + input: "CREATE TABLE a", + output: "create table a", + }, { + input: "create table `a`", + output: "create table a", }, { input: "create table a (\n\t`a` int\n)", output: "create table a 
(\n\ta int\n)", @@ -1085,9 +1121,21 @@ var ( input: "create table `by` (\n\t`by` char\n)", }, { input: "create table test (\n\t__year year(4)\n)", + }, { + input: "create table a (\n\ta int not null\n)", + }, { + input: "create table a (\n\ta int not null default 0\n)", + }, { + input: "create table a (a int not null default 0, primary key(a))", + output: "create table a (\n\ta int not null default 0,\n\tprimary key (a)\n)", + }, { + input: "create table a (`a column` int)", + output: "create table a (\n\t`a column` int\n)", + }, { + input: "create table a (\n\ta varchar(32) not null default ''\n)", }, { input: "create table if not exists a (\n\t`a` int\n)", - output: "create table a (\n\ta int\n)", + output: "create table if not exists a (\n\ta int\n)", }, { input: "create table a ignore me this is garbage", output: "create table a", @@ -1097,6 +1145,26 @@ var ( }, { input: "create table a (b1 bool not null primary key, b2 boolean not null)", output: "create table a (\n\tb1 bool not null primary key,\n\tb2 boolean not null\n)", + }, { + input: "create table a (b1 bool NOT NULL PRIMARY KEY, b2 boolean not null, KEY b2_idx(b))", + output: "create table a (\n\tb1 bool not null primary key,\n\tb2 boolean not null,\n\tKEY b2_idx (b)\n)", + }, { + input: "create temporary table a (\n\tid bigint\n)", + }, { + input: "CREATE TABLE pkai (id INT PRIMARY KEY AUTO_INCREMENT);", + output: "create table pkai (\n\tid INT auto_increment primary key\n)", + }, { + input: "CREATE TABLE aipk (id INT AUTO_INCREMENT PRIMARY KEY)", + output: "create table aipk (\n\tid INT auto_increment primary key\n)", + }, { + // This test case is added because MySQL supports this behaviour. + // It allows the user to specify null and not null multiple times. + // The last value specified is used. 
+ input: "create table foo (f timestamp null not null , g timestamp not null null)", + output: "create table foo (\n\tf timestamp not null,\n\tg timestamp null\n)", + }, { + // Tests unicode character § + input: "create table invalid_enum_value_name (\n\there_be_enum enum('$§!') default null\n)", }, { input: "alter vschema create vindex hash_vdx using hash", }, { @@ -1147,7 +1215,7 @@ var ( output: "alter vschema on a add vindex hash (id) using hash", }, { input: "alter vschema on user add vindex name_lookup_vdx (name) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", - output: "alter vschema on user add vindex name_lookup_vdx (`name`) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", + output: "alter vschema on `user` add vindex name_lookup_vdx (`name`) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", }, { input: "alter vschema on user2 add vindex name_lastname_lookup_vdx (name,lastname) using lookup with owner=`user`, table=`name_lastname_keyspace_id_map`, from=`name,lastname`, to=`keyspace_id`", output: "alter vschema on user2 add vindex name_lastname_lookup_vdx (`name`, lastname) using lookup with owner=user, table=name_lastname_keyspace_id_map, from=name,lastname, to=keyspace_id", @@ -1181,11 +1249,14 @@ var ( output: "alter table b add spatial index a (col1)", }, { input: "create fulltext index a on b (col1) key_block_size=12 with parser a comment 'string' algorithm inplace lock none", - output: "alter table b add fulltext index a (col1) key_block_size 12 with parser a comment 'string' algorithm inplace lock none", + output: "alter table b add fulltext index a (col1) key_block_size 12 with parser a comment 'string', algorithm = inplace, lock none", }, { input: "create index a on b ((col1 + col2), (col1*col2))", output: "alter table b add index a ()", partialDDL: true, + }, { + input: "create fulltext index b using btree on A (col1 desc, col2) algorithm = inplace lock = 
none", + output: "alter table A add fulltext index b (col1 desc, col2) using btree, algorithm = inplace, lock none", }, { input: "create algorithm = merge sql security definer view a as select * from e", }, { @@ -1213,8 +1284,8 @@ var ( input: "rename table a to b", output: "rename table a to b", }, { - input: "rename table a to b, b to c", - output: "rename table a to b, b to c", + input: "rename table x.a to b, b to c", + output: "rename table x.a to b, b to c", }, { input: "drop view a,B,c", output: "drop view a, b, c", @@ -1227,21 +1298,35 @@ var ( }, { input: "drop table if exists a,b restrict", output: "drop table if exists a, b", + }, { + input: "drop temporary table if exists a, b", }, { input: "drop view if exists a cascade", output: "drop view if exists a", }, { - input: "drop index b on a", - output: "alter table a", + input: "drop index b on a lock = none algorithm default", + output: "alter table a drop key b, lock none, algorithm = default", + }, { + input: "drop index `PRIMARY` on a lock none", + output: "alter table a drop primary key, lock none", }, { input: "analyze table a", output: "otherread", }, { - input: "flush tables", - output: "flush", + input: "flush tables", }, { - input: "flush tables with read lock", - output: "flush", + input: "flush tables with read lock", + }, { + input: "flush tables a, c.v, b", + }, { + input: "flush local tables a, c.v, b with read lock", + }, { + input: "flush tables a, c.v, b for export", + }, { + input: "flush local binary logs, engine logs, error logs, general logs, hosts, logs, privileges, optimizer_costs", + }, { + input: "flush no_write_to_binlog slow logs, status, user_resources, relay logs, relay logs for channel s", + output: "flush local slow logs, status, user_resources, relay logs, relay logs for channel s", }, { input: "show binary logs", output: "show binary logs", @@ -1273,28 +1358,22 @@ var ( input: "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", output: "show collation 
where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", }, { - input: "show create database d", - output: "show create database", + input: "show create database d", }, { - input: "show create event e", - output: "show create event", + input: "show create event e", }, { input: "show create function f", }, { - input: "show create procedure p", - output: "show create procedure", + input: "show create procedure p", }, { - input: "show create table t", - output: "show create table t", + input: "show create table t", }, { - input: "show create trigger t", - output: "show create trigger", + input: "show create trigger t", }, { input: "show create user u", output: "show create user", }, { - input: "show create view v", - output: "show create view", + input: "show create view v", }, { input: "show databases", output: "show databases", @@ -1327,17 +1406,18 @@ var ( input: "show grants for 'root@localhost'", output: "show grants", }, { - input: "show index from t", + input: "show index from t", + output: "show indexes from t", }, { input: "show indexes from t", }, { - input: "show keys from t", + input: "show keys from t", + output: "show indexes from t", }, { input: "show master status", output: "show master", }, { - input: "show open tables", - output: "show open", + input: "show open tables", }, { input: "show plugins", output: "show plugins", @@ -1443,10 +1523,10 @@ var ( output: "show variables", }, { input: "show vitess_keyspaces", - output: "show databases", + output: "show keyspaces", }, { input: "show vitess_keyspaces like '%'", - output: "show databases like '%'", + output: "show keyspaces like '%'", }, { input: "show vitess_shards", }, { @@ -1463,6 +1543,26 @@ var ( input: "show vschema vindexes", }, { input: "show vschema vindexes on t", + }, { + input: "show vitess_migrations", + }, { + input: "show vitess_migrations from ks", + }, { + input: "show vitess_migrations from ks where col = 42", + }, { + input: `show vitess_migrations from ks like '%pattern'`, + }, { 
+ input: "show vitess_migrations like '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90'", + }, { + input: "revert vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90'", + }, { + input: "alter vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90' retry", + }, { + input: "alter vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90' complete", + }, { + input: "alter vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90' cancel", + }, { + input: "alter vitess_migration cancel all", }, { input: "show warnings", output: "show warnings", @@ -1498,13 +1598,13 @@ var ( output: "explain select * from t", }, { input: "desc foobar", - output: "otherread", + output: "explain foobar", }, { - input: "explain t1", - output: "otherread", + input: "explain t1", }, { - input: "explain t1 col", - output: "otherread", + input: "explain t1 col", + }, { + input: "explain t1 '%col%'", }, { input: "explain select * from t", }, { @@ -1696,12 +1796,12 @@ var ( }, { input: "rollback", }, { - input: "create database test_db", + input: "create database /* simple */ test_db", }, { input: "create schema test_db", output: "create database test_db", }, { - input: "create database if not exists test_db", + input: "create database /* simple */ if not exists test_db", }, { input: "create schema if not exists test_db", output: "create database if not exists test_db", @@ -1714,12 +1814,15 @@ var ( output: "create database test_db", partialDDL: true, }, { - input: "drop database test_db", + input: "CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysql` /*!40100 DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci */ /*!80016 DEFAULT ENCRYPTION='N' */;", + output: "create database if not exists mysql default character set utf8mb4 collate utf8mb4_0900_ai_ci", + }, { + input: "drop database /* simple */ test_db", }, { input: "drop schema test_db", output: "drop database test_db", }, { - input: "drop database if exists test_db", + input: "drop database /* simple */ if exists test_db", }, { input: "delete a.*, b.* 
from tbl_a a, tbl_b b where a.id = b.id and b.name = 'test'", output: "delete a, b from tbl_a as a, tbl_b as b where a.id = b.id and b.`name` = 'test'", @@ -1728,6 +1831,12 @@ var ( output: "select distinct a.* from (select 1 from dual union all select 1 from dual) as a", }, { input: "select `weird function name`() from t", + }, { + input: "select all* from t", + output: "select * from t", + }, { + input: "select distinct* from t", + output: "select distinct * from t", }, { input: "select status() from t", // should not escape function names that are keywords }, { @@ -1740,22 +1849,22 @@ var ( output: "show full columns from AO_E8B6CC_ISSUE_MAPPING from jiradb like '%'", }, { input: "SHOW KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", - output: "show keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", + output: "show indexes from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", }, { input: "SHOW INDEX FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", - output: "show index from AO_E8B6CC_ISSUE_MAPPING from jiradb", + output: "show indexes from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW FULL TABLES FROM `jiradb` LIKE '%'", output: "show full tables from jiradb like '%'", }, { input: "SHOW EXTENDED INDEX FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", - output: "show extended index from AO_E8B6CC_PROJECT_MAPPING from jiradb", + output: "show indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "SHOW EXTENDED KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", - output: "show extended keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", + output: "show indexes from AO_E8B6CC_ISSUE_MAPPING from jiradb", }, { input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", @@ -1765,12 +1874,12 @@ var ( "\tc1 int,\n" + "\tc2 int,\n" + "\tc3 int,\n" + - "\tcheck (c1 != c2) 
enforced,\n" + - "\tcheck (c1 > 10) enforced,\n" + - "\tconstraint c2_positive check (c2 > 0) enforced,\n" + - "\tcheck (c3 < 100) enforced,\n" + - "\tconstraint c1_nonzero check (c1 != 0) enforced,\n" + - "\tcheck (c1 > c3) enforced\n)", + "\tcheck (c1 != c2),\n" + + "\tcheck (c1 > 10),\n" + + "\tconstraint c2_positive check (c2 > 0),\n" + + "\tcheck (c3 < 100),\n" + + "\tconstraint c1_nonzero check (c1 != 0),\n" + + "\tcheck (c1 > c3)\n)", }, { input: "SHOW INDEXES FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", output: "show indexes from AO_E8B6CC_ISSUE_MAPPING from jiradb", @@ -1779,10 +1888,10 @@ var ( output: "show full tables from jiradb like '%'", }, { input: "SHOW EXTENDED INDEXES FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", - output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", + output: "show indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "SHOW EXTENDED INDEXES IN `AO_E8B6CC_PROJECT_MAPPING` IN `jiradb`", - output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", + output: "show indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", }, { input: "do 1", output: "otheradmin", @@ -1810,6 +1919,14 @@ var ( input: "release savepoint a", }, { input: "release savepoint `@@@;a`", + }, { + input: "call proc()", + }, { + input: "call qualified.proc()", + }, { + input: "call proc(1, 'foo')", + }, { + input: "call proc(@param)", }} ) @@ -1826,12 +1943,10 @@ func TestValid(t *testing.T) { t.Errorf("Parsing failed. \nExpected/Got:\n%s\n%s", tcase.output, out) } - // CREATE INDEX currently only has 5.7 specifications. + // Some statements currently only have 5.7 specifications. // For mysql 8.0 syntax, the query is not entirely parsed. // Add more structs as we go on adding full parsing support for DDL constructs for 5.7 syntax. 
switch x := tree.(type) { - case *CreateIndex: - assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed()) case *CreateDatabase: assert.Equal(t, !tcase.partialDDL, x.IsFullyParsed()) case *AlterDatabase: @@ -1887,7 +2002,7 @@ func TestInvalid(t *testing.T) { err: "syntax error", }, { input: "/*!*/", - err: "empty statement", + err: "Query was empty", }} for _, tcase := range invalidSQL { @@ -1924,6 +2039,9 @@ func TestCaseSensitivity(t *testing.T) { }, { input: "alter table A rename to B", output: "alter table A rename B", + }, { + input: "alter table `A r` rename to `B r`", + output: "alter table `A r` rename `B r`", }, { input: "rename table A to B", }, { @@ -1934,7 +2052,7 @@ func TestCaseSensitivity(t *testing.T) { output: "drop table if exists B", }, { input: "drop index b on A", - output: "alter table A", + output: "alter table A drop key b", }, { input: "select a from B", }, { @@ -2196,7 +2314,7 @@ func TestConvert(t *testing.T) { output: "syntax error at position 33", }, { input: "/* a comment */", - output: "empty statement", + output: "Query was empty", }, { input: "set transaction isolation level 12345", output: "syntax error at position 38 near '12345'", @@ -2217,12 +2335,16 @@ func TestSelectInto(t *testing.T) { }{{ input: "select * from t order by name limit 100 into outfile s3 'out_file_name'", output: "select * from t order by `name` asc limit 100 into outfile s3 'out_file_name'", + }, { + input: `select * from TestPerson into outfile s3 's3://test-bucket/export_import/export/users.csv' fields terminated by ',' enclosed by '\"' escaped by '\\' overwrite on`, }, { input: "select * from t into dumpfile 'out_file_name'", }, { - input: "select * from t into outfile 'out_file_name' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", + input: "select * from t into outfile 'out_file_name' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' 
lines starting by 'a' terminated by '\\n'", + }, { + input: "select * from t into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off", }, { - input: "select * from t into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", + input: "select * from t into outfile s3 'out_file_name' character set binary lines terminated by '\\n' starting by 'a' manifest on overwrite off", }, { input: "select * from (select * from t union select * from t2) as t3 where t3.name in (select col from t4) into outfile s3 'out_file_name'", output: "select * from (select * from t union select * from t2) as t3 where t3.`name` in (select col from t4) into outfile s3 'out_file_name'", @@ -2231,6 +2353,12 @@ func TestSelectInto(t *testing.T) { input: "select * from t limit 100 into outfile s3 'out_file_name' union select * from t2", }, { input: "select * from (select * from t into outfile s3 'inner_outfile') as t2 into outfile s3 'out_file_name'", + }, { + input: `select * from TestPerson into outfile s3 's3://test-bucket/export_import/export/users.csv' character set 'utf8' overwrite on`, + }, { + input: `select * from t1 into outfile '/tmp/foo.csv' fields escaped by '\\' terminated by '\n'`, + }, { + input: `select * from t1 into outfile '/tmp/foo.csv' fields escaped by 'c' terminated by '\n' enclosed by '\t'`, }} for _, tcase := range validSQL { @@ -2271,28 +2399,28 @@ func TestPositionedErr(t *testing.T) { output PositionedErr }{{ input: "select convert('abc' as date) from t", - output: PositionedErr{"syntax error", 24, []byte("as")}, + output: PositionedErr{"syntax error", 24, "as"}, }, { input: "select convert from t", - output: PositionedErr{"syntax error", 20, []byte("from")}, + output: 
PositionedErr{"syntax error", 20, "from"}, }, { input: "select cast('foo', decimal) from t", - output: PositionedErr{"syntax error", 19, nil}, + output: PositionedErr{"syntax error", 19, ""}, }, { input: "select convert('abc', datetime(4+9)) from t", - output: PositionedErr{"syntax error", 34, nil}, + output: PositionedErr{"syntax error", 34, ""}, }, { input: "select convert('abc', decimal(4+9)) from t", - output: PositionedErr{"syntax error", 33, nil}, + output: PositionedErr{"syntax error", 33, ""}, }, { input: "set transaction isolation level 12345", - output: PositionedErr{"syntax error", 38, []byte("12345")}, + output: PositionedErr{"syntax error", 38, "12345"}, }, { input: "select * from a left join b", - output: PositionedErr{"syntax error", 28, nil}, + output: PositionedErr{"syntax error", 28, ""}, }, { input: "select a from (select * from tbl)", - output: PositionedErr{"syntax error", 34, nil}, + output: PositionedErr{"syntax error", 34, ""}, }} for _, tcase := range invalidSQL { @@ -2301,7 +2429,7 @@ func TestPositionedErr(t *testing.T) { if posErr, ok := err.(PositionedErr); !ok { t.Errorf("%s: %v expected PositionedErr, got (%T) %v", tcase.input, err, err, tcase.output) - } else if posErr.Pos != tcase.output.Pos || !bytes.Equal(posErr.Near, tcase.output.Near) || err.Error() != tcase.output.Error() { + } else if posErr.Pos != tcase.output.Pos || posErr.Near != tcase.output.Near || err.Error() != tcase.output.Error() { t.Errorf("%s: %v, want: %v", tcase.input, err, tcase.output) } } @@ -2453,6 +2581,14 @@ func TestCreateTable(t *testing.T) { " col_multipolygon2 multipolygon not null\n" + ")", + // test null columns + "create table foo (\n" + + " id int primary key,\n" + + " a varchar(255) null,\n" + + " b varchar(255) null default 'foo',\n" + + " c timestamp null default current_timestamp()\n" + + ")", + // test defining indexes separately "create table t (\n" + " id int auto_increment,\n" + @@ -2529,6 +2665,15 @@ func TestCreateTable(t *testing.T) { " 
constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on update cascade\n" + ")", + // constraint name with spaces + "create table `Post With Space` (\n" + + " id int(11) not null auto_increment,\n" + + " user_id int(11) not null,\n" + + " primary key (id),\n" + + " unique key post_user_unique (user_id),\n" + + " constraint `Post With Space_ibfk_1` foreign key (user_id) references `User` (id)\n" + + ") ENGINE Innodb", + // table options "create table t (\n" + " id int auto_increment\n" + @@ -2972,11 +3117,6 @@ var ( input: "select /* aa", output: "syntax error at position 13 near '/* aa'", excludeMulti: true, - }, { - // non_reserved keywords are currently not permitted everywhere - input: "create database repair", - output: "syntax error at position 23 near 'repair'", - excludeMulti: true, }} ) @@ -3024,120 +3164,137 @@ func TestSkipToEnd(t *testing.T) { } } -func TestParseDjangoQueries(t *testing.T) { - - file, err := os.Open("./test_queries/django_queries.txt") - if err != nil { - t.Errorf(" Error: %v", err) - } +func loadQueries(t testing.TB, filename string) (queries []string) { + file, err := os.Open(path.Join("testdata", filename)) + require.NoError(t, err) defer file.Close() - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - _, err := Parse(string(scanner.Text())) + var read io.Reader + if strings.HasSuffix(filename, ".gz") { + gzread, err := gzip.NewReader(file) if err != nil { - t.Error(scanner.Text()) - t.Errorf(" Error: %v", err) + t.Fatal(err) } + defer gzread.Close() + read = gzread + } else { + read = file } -} -// Benchmark run on 6/23/17, prior to improvements: -// BenchmarkParse1-4 100000 16334 ns/op -// BenchmarkParse2-4 30000 44121 ns/op - -// Benchmark run on 9/3/18, comparing pooled parser performance. 
-// -// benchmark old ns/op new ns/op delta -// BenchmarkNormalize-4 2540 2533 -0.28% -// BenchmarkParse1-4 18269 13330 -27.03% -// BenchmarkParse2-4 46703 41255 -11.67% -// BenchmarkParse2Parallel-4 22246 20707 -6.92% -// BenchmarkParse3-4 4064743 4083135 +0.45% -// -// benchmark old allocs new allocs delta -// BenchmarkNormalize-4 27 27 +0.00% -// BenchmarkParse1-4 75 74 -1.33% -// BenchmarkParse2-4 264 263 -0.38% -// BenchmarkParse2Parallel-4 176 175 -0.57% -// BenchmarkParse3-4 360 361 +0.28% -// -// benchmark old bytes new bytes delta -// BenchmarkNormalize-4 821 821 +0.00% -// BenchmarkParse1-4 22776 2307 -89.87% -// BenchmarkParse2-4 28352 7881 -72.20% -// BenchmarkParse2Parallel-4 25712 5235 -79.64% -// BenchmarkParse3-4 6352082 6336307 -0.25% - -const ( - sql1 = "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" - sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4" -) + scanner := bufio.NewScanner(read) + for scanner.Scan() { + queries = append(queries, scanner.Text()) + } + return queries +} -func BenchmarkParse1(b *testing.B) { - sql := sql1 - for i := 0; i < b.N; i++ { - ast, err := Parse(sql) +func TestParseDjangoQueries(t *testing.T) { + for _, query := range loadQueries(t, "django_queries.txt") { + _, err := Parse(query) if err != nil { - b.Fatal(err) + t.Errorf("failed to parse %q: %v", query, err) } - _ = String(ast) } } -func BenchmarkParse2(b *testing.B) { - sql := sql2 - for i := 0; i < b.N; i++ { - ast, err := Parse(sql) +func TestParseLobstersQueries(t *testing.T) { + for _, query := range loadQueries(t, "lobsters.sql.gz") { + _, err := Parse(query) if err != nil { - b.Fatal(err) + t.Errorf("failed to parse %q: %v", query, err) } - _ = String(ast) } } -func BenchmarkParse2Parallel(b *testing.B) { - sql := sql2 - b.RunParallel(func(pb *testing.PB) { - for 
pb.Next() { - ast, err := Parse(sql) - if err != nil { - b.Fatal(err) +func BenchmarkParseTraces(b *testing.B) { + for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { + b.Run(trace, func(b *testing.B) { + queries := loadQueries(b, trace) + if len(queries) > 10000 { + queries = queries[:10000] } - _ = ast - } - }) -} + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, query := range queries { + _, err := Parse(query) + if err != nil { + b.Fatal(err) + } + } + } + }) + } -var benchQuery string +} -func init() { - // benchQuerySize is the approximate size of the query. - benchQuerySize := 1000000 +func BenchmarkParseStress(b *testing.B) { + const ( + sql1 = "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" + sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4" + ) - // Size of value is 1/10 size of query. Then we add - // 10 such values to the where clause. - var baseval bytes.Buffer - for i := 0; i < benchQuerySize/100; i++ { - // Add an escape character: This will force the upcoming - // tokenizer improvement to still create a copy of the string. - // Then we can see if avoiding the copy will be worth it. 
- baseval.WriteString("\\'123456789") - } + for i, sql := range []string{sql1, sql2} { + b.Run(fmt.Sprintf("sql%d", i), func(b *testing.B) { + var buf bytes.Buffer + buf.WriteString(sql) + querySQL := buf.String() + b.ReportAllocs() + b.ResetTimer() - var buf bytes.Buffer - buf.WriteString("select a from t1 where v = 1") - for i := 0; i < 10; i++ { - fmt.Fprintf(&buf, " and v%d = \"%d%s\"", i, i, baseval.String()) + for i := 0; i < b.N; i++ { + _, err := Parse(querySQL) + if err != nil { + b.Fatal(err) + } + } + }) } - benchQuery = buf.String() } func BenchmarkParse3(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := Parse(benchQuery); err != nil { - b.Fatal(err) + largeQueryBenchmark := func(b *testing.B, escape bool) { + b.Helper() + + // benchQuerySize is the approximate size of the query. + benchQuerySize := 1000000 + + // Size of value is 1/10 size of query. Then we add + // 10 such values to the where clause. + var baseval bytes.Buffer + for i := 0; i < benchQuerySize/100; i++ { + // Add an escape character: This will force the upcoming + // tokenizer improvement to still create a copy of the string. + // Then we can see if avoiding the copy will be worth it. 
+ if escape { + baseval.WriteString("\\'123456789") + } else { + baseval.WriteString("123456789") + } + } + + var buf bytes.Buffer + buf.WriteString("select a from t1 where v = 1") + for i := 0; i < 10; i++ { + fmt.Fprintf(&buf, " and v%d = \"%d%s\"", i, i, baseval.String()) + } + benchQuery := buf.String() + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if _, err := Parse(benchQuery); err != nil { + b.Fatal(err) + } } } + + b.Run("normal", func(b *testing.B) { + largeQueryBenchmark(b, false) + }) + + b.Run("escaped", func(b *testing.B) { + largeQueryBenchmark(b, true) + }) } diff --git a/go/vt/sqlparser/parser.go b/go/vt/sqlparser/parser.go index 08df99efb65..c06cb662e6c 100644 --- a/go/vt/sqlparser/parser.go +++ b/go/vt/sqlparser/parser.go @@ -17,10 +17,8 @@ limitations under the License. package sqlparser import ( - "errors" "fmt" "io" - "runtime/debug" "sync" "vitess.io/vitess/go/vt/log" @@ -30,33 +28,27 @@ import ( ) // parserPool is a pool for parser objects. -var parserPool = sync.Pool{} +var parserPool = sync.Pool{ + New: func() interface{} { + return &yyParserImpl{} + }, +} // zeroParser is a zero-initialized parser to help reinitialize the parser for pooling. -var zeroParser = *(yyNewParser().(*yyParserImpl)) +var zeroParser yyParserImpl + +// MySQLVersion is the version of MySQL that the parser would emulate +var MySQLVersion string = "50709" // yyParsePooled is a wrapper around yyParse that pools the parser objects. There isn't a -// particularly good reason to use yyParse directly, since it immediately discards its parser. What -// would be ideal down the line is to actually pool the stacks themselves rather than the parser -// objects, as per https://github.com/cznic/goyacc/blob/master/main.go. However, absent an upstream -// change to goyacc, this is the next best option. +// particularly good reason to use yyParse directly, since it immediately discards its parser. 
// // N.B: Parser pooling means that you CANNOT take references directly to parse stack variables (e.g. // $$ = &$4) in sql.y rules. You must instead add an intermediate reference like so: // showCollationFilterOpt := $4 // $$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt} func yyParsePooled(yylex yyLexer) int { - // Being very particular about using the base type and not an interface type b/c we depend on - // the implementation to know how to reinitialize the parser. - var parser *yyParserImpl - - i := parserPool.Get() - if i != nil { - parser = i.(*yyParserImpl) - } else { - parser = yyNewParser().(*yyParserImpl) - } - + parser := parserPool.Get().(*yyParserImpl) defer func() { *parser = zeroParser parserPool.Put(parser) @@ -76,28 +68,34 @@ func yyParsePooled(yylex yyLexer) int { // a set of types, define the function as iTypeName. // This will help avoid name collisions. -// Parse parses the SQL in full and returns a Statement, which -// is the AST representation of the query. If a DDL statement +// Parse2 parses the SQL in full and returns a Statement, which +// is the AST representation of the query, and a set of BindVars, which are all the +// bind variables that were found in the original SQL query. If a DDL statement // is partially parsed but still contains a syntax error, the // error is ignored and the DDL is returned anyway. 
-func Parse(sql string) (Statement, error) { +func Parse2(sql string) (Statement, BindVars, error) { tokenizer := NewStringTokenizer(sql) if yyParsePooled(tokenizer) != 0 { if tokenizer.partialDDL != nil { if typ, val := tokenizer.Scan(); typ != 0 { - return nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", string(val)) + return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", string(val)) } log.Warningf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError) tokenizer.ParseTree = tokenizer.partialDDL - return tokenizer.ParseTree, nil + return tokenizer.ParseTree, tokenizer.BindVars, nil } - return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, tokenizer.LastError.Error()) + return nil, nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, tokenizer.LastError.Error()) } if tokenizer.ParseTree == nil { - log.Infof("Empty Statement: %s", debug.Stack()) - return nil, ErrEmpty + return nil, nil, ErrEmpty } - return tokenizer.ParseTree, nil + return tokenizer.ParseTree, tokenizer.BindVars, nil +} + +// Parse behaves like Parse2 but does not return a set of bind variables +func Parse(sql string) (Statement, error) { + stmt, _, err := Parse2(sql) + return stmt, err } // ParseStrictDDL is the same as Parse except it errors on @@ -108,8 +106,6 @@ func ParseStrictDDL(sql string) (Statement, error) { return nil, tokenizer.LastError } if tokenizer.ParseTree == nil { - log.Infof("Empty Statement DDL: %s", debug.Stack()) - return nil, ErrEmpty } return tokenizer.ParseTree, nil @@ -137,11 +133,11 @@ func ParseNextStrictDDL(tokenizer *Tokenizer) (Statement, error) { } func parseNext(tokenizer *Tokenizer, strict bool) (Statement, error) { - if tokenizer.lastChar == ';' { - tokenizer.next() + if tokenizer.cur() == ';' { + tokenizer.skip(1) tokenizer.skipBlank() } - if tokenizer.lastChar == eofChar { + if tokenizer.cur() == eofChar { return nil, io.EOF } @@ -161,7 +157,7 @@ func parseNext(tokenizer *Tokenizer, strict bool) 
(Statement, error) { } // ErrEmpty is a sentinel error returned when parsing empty statements. -var ErrEmpty = errors.New("empty statement") +var ErrEmpty = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.EmptyQuery, "Query was empty") // SplitStatement returns the first sql statement up to either a ; or EOF // and the remainder from the given buffer @@ -178,7 +174,7 @@ func SplitStatement(blob string) (string, string, error) { return "", "", tokenizer.LastError } if tkn == ';' { - return blob[:tokenizer.Position-2], blob[tokenizer.Position-1:], nil + return blob[:tokenizer.Pos-1], blob[tokenizer.Pos:], nil } return blob, "", nil } @@ -198,14 +194,14 @@ loop: tkn, _ = tokenizer.Scan() switch tkn { case ';': - stmt = blob[stmtBegin : tokenizer.Position-2] + stmt = blob[stmtBegin : tokenizer.Pos-1] if !emptyStatement { pieces = append(pieces, stmt) emptyStatement = true } - stmtBegin = tokenizer.Position - 1 + stmtBegin = tokenizer.Pos case 0, eofChar: - blobTail := tokenizer.Position - 2 + blobTail := tokenizer.Pos - 1 if stmtBegin < blobTail { stmt = blob[stmtBegin : blobTail+1] if !emptyStatement { @@ -229,6 +225,6 @@ func String(node SQLNode) string { } buf := NewTrackedBuffer(nil) - buf.Myprintf("%v", node) + node.formatFast(buf) return buf.String() } diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go index 801a8faa1d2..7b917f8e698 100644 --- a/go/vt/sqlparser/precedence_test.go +++ b/go/vt/sqlparser/precedence_test.go @@ -132,6 +132,7 @@ func TestParens(t *testing.T) { {in: "(a & b) | c", expected: "a & b | c"}, {in: "not (a=b and c=d)", expected: "not (a = b and c = d)"}, {in: "not (a=b) and c=d", expected: "not a = b and c = d"}, + {in: "(not (a=b)) and c=d", expected: "not a = b and c = d"}, {in: "-(12)", expected: "-12"}, {in: "-(12 + 12)", expected: "-(12 + 12)"}, {in: "(1 > 2) and (1 = b)", expected: "1 > 2 and 1 = b"}, diff --git a/go/vt/sqlparser/random_expr.go b/go/vt/sqlparser/random_expr.go index 
a2c1638c1de..61ab7ed6536 100644 --- a/go/vt/sqlparser/random_expr.go +++ b/go/vt/sqlparser/random_expr.go @@ -126,13 +126,13 @@ func (g *generator) randomBool() bool { func (g *generator) intLiteral() Expr { t := fmt.Sprintf("%d", g.r.Intn(1000)-g.r.Intn((1000))) - return NewIntLiteral([]byte(t)) + return NewIntLiteral(t) } var words = []string{"ox", "ant", "ape", "asp", "bat", "bee", "boa", "bug", "cat", "cod", "cow", "cub", "doe", "dog", "eel", "eft", "elf", "elk", "emu", "ewe", "fly", "fox", "gar", "gnu", "hen", "hog", "imp", "jay", "kid", "kit", "koi", "lab", "man", "owl", "pig", "pug", "pup", "ram", "rat", "ray", "yak", "bass", "bear", "bird", "boar", "buck", "bull", "calf", "chow", "clam", "colt", "crab", "crow", "dane", "deer", "dodo", "dory", "dove", "drum", "duck", "fawn", "fish", "flea", "foal", "fowl", "frog", "gnat", "goat", "grub", "gull", "hare", "hawk", "ibex", "joey", "kite", "kiwi", "lamb", "lark", "lion", "loon", "lynx", "mako", "mink", "mite", "mole", "moth", "mule", "mutt", "newt", "orca", "oryx", "pika", "pony", "puma", "seal", "shad", "slug", "sole", "stag", "stud", "swan", "tahr", "teal", "tick", "toad", "tuna", "wasp", "wolf", "worm", "wren", "yeti", "adder", "akita", "alien", "aphid", "bison", "boxer", "bream", "bunny", "burro", "camel", "chimp", "civet", "cobra", "coral", "corgi", "crane", "dingo", "drake", "eagle", "egret", "filly", "finch", "gator", "gecko", "ghost", "ghoul", "goose", "guppy", "heron", "hippo", "horse", "hound", "husky", "hyena", "koala", "krill", "leech", "lemur", "liger", "llama", "louse", "macaw", "midge", "molly", "moose", "moray", "mouse", "panda", "perch", "prawn", "quail", "racer", "raven", "rhino", "robin", "satyr", "shark", "sheep", "shrew", "skink", "skunk", "sloth", "snail", "snake", "snipe", "squid", "stork", "swift", "swine", "tapir", "tetra", "tiger", "troll", "trout", "viper", "wahoo", "whale", "zebra", "alpaca", "amoeba", "baboon", "badger", "beagle", "bedbug", "beetle", "bengal", "bobcat", "caiman", 
"cattle", "cicada", "collie", "condor", "cougar", "coyote", "dassie", "donkey", "dragon", "earwig", "falcon", "feline", "ferret", "gannet", "gibbon", "glider", "goblin", "gopher", "grouse", "guinea", "hermit", "hornet", "iguana", "impala", "insect", "jackal", "jaguar", "jennet", "kitten", "kodiak", "lizard", "locust", "maggot", "magpie", "mammal", "mantis", "marlin", "marmot", "marten", "martin", "mayfly", "minnow", "monkey", "mullet", "muskox", "ocelot", "oriole", "osprey", "oyster", "parrot", "pigeon", "piglet", "poodle", "possum", "python", "quagga", "rabbit", "raptor", "rodent", "roughy", "salmon", "sawfly", "serval", "shiner", "shrimp", "spider", "sponge", "tarpon", "thrush", "tomcat", "toucan", "turkey", "turtle", "urchin", "vervet", "walrus", "weasel", "weevil", "wombat", "anchovy", "anemone", "bluejay", "buffalo", "bulldog", "buzzard", "caribou", "catfish", "chamois", "cheetah", "chicken", "chigger", "cowbird", "crappie", "crawdad", "cricket", "dogfish", "dolphin", "firefly", "garfish", "gazelle", "gelding", "giraffe", "gobbler", "gorilla", "goshawk", "grackle", "griffon", "grizzly", "grouper", "haddock", "hagfish", "halibut", "hamster", "herring", "jackass", "javelin", "jawfish", "jaybird", "katydid", "ladybug", "lamprey", "lemming", "leopard", "lioness", "lobster", "macaque", "mallard", "mammoth", "manatee", "mastiff", "meerkat", "mollusk", "monarch", "mongrel", "monitor", "monster", "mudfish", "muskrat", "mustang", "narwhal", "oarfish", "octopus", "opossum", "ostrich", "panther", "peacock", "pegasus", "pelican", "penguin", "phoenix", "piranha", "polecat", "primate", "quetzal", "raccoon", "rattler", "redbird", "redfish", "reptile", "rooster", "sawfish", "sculpin", "seagull", "skylark", "snapper", "spaniel", "sparrow", "sunbeam", "sunbird", "sunfish", "tadpole", "termite", "terrier", "unicorn", "vulture", "wallaby", "walleye", "warthog", "whippet", "wildcat", "aardvark", "airedale", "albacore", "anteater", "antelope", "arachnid", "barnacle", "basilisk", 
"blowfish", "bluebird", "bluegill", "bonefish", "bullfrog", "cardinal", "chipmunk", "cockatoo", "crayfish", "dinosaur", "doberman", "duckling", "elephant", "escargot", "flamingo", "flounder", "foxhound", "glowworm", "goldfish", "grubworm", "hedgehog", "honeybee", "hookworm", "humpback", "kangaroo", "killdeer", "kingfish", "labrador", "lacewing", "ladybird", "lionfish", "longhorn", "mackerel", "malamute", "marmoset", "mastodon", "moccasin", "mongoose", "monkfish", "mosquito", "pangolin", "parakeet", "pheasant", "pipefish", "platypus", "polliwog", "porpoise", "reindeer", "ringtail", "sailfish", "scorpion", "seahorse", "seasnail", "sheepdog", "shepherd", "silkworm", "squirrel", "stallion", "starfish", "starling", "stingray", "stinkbug", "sturgeon", "terrapin", "titmouse", "tortoise", "treefrog", "werewolf", "woodcock"} func (g *generator) stringLiteral() Expr { - return NewStrLiteral([]byte(g.randomOfS(words))) + return NewStrLiteral(g.randomOfS(words)) } func (g *generator) stringExpr() Expr { diff --git a/go/vt/sqlparser/redact_query.go b/go/vt/sqlparser/redact_query.go index 55b760178f8..04295c1509f 100644 --- a/go/vt/sqlparser/redact_query.go +++ b/go/vt/sqlparser/redact_query.go @@ -23,13 +23,16 @@ func RedactSQLQuery(sql string) (string, error) { bv := map[string]*querypb.BindVariable{} sqlStripped, comments := SplitMarginComments(sql) - stmt, err := Parse(sqlStripped) + stmt, reservedVars, err := Parse2(sqlStripped) if err != nil { return "", err } prefix := "redacted" - Normalize(stmt, bv, prefix) + err = Normalize(stmt, reservedVars, bv, prefix) + if err != nil { + return "", err + } return comments.Leading + String(stmt) + comments.Trailing, nil } diff --git a/go/vt/sqlparser/rewriter.go b/go/vt/sqlparser/rewriter.go deleted file mode 100644 index 4cda2deca5e..00000000000 --- a/go/vt/sqlparser/rewriter.go +++ /dev/null @@ -1,1747 +0,0 @@ -// Code generated by visitorgen/main/main.go. DO NOT EDIT. 
- -package sqlparser - -//go:generate go run ./visitorgen/main -input=ast.go -output=rewriter.go - -import ( - "reflect" -) - -type replacerFunc func(newNode, parent SQLNode) - -// application carries all the shared data so we can pass it around cheaply. -type application struct { - pre, post ApplyFunc - cursor Cursor -} - -func replaceAddColumnsAfter(newNode, parent SQLNode) { - parent.(*AddColumns).After = newNode.(*ColName) -} - -type replaceAddColumnsColumns int - -func (r *replaceAddColumnsColumns) replace(newNode, container SQLNode) { - container.(*AddColumns).Columns[int(*r)] = newNode.(*ColumnDefinition) -} - -func (r *replaceAddColumnsColumns) inc() { - *r++ -} - -func replaceAddColumnsFirst(newNode, parent SQLNode) { - parent.(*AddColumns).First = newNode.(*ColName) -} - -func replaceAddConstraintDefinitionConstraintDefinition(newNode, parent SQLNode) { - parent.(*AddConstraintDefinition).ConstraintDefinition = newNode.(*ConstraintDefinition) -} - -func replaceAddIndexDefinitionIndexDefinition(newNode, parent SQLNode) { - parent.(*AddIndexDefinition).IndexDefinition = newNode.(*IndexDefinition) -} - -func replaceAliasedExprAs(newNode, parent SQLNode) { - parent.(*AliasedExpr).As = newNode.(ColIdent) -} - -func replaceAliasedExprExpr(newNode, parent SQLNode) { - parent.(*AliasedExpr).Expr = newNode.(Expr) -} - -func replaceAliasedTableExprAs(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).As = newNode.(TableIdent) -} - -func replaceAliasedTableExprExpr(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).Expr = newNode.(SimpleTableExpr) -} - -func replaceAliasedTableExprHints(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).Hints = newNode.(*IndexHints) -} - -func replaceAliasedTableExprPartitions(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).Partitions = newNode.(Partitions) -} - -func replaceAlterColumnColumn(newNode, parent SQLNode) { - parent.(*AlterColumn).Column = newNode.(*ColName) -} - -func 
replaceAlterColumnDefaultVal(newNode, parent SQLNode) { - parent.(*AlterColumn).DefaultVal = newNode.(Expr) -} - -type replaceAlterTableAlterOptions int - -func (r *replaceAlterTableAlterOptions) replace(newNode, container SQLNode) { - container.(*AlterTable).AlterOptions[int(*r)] = newNode.(AlterOption) -} - -func (r *replaceAlterTableAlterOptions) inc() { - *r++ -} - -func replaceAlterTablePartitionSpec(newNode, parent SQLNode) { - parent.(*AlterTable).PartitionSpec = newNode.(*PartitionSpec) -} - -func replaceAlterTableTable(newNode, parent SQLNode) { - parent.(*AlterTable).Table = newNode.(TableName) -} - -func replaceAlterViewColumns(newNode, parent SQLNode) { - parent.(*AlterView).Columns = newNode.(Columns) -} - -func replaceAlterViewSelect(newNode, parent SQLNode) { - parent.(*AlterView).Select = newNode.(SelectStatement) -} - -func replaceAlterViewViewName(newNode, parent SQLNode) { - parent.(*AlterView).ViewName = newNode.(TableName) -} - -func replaceAlterVschemaAutoIncSpec(newNode, parent SQLNode) { - parent.(*AlterVschema).AutoIncSpec = newNode.(*AutoIncSpec) -} - -func replaceAlterVschemaTable(newNode, parent SQLNode) { - parent.(*AlterVschema).Table = newNode.(TableName) -} - -type replaceAlterVschemaVindexCols int - -func (r *replaceAlterVschemaVindexCols) replace(newNode, container SQLNode) { - container.(*AlterVschema).VindexCols[int(*r)] = newNode.(ColIdent) -} - -func (r *replaceAlterVschemaVindexCols) inc() { - *r++ -} - -func replaceAlterVschemaVindexSpec(newNode, parent SQLNode) { - parent.(*AlterVschema).VindexSpec = newNode.(*VindexSpec) -} - -func replaceAndExprLeft(newNode, parent SQLNode) { - parent.(*AndExpr).Left = newNode.(Expr) -} - -func replaceAndExprRight(newNode, parent SQLNode) { - parent.(*AndExpr).Right = newNode.(Expr) -} - -func replaceAutoIncSpecColumn(newNode, parent SQLNode) { - parent.(*AutoIncSpec).Column = newNode.(ColIdent) -} - -func replaceAutoIncSpecSequence(newNode, parent SQLNode) { - 
parent.(*AutoIncSpec).Sequence = newNode.(TableName) -} - -func replaceBinaryExprLeft(newNode, parent SQLNode) { - parent.(*BinaryExpr).Left = newNode.(Expr) -} - -func replaceBinaryExprRight(newNode, parent SQLNode) { - parent.(*BinaryExpr).Right = newNode.(Expr) -} - -func replaceCaseExprElse(newNode, parent SQLNode) { - parent.(*CaseExpr).Else = newNode.(Expr) -} - -func replaceCaseExprExpr(newNode, parent SQLNode) { - parent.(*CaseExpr).Expr = newNode.(Expr) -} - -type replaceCaseExprWhens int - -func (r *replaceCaseExprWhens) replace(newNode, container SQLNode) { - container.(*CaseExpr).Whens[int(*r)] = newNode.(*When) -} - -func (r *replaceCaseExprWhens) inc() { - *r++ -} - -func replaceChangeColumnAfter(newNode, parent SQLNode) { - parent.(*ChangeColumn).After = newNode.(*ColName) -} - -func replaceChangeColumnFirst(newNode, parent SQLNode) { - parent.(*ChangeColumn).First = newNode.(*ColName) -} - -func replaceChangeColumnNewColDefinition(newNode, parent SQLNode) { - parent.(*ChangeColumn).NewColDefinition = newNode.(*ColumnDefinition) -} - -func replaceChangeColumnOldColumn(newNode, parent SQLNode) { - parent.(*ChangeColumn).OldColumn = newNode.(*ColName) -} - -func replaceCheckConstraintDefinitionExpr(newNode, parent SQLNode) { - parent.(*CheckConstraintDefinition).Expr = newNode.(Expr) -} - -func replaceColNameName(newNode, parent SQLNode) { - parent.(*ColName).Name = newNode.(ColIdent) -} - -func replaceColNameQualifier(newNode, parent SQLNode) { - parent.(*ColName).Qualifier = newNode.(TableName) -} - -func replaceCollateExprExpr(newNode, parent SQLNode) { - parent.(*CollateExpr).Expr = newNode.(Expr) -} - -func replaceColumnDefinitionName(newNode, parent SQLNode) { - parent.(*ColumnDefinition).Name = newNode.(ColIdent) -} - -func replaceColumnTypeComment(newNode, parent SQLNode) { - parent.(*ColumnType).Comment = newNode.(*Literal) -} - -func replaceColumnTypeDefault(newNode, parent SQLNode) { - parent.(*ColumnType).Default = newNode.(Expr) -} - -func 
replaceColumnTypeLength(newNode, parent SQLNode) { - parent.(*ColumnType).Length = newNode.(*Literal) -} - -func replaceColumnTypeOnUpdate(newNode, parent SQLNode) { - parent.(*ColumnType).OnUpdate = newNode.(Expr) -} - -func replaceColumnTypeScale(newNode, parent SQLNode) { - parent.(*ColumnType).Scale = newNode.(*Literal) -} - -type replaceColumnsItems int - -func (r *replaceColumnsItems) replace(newNode, container SQLNode) { - container.(Columns)[int(*r)] = newNode.(ColIdent) -} - -func (r *replaceColumnsItems) inc() { - *r++ -} - -func replaceComparisonExprEscape(newNode, parent SQLNode) { - parent.(*ComparisonExpr).Escape = newNode.(Expr) -} - -func replaceComparisonExprLeft(newNode, parent SQLNode) { - parent.(*ComparisonExpr).Left = newNode.(Expr) -} - -func replaceComparisonExprRight(newNode, parent SQLNode) { - parent.(*ComparisonExpr).Right = newNode.(Expr) -} - -func replaceConstraintDefinitionDetails(newNode, parent SQLNode) { - parent.(*ConstraintDefinition).Details = newNode.(ConstraintInfo) -} - -func replaceConvertExprExpr(newNode, parent SQLNode) { - parent.(*ConvertExpr).Expr = newNode.(Expr) -} - -func replaceConvertExprType(newNode, parent SQLNode) { - parent.(*ConvertExpr).Type = newNode.(*ConvertType) -} - -func replaceConvertTypeLength(newNode, parent SQLNode) { - parent.(*ConvertType).Length = newNode.(*Literal) -} - -func replaceConvertTypeScale(newNode, parent SQLNode) { - parent.(*ConvertType).Scale = newNode.(*Literal) -} - -func replaceConvertUsingExprExpr(newNode, parent SQLNode) { - parent.(*ConvertUsingExpr).Expr = newNode.(Expr) -} - -func replaceCreateIndexName(newNode, parent SQLNode) { - parent.(*CreateIndex).Name = newNode.(ColIdent) -} - -func replaceCreateIndexTable(newNode, parent SQLNode) { - parent.(*CreateIndex).Table = newNode.(TableName) -} - -func replaceCreateTableOptLike(newNode, parent SQLNode) { - parent.(*CreateTable).OptLike = newNode.(*OptLike) -} - -func replaceCreateTableTable(newNode, parent SQLNode) { - 
parent.(*CreateTable).Table = newNode.(TableName) -} - -func replaceCreateTableTableSpec(newNode, parent SQLNode) { - parent.(*CreateTable).TableSpec = newNode.(*TableSpec) -} - -func replaceCreateViewColumns(newNode, parent SQLNode) { - parent.(*CreateView).Columns = newNode.(Columns) -} - -func replaceCreateViewSelect(newNode, parent SQLNode) { - parent.(*CreateView).Select = newNode.(SelectStatement) -} - -func replaceCreateViewViewName(newNode, parent SQLNode) { - parent.(*CreateView).ViewName = newNode.(TableName) -} - -func replaceCurTimeFuncExprFsp(newNode, parent SQLNode) { - parent.(*CurTimeFuncExpr).Fsp = newNode.(Expr) -} - -func replaceCurTimeFuncExprName(newNode, parent SQLNode) { - parent.(*CurTimeFuncExpr).Name = newNode.(ColIdent) -} - -func replaceDDLFromTables(newNode, parent SQLNode) { - parent.(*DDL).FromTables = newNode.(TableNames) -} - -func replaceDDLOptLike(newNode, parent SQLNode) { - parent.(*DDL).OptLike = newNode.(*OptLike) -} - -func replaceDDLPartitionSpec(newNode, parent SQLNode) { - parent.(*DDL).PartitionSpec = newNode.(*PartitionSpec) -} - -func replaceDDLTable(newNode, parent SQLNode) { - parent.(*DDL).Table = newNode.(TableName) -} - -func replaceDDLTableSpec(newNode, parent SQLNode) { - parent.(*DDL).TableSpec = newNode.(*TableSpec) -} - -func replaceDDLToTables(newNode, parent SQLNode) { - parent.(*DDL).ToTables = newNode.(TableNames) -} - -func replaceDeleteComments(newNode, parent SQLNode) { - parent.(*Delete).Comments = newNode.(Comments) -} - -func replaceDeleteLimit(newNode, parent SQLNode) { - parent.(*Delete).Limit = newNode.(*Limit) -} - -func replaceDeleteOrderBy(newNode, parent SQLNode) { - parent.(*Delete).OrderBy = newNode.(OrderBy) -} - -func replaceDeletePartitions(newNode, parent SQLNode) { - parent.(*Delete).Partitions = newNode.(Partitions) -} - -func replaceDeleteTableExprs(newNode, parent SQLNode) { - parent.(*Delete).TableExprs = newNode.(TableExprs) -} - -func replaceDeleteTargets(newNode, parent SQLNode) 
{ - parent.(*Delete).Targets = newNode.(TableNames) -} - -func replaceDeleteWhere(newNode, parent SQLNode) { - parent.(*Delete).Where = newNode.(*Where) -} - -func replaceDerivedTableSelect(newNode, parent SQLNode) { - parent.(*DerivedTable).Select = newNode.(SelectStatement) -} - -func replaceDropColumnName(newNode, parent SQLNode) { - parent.(*DropColumn).Name = newNode.(*ColName) -} - -func replaceDropTableFromTables(newNode, parent SQLNode) { - parent.(*DropTable).FromTables = newNode.(TableNames) -} - -func replaceDropViewFromTables(newNode, parent SQLNode) { - parent.(*DropView).FromTables = newNode.(TableNames) -} - -func replaceExistsExprSubquery(newNode, parent SQLNode) { - parent.(*ExistsExpr).Subquery = newNode.(*Subquery) -} - -func replaceExplainStatement(newNode, parent SQLNode) { - parent.(*Explain).Statement = newNode.(Statement) -} - -type replaceExprsItems int - -func (r *replaceExprsItems) replace(newNode, container SQLNode) { - container.(Exprs)[int(*r)] = newNode.(Expr) -} - -func (r *replaceExprsItems) inc() { - *r++ -} - -func replaceForeignKeyDefinitionOnDelete(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).OnDelete = newNode.(ReferenceAction) -} - -func replaceForeignKeyDefinitionOnUpdate(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).OnUpdate = newNode.(ReferenceAction) -} - -func replaceForeignKeyDefinitionReferencedColumns(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).ReferencedColumns = newNode.(Columns) -} - -func replaceForeignKeyDefinitionReferencedTable(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).ReferencedTable = newNode.(TableName) -} - -func replaceForeignKeyDefinitionSource(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).Source = newNode.(Columns) -} - -func replaceFuncExprExprs(newNode, parent SQLNode) { - parent.(*FuncExpr).Exprs = newNode.(SelectExprs) -} - -func replaceFuncExprName(newNode, parent SQLNode) { - parent.(*FuncExpr).Name = newNode.(ColIdent) -} 
- -func replaceFuncExprQualifier(newNode, parent SQLNode) { - parent.(*FuncExpr).Qualifier = newNode.(TableIdent) -} - -type replaceGroupByItems int - -func (r *replaceGroupByItems) replace(newNode, container SQLNode) { - container.(GroupBy)[int(*r)] = newNode.(Expr) -} - -func (r *replaceGroupByItems) inc() { - *r++ -} - -func replaceGroupConcatExprExprs(newNode, parent SQLNode) { - parent.(*GroupConcatExpr).Exprs = newNode.(SelectExprs) -} - -func replaceGroupConcatExprLimit(newNode, parent SQLNode) { - parent.(*GroupConcatExpr).Limit = newNode.(*Limit) -} - -func replaceGroupConcatExprOrderBy(newNode, parent SQLNode) { - parent.(*GroupConcatExpr).OrderBy = newNode.(OrderBy) -} - -func replaceIndexDefinitionInfo(newNode, parent SQLNode) { - parent.(*IndexDefinition).Info = newNode.(*IndexInfo) -} - -type replaceIndexHintsIndexes int - -func (r *replaceIndexHintsIndexes) replace(newNode, container SQLNode) { - container.(*IndexHints).Indexes[int(*r)] = newNode.(ColIdent) -} - -func (r *replaceIndexHintsIndexes) inc() { - *r++ -} - -func replaceIndexInfoConstraintName(newNode, parent SQLNode) { - parent.(*IndexInfo).ConstraintName = newNode.(ColIdent) -} - -func replaceIndexInfoName(newNode, parent SQLNode) { - parent.(*IndexInfo).Name = newNode.(ColIdent) -} - -func replaceInsertColumns(newNode, parent SQLNode) { - parent.(*Insert).Columns = newNode.(Columns) -} - -func replaceInsertComments(newNode, parent SQLNode) { - parent.(*Insert).Comments = newNode.(Comments) -} - -func replaceInsertOnDup(newNode, parent SQLNode) { - parent.(*Insert).OnDup = newNode.(OnDup) -} - -func replaceInsertPartitions(newNode, parent SQLNode) { - parent.(*Insert).Partitions = newNode.(Partitions) -} - -func replaceInsertRows(newNode, parent SQLNode) { - parent.(*Insert).Rows = newNode.(InsertRows) -} - -func replaceInsertTable(newNode, parent SQLNode) { - parent.(*Insert).Table = newNode.(TableName) -} - -func replaceIntervalExprExpr(newNode, parent SQLNode) { - 
parent.(*IntervalExpr).Expr = newNode.(Expr) -} - -func replaceIsExprExpr(newNode, parent SQLNode) { - parent.(*IsExpr).Expr = newNode.(Expr) -} - -func replaceJoinConditionOn(newNode, parent SQLNode) { - tmp := parent.(JoinCondition) - tmp.On = newNode.(Expr) -} - -func replaceJoinConditionUsing(newNode, parent SQLNode) { - tmp := parent.(JoinCondition) - tmp.Using = newNode.(Columns) -} - -func replaceJoinTableExprCondition(newNode, parent SQLNode) { - parent.(*JoinTableExpr).Condition = newNode.(JoinCondition) -} - -func replaceJoinTableExprLeftExpr(newNode, parent SQLNode) { - parent.(*JoinTableExpr).LeftExpr = newNode.(TableExpr) -} - -func replaceJoinTableExprRightExpr(newNode, parent SQLNode) { - parent.(*JoinTableExpr).RightExpr = newNode.(TableExpr) -} - -func replaceLimitOffset(newNode, parent SQLNode) { - parent.(*Limit).Offset = newNode.(Expr) -} - -func replaceLimitRowcount(newNode, parent SQLNode) { - parent.(*Limit).Rowcount = newNode.(Expr) -} - -func replaceMatchExprColumns(newNode, parent SQLNode) { - parent.(*MatchExpr).Columns = newNode.(SelectExprs) -} - -func replaceMatchExprExpr(newNode, parent SQLNode) { - parent.(*MatchExpr).Expr = newNode.(Expr) -} - -func replaceModifyColumnAfter(newNode, parent SQLNode) { - parent.(*ModifyColumn).After = newNode.(*ColName) -} - -func replaceModifyColumnFirst(newNode, parent SQLNode) { - parent.(*ModifyColumn).First = newNode.(*ColName) -} - -func replaceModifyColumnNewColDefinition(newNode, parent SQLNode) { - parent.(*ModifyColumn).NewColDefinition = newNode.(*ColumnDefinition) -} - -func replaceNextvalExpr(newNode, parent SQLNode) { - tmp := parent.(Nextval) - tmp.Expr = newNode.(Expr) -} - -func replaceNotExprExpr(newNode, parent SQLNode) { - parent.(*NotExpr).Expr = newNode.(Expr) -} - -type replaceOnDupItems int - -func (r *replaceOnDupItems) replace(newNode, container SQLNode) { - container.(OnDup)[int(*r)] = newNode.(*UpdateExpr) -} - -func (r *replaceOnDupItems) inc() { - *r++ -} - -func 
replaceOptLikeLikeTable(newNode, parent SQLNode) { - parent.(*OptLike).LikeTable = newNode.(TableName) -} - -func replaceOrExprLeft(newNode, parent SQLNode) { - parent.(*OrExpr).Left = newNode.(Expr) -} - -func replaceOrExprRight(newNode, parent SQLNode) { - parent.(*OrExpr).Right = newNode.(Expr) -} - -func replaceOrderExpr(newNode, parent SQLNode) { - parent.(*Order).Expr = newNode.(Expr) -} - -type replaceOrderByItems int - -func (r *replaceOrderByItems) replace(newNode, container SQLNode) { - container.(OrderBy)[int(*r)] = newNode.(*Order) -} - -func (r *replaceOrderByItems) inc() { - *r++ -} - -func replaceOrderByOptionCols(newNode, parent SQLNode) { - parent.(*OrderByOption).Cols = newNode.(Columns) -} - -func replaceParenSelectSelect(newNode, parent SQLNode) { - parent.(*ParenSelect).Select = newNode.(SelectStatement) -} - -func replaceParenTableExprExprs(newNode, parent SQLNode) { - parent.(*ParenTableExpr).Exprs = newNode.(TableExprs) -} - -func replacePartitionDefinitionLimit(newNode, parent SQLNode) { - parent.(*PartitionDefinition).Limit = newNode.(Expr) -} - -func replacePartitionDefinitionName(newNode, parent SQLNode) { - parent.(*PartitionDefinition).Name = newNode.(ColIdent) -} - -type replacePartitionSpecDefinitions int - -func (r *replacePartitionSpecDefinitions) replace(newNode, container SQLNode) { - container.(*PartitionSpec).Definitions[int(*r)] = newNode.(*PartitionDefinition) -} - -func (r *replacePartitionSpecDefinitions) inc() { - *r++ -} - -func replacePartitionSpecNames(newNode, parent SQLNode) { - parent.(*PartitionSpec).Names = newNode.(Partitions) -} - -func replacePartitionSpecNumber(newNode, parent SQLNode) { - parent.(*PartitionSpec).Number = newNode.(*Literal) -} - -func replacePartitionSpecTableName(newNode, parent SQLNode) { - parent.(*PartitionSpec).TableName = newNode.(TableName) -} - -type replacePartitionsItems int - -func (r *replacePartitionsItems) replace(newNode, container SQLNode) { - container.(Partitions)[int(*r)] = 
newNode.(ColIdent) -} - -func (r *replacePartitionsItems) inc() { - *r++ -} - -func replaceRangeCondFrom(newNode, parent SQLNode) { - parent.(*RangeCond).From = newNode.(Expr) -} - -func replaceRangeCondLeft(newNode, parent SQLNode) { - parent.(*RangeCond).Left = newNode.(Expr) -} - -func replaceRangeCondTo(newNode, parent SQLNode) { - parent.(*RangeCond).To = newNode.(Expr) -} - -func replaceReleaseName(newNode, parent SQLNode) { - parent.(*Release).Name = newNode.(ColIdent) -} - -func replaceRenameTableTable(newNode, parent SQLNode) { - parent.(*RenameTable).Table = newNode.(TableName) -} - -func replaceSRollbackName(newNode, parent SQLNode) { - parent.(*SRollback).Name = newNode.(ColIdent) -} - -func replaceSavepointName(newNode, parent SQLNode) { - parent.(*Savepoint).Name = newNode.(ColIdent) -} - -func replaceSelectComments(newNode, parent SQLNode) { - parent.(*Select).Comments = newNode.(Comments) -} - -func replaceSelectFrom(newNode, parent SQLNode) { - parent.(*Select).From = newNode.(TableExprs) -} - -func replaceSelectGroupBy(newNode, parent SQLNode) { - parent.(*Select).GroupBy = newNode.(GroupBy) -} - -func replaceSelectHaving(newNode, parent SQLNode) { - parent.(*Select).Having = newNode.(*Where) -} - -func replaceSelectInto(newNode, parent SQLNode) { - parent.(*Select).Into = newNode.(*SelectInto) -} - -func replaceSelectLimit(newNode, parent SQLNode) { - parent.(*Select).Limit = newNode.(*Limit) -} - -func replaceSelectOrderBy(newNode, parent SQLNode) { - parent.(*Select).OrderBy = newNode.(OrderBy) -} - -func replaceSelectSelectExprs(newNode, parent SQLNode) { - parent.(*Select).SelectExprs = newNode.(SelectExprs) -} - -func replaceSelectWhere(newNode, parent SQLNode) { - parent.(*Select).Where = newNode.(*Where) -} - -type replaceSelectExprsItems int - -func (r *replaceSelectExprsItems) replace(newNode, container SQLNode) { - container.(SelectExprs)[int(*r)] = newNode.(SelectExpr) -} - -func (r *replaceSelectExprsItems) inc() { - *r++ -} - -func 
replaceSetComments(newNode, parent SQLNode) { - parent.(*Set).Comments = newNode.(Comments) -} - -func replaceSetExprs(newNode, parent SQLNode) { - parent.(*Set).Exprs = newNode.(SetExprs) -} - -func replaceSetExprExpr(newNode, parent SQLNode) { - parent.(*SetExpr).Expr = newNode.(Expr) -} - -func replaceSetExprName(newNode, parent SQLNode) { - parent.(*SetExpr).Name = newNode.(ColIdent) -} - -type replaceSetExprsItems int - -func (r *replaceSetExprsItems) replace(newNode, container SQLNode) { - container.(SetExprs)[int(*r)] = newNode.(*SetExpr) -} - -func (r *replaceSetExprsItems) inc() { - *r++ -} - -type replaceSetTransactionCharacteristics int - -func (r *replaceSetTransactionCharacteristics) replace(newNode, container SQLNode) { - container.(*SetTransaction).Characteristics[int(*r)] = newNode.(Characteristic) -} - -func (r *replaceSetTransactionCharacteristics) inc() { - *r++ -} - -func replaceSetTransactionComments(newNode, parent SQLNode) { - parent.(*SetTransaction).Comments = newNode.(Comments) -} - -func replaceShowInternal(newNode, parent SQLNode) { - parent.(*Show).Internal = newNode.(ShowInternal) -} - -func replaceShowBasicFilter(newNode, parent SQLNode) { - parent.(*ShowBasic).Filter = newNode.(*ShowFilter) -} - -func replaceShowColumnsFilter(newNode, parent SQLNode) { - parent.(*ShowColumns).Filter = newNode.(*ShowFilter) -} - -func replaceShowColumnsTable(newNode, parent SQLNode) { - parent.(*ShowColumns).Table = newNode.(TableName) -} - -func replaceShowFilterFilter(newNode, parent SQLNode) { - parent.(*ShowFilter).Filter = newNode.(Expr) -} - -func replaceShowLegacyOnTable(newNode, parent SQLNode) { - parent.(*ShowLegacy).OnTable = newNode.(TableName) -} - -func replaceShowLegacyShowCollationFilterOpt(newNode, parent SQLNode) { - parent.(*ShowLegacy).ShowCollationFilterOpt = newNode.(Expr) -} - -func replaceShowLegacyTable(newNode, parent SQLNode) { - parent.(*ShowLegacy).Table = newNode.(TableName) -} - -func 
replaceShowTableStatusFilter(newNode, parent SQLNode) { - parent.(*ShowTableStatus).Filter = newNode.(*ShowFilter) -} - -func replaceStarExprTableName(newNode, parent SQLNode) { - parent.(*StarExpr).TableName = newNode.(TableName) -} - -func replaceStreamComments(newNode, parent SQLNode) { - parent.(*Stream).Comments = newNode.(Comments) -} - -func replaceStreamSelectExpr(newNode, parent SQLNode) { - parent.(*Stream).SelectExpr = newNode.(SelectExpr) -} - -func replaceStreamTable(newNode, parent SQLNode) { - parent.(*Stream).Table = newNode.(TableName) -} - -func replaceSubquerySelect(newNode, parent SQLNode) { - parent.(*Subquery).Select = newNode.(SelectStatement) -} - -func replaceSubstrExprFrom(newNode, parent SQLNode) { - parent.(*SubstrExpr).From = newNode.(Expr) -} - -func replaceSubstrExprName(newNode, parent SQLNode) { - parent.(*SubstrExpr).Name = newNode.(*ColName) -} - -func replaceSubstrExprStrVal(newNode, parent SQLNode) { - parent.(*SubstrExpr).StrVal = newNode.(*Literal) -} - -func replaceSubstrExprTo(newNode, parent SQLNode) { - parent.(*SubstrExpr).To = newNode.(Expr) -} - -type replaceTableExprsItems int - -func (r *replaceTableExprsItems) replace(newNode, container SQLNode) { - container.(TableExprs)[int(*r)] = newNode.(TableExpr) -} - -func (r *replaceTableExprsItems) inc() { - *r++ -} - -func replaceTableNameName(newNode, parent SQLNode) { - tmp := parent.(TableName) - tmp.Name = newNode.(TableIdent) -} - -func replaceTableNameQualifier(newNode, parent SQLNode) { - tmp := parent.(TableName) - tmp.Qualifier = newNode.(TableIdent) -} - -type replaceTableNamesItems int - -func (r *replaceTableNamesItems) replace(newNode, container SQLNode) { - container.(TableNames)[int(*r)] = newNode.(TableName) -} - -func (r *replaceTableNamesItems) inc() { - *r++ -} - -type replaceTableSpecColumns int - -func (r *replaceTableSpecColumns) replace(newNode, container SQLNode) { - container.(*TableSpec).Columns[int(*r)] = newNode.(*ColumnDefinition) -} - -func (r 
*replaceTableSpecColumns) inc() { - *r++ -} - -type replaceTableSpecConstraints int - -func (r *replaceTableSpecConstraints) replace(newNode, container SQLNode) { - container.(*TableSpec).Constraints[int(*r)] = newNode.(*ConstraintDefinition) -} - -func (r *replaceTableSpecConstraints) inc() { - *r++ -} - -type replaceTableSpecIndexes int - -func (r *replaceTableSpecIndexes) replace(newNode, container SQLNode) { - container.(*TableSpec).Indexes[int(*r)] = newNode.(*IndexDefinition) -} - -func (r *replaceTableSpecIndexes) inc() { - *r++ -} - -func replaceTableSpecOptions(newNode, parent SQLNode) { - parent.(*TableSpec).Options = newNode.(TableOptions) -} - -func replaceTimestampFuncExprExpr1(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr1 = newNode.(Expr) -} - -func replaceTimestampFuncExprExpr2(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr2 = newNode.(Expr) -} - -func replaceUnaryExprExpr(newNode, parent SQLNode) { - parent.(*UnaryExpr).Expr = newNode.(Expr) -} - -func replaceUnionFirstStatement(newNode, parent SQLNode) { - parent.(*Union).FirstStatement = newNode.(SelectStatement) -} - -func replaceUnionLimit(newNode, parent SQLNode) { - parent.(*Union).Limit = newNode.(*Limit) -} - -func replaceUnionOrderBy(newNode, parent SQLNode) { - parent.(*Union).OrderBy = newNode.(OrderBy) -} - -type replaceUnionUnionSelects int - -func (r *replaceUnionUnionSelects) replace(newNode, container SQLNode) { - container.(*Union).UnionSelects[int(*r)] = newNode.(*UnionSelect) -} - -func (r *replaceUnionUnionSelects) inc() { - *r++ -} - -func replaceUnionSelectStatement(newNode, parent SQLNode) { - parent.(*UnionSelect).Statement = newNode.(SelectStatement) -} - -func replaceUpdateComments(newNode, parent SQLNode) { - parent.(*Update).Comments = newNode.(Comments) -} - -func replaceUpdateExprs(newNode, parent SQLNode) { - parent.(*Update).Exprs = newNode.(UpdateExprs) -} - -func replaceUpdateLimit(newNode, parent SQLNode) { - parent.(*Update).Limit = 
newNode.(*Limit) -} - -func replaceUpdateOrderBy(newNode, parent SQLNode) { - parent.(*Update).OrderBy = newNode.(OrderBy) -} - -func replaceUpdateTableExprs(newNode, parent SQLNode) { - parent.(*Update).TableExprs = newNode.(TableExprs) -} - -func replaceUpdateWhere(newNode, parent SQLNode) { - parent.(*Update).Where = newNode.(*Where) -} - -func replaceUpdateExprExpr(newNode, parent SQLNode) { - parent.(*UpdateExpr).Expr = newNode.(Expr) -} - -func replaceUpdateExprName(newNode, parent SQLNode) { - parent.(*UpdateExpr).Name = newNode.(*ColName) -} - -type replaceUpdateExprsItems int - -func (r *replaceUpdateExprsItems) replace(newNode, container SQLNode) { - container.(UpdateExprs)[int(*r)] = newNode.(*UpdateExpr) -} - -func (r *replaceUpdateExprsItems) inc() { - *r++ -} - -func replaceUseDBName(newNode, parent SQLNode) { - parent.(*Use).DBName = newNode.(TableIdent) -} - -func replaceVStreamComments(newNode, parent SQLNode) { - parent.(*VStream).Comments = newNode.(Comments) -} - -func replaceVStreamLimit(newNode, parent SQLNode) { - parent.(*VStream).Limit = newNode.(*Limit) -} - -func replaceVStreamSelectExpr(newNode, parent SQLNode) { - parent.(*VStream).SelectExpr = newNode.(SelectExpr) -} - -func replaceVStreamTable(newNode, parent SQLNode) { - parent.(*VStream).Table = newNode.(TableName) -} - -func replaceVStreamWhere(newNode, parent SQLNode) { - parent.(*VStream).Where = newNode.(*Where) -} - -type replaceValTupleItems int - -func (r *replaceValTupleItems) replace(newNode, container SQLNode) { - container.(ValTuple)[int(*r)] = newNode.(Expr) -} - -func (r *replaceValTupleItems) inc() { - *r++ -} - -type replaceValuesItems int - -func (r *replaceValuesItems) replace(newNode, container SQLNode) { - container.(Values)[int(*r)] = newNode.(ValTuple) -} - -func (r *replaceValuesItems) inc() { - *r++ -} - -func replaceValuesFuncExprName(newNode, parent SQLNode) { - parent.(*ValuesFuncExpr).Name = newNode.(*ColName) -} - -func replaceVindexParamKey(newNode, 
parent SQLNode) { - tmp := parent.(VindexParam) - tmp.Key = newNode.(ColIdent) -} - -func replaceVindexSpecName(newNode, parent SQLNode) { - parent.(*VindexSpec).Name = newNode.(ColIdent) -} - -type replaceVindexSpecParams int - -func (r *replaceVindexSpecParams) replace(newNode, container SQLNode) { - container.(*VindexSpec).Params[int(*r)] = newNode.(VindexParam) -} - -func (r *replaceVindexSpecParams) inc() { - *r++ -} - -func replaceVindexSpecType(newNode, parent SQLNode) { - parent.(*VindexSpec).Type = newNode.(ColIdent) -} - -func replaceWhenCond(newNode, parent SQLNode) { - parent.(*When).Cond = newNode.(Expr) -} - -func replaceWhenVal(newNode, parent SQLNode) { - parent.(*When).Val = newNode.(Expr) -} - -func replaceWhereExpr(newNode, parent SQLNode) { - parent.(*Where).Expr = newNode.(Expr) -} - -func replaceXorExprLeft(newNode, parent SQLNode) { - parent.(*XorExpr).Left = newNode.(Expr) -} - -func replaceXorExprRight(newNode, parent SQLNode) { - parent.(*XorExpr).Right = newNode.(Expr) -} - -// apply is where the visiting happens. 
Here is where we keep the big switch-case that will be used -// to do the actual visiting of SQLNodes -func (a *application) apply(parent, node SQLNode, replacer replacerFunc) { - if node == nil || isNilValue(node) { - return - } - - // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead - saved := a.cursor - a.cursor.replacer = replacer - a.cursor.node = node - a.cursor.parent = parent - - if a.pre != nil && !a.pre(&a.cursor) { - a.cursor = saved - return - } - - // walk children - // (the order of the cases is alphabetical) - switch n := node.(type) { - case nil: - case AccessMode: - - case *AddColumns: - a.apply(node, n.After, replaceAddColumnsAfter) - replacerColumns := replaceAddColumnsColumns(0) - replacerColumnsB := &replacerColumns - for _, item := range n.Columns { - a.apply(node, item, replacerColumnsB.replace) - replacerColumnsB.inc() - } - a.apply(node, n.First, replaceAddColumnsFirst) - - case *AddConstraintDefinition: - a.apply(node, n.ConstraintDefinition, replaceAddConstraintDefinitionConstraintDefinition) - - case *AddIndexDefinition: - a.apply(node, n.IndexDefinition, replaceAddIndexDefinitionIndexDefinition) - - case AlgorithmValue: - - case *AliasedExpr: - a.apply(node, n.As, replaceAliasedExprAs) - a.apply(node, n.Expr, replaceAliasedExprExpr) - - case *AliasedTableExpr: - a.apply(node, n.As, replaceAliasedTableExprAs) - a.apply(node, n.Expr, replaceAliasedTableExprExpr) - a.apply(node, n.Hints, replaceAliasedTableExprHints) - a.apply(node, n.Partitions, replaceAliasedTableExprPartitions) - - case *AlterCharset: - - case *AlterColumn: - a.apply(node, n.Column, replaceAlterColumnColumn) - a.apply(node, n.DefaultVal, replaceAlterColumnDefaultVal) - - case *AlterDatabase: - - case *AlterTable: - replacerAlterOptions := replaceAlterTableAlterOptions(0) - replacerAlterOptionsB := &replacerAlterOptions - for _, item := range n.AlterOptions { - a.apply(node, item, replacerAlterOptionsB.replace) - replacerAlterOptionsB.inc() 
- } - a.apply(node, n.PartitionSpec, replaceAlterTablePartitionSpec) - a.apply(node, n.Table, replaceAlterTableTable) - - case *AlterView: - a.apply(node, n.Columns, replaceAlterViewColumns) - a.apply(node, n.Select, replaceAlterViewSelect) - a.apply(node, n.ViewName, replaceAlterViewViewName) - - case *AlterVschema: - a.apply(node, n.AutoIncSpec, replaceAlterVschemaAutoIncSpec) - a.apply(node, n.Table, replaceAlterVschemaTable) - replacerVindexCols := replaceAlterVschemaVindexCols(0) - replacerVindexColsB := &replacerVindexCols - for _, item := range n.VindexCols { - a.apply(node, item, replacerVindexColsB.replace) - replacerVindexColsB.inc() - } - a.apply(node, n.VindexSpec, replaceAlterVschemaVindexSpec) - - case *AndExpr: - a.apply(node, n.Left, replaceAndExprLeft) - a.apply(node, n.Right, replaceAndExprRight) - - case Argument: - - case *AutoIncSpec: - a.apply(node, n.Column, replaceAutoIncSpecColumn) - a.apply(node, n.Sequence, replaceAutoIncSpecSequence) - - case *Begin: - - case *BinaryExpr: - a.apply(node, n.Left, replaceBinaryExprLeft) - a.apply(node, n.Right, replaceBinaryExprRight) - - case BoolVal: - - case *CaseExpr: - a.apply(node, n.Else, replaceCaseExprElse) - a.apply(node, n.Expr, replaceCaseExprExpr) - replacerWhens := replaceCaseExprWhens(0) - replacerWhensB := &replacerWhens - for _, item := range n.Whens { - a.apply(node, item, replacerWhensB.replace) - replacerWhensB.inc() - } - - case *ChangeColumn: - a.apply(node, n.After, replaceChangeColumnAfter) - a.apply(node, n.First, replaceChangeColumnFirst) - a.apply(node, n.NewColDefinition, replaceChangeColumnNewColDefinition) - a.apply(node, n.OldColumn, replaceChangeColumnOldColumn) - - case *CheckConstraintDefinition: - a.apply(node, n.Expr, replaceCheckConstraintDefinitionExpr) - - case ColIdent: - - case *ColName: - a.apply(node, n.Name, replaceColNameName) - a.apply(node, n.Qualifier, replaceColNameQualifier) - - case *CollateExpr: - a.apply(node, n.Expr, replaceCollateExprExpr) - - case 
*ColumnDefinition: - a.apply(node, n.Name, replaceColumnDefinitionName) - - case *ColumnType: - a.apply(node, n.Comment, replaceColumnTypeComment) - a.apply(node, n.Default, replaceColumnTypeDefault) - a.apply(node, n.Length, replaceColumnTypeLength) - a.apply(node, n.OnUpdate, replaceColumnTypeOnUpdate) - a.apply(node, n.Scale, replaceColumnTypeScale) - - case Columns: - replacer := replaceColumnsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case Comments: - - case *Commit: - - case *ComparisonExpr: - a.apply(node, n.Escape, replaceComparisonExprEscape) - a.apply(node, n.Left, replaceComparisonExprLeft) - a.apply(node, n.Right, replaceComparisonExprRight) - - case *ConstraintDefinition: - a.apply(node, n.Details, replaceConstraintDefinitionDetails) - - case *ConvertExpr: - a.apply(node, n.Expr, replaceConvertExprExpr) - a.apply(node, n.Type, replaceConvertExprType) - - case *ConvertType: - a.apply(node, n.Length, replaceConvertTypeLength) - a.apply(node, n.Scale, replaceConvertTypeScale) - - case *ConvertUsingExpr: - a.apply(node, n.Expr, replaceConvertUsingExprExpr) - - case *CreateDatabase: - - case *CreateIndex: - a.apply(node, n.Name, replaceCreateIndexName) - a.apply(node, n.Table, replaceCreateIndexTable) - - case *CreateTable: - a.apply(node, n.OptLike, replaceCreateTableOptLike) - a.apply(node, n.Table, replaceCreateTableTable) - a.apply(node, n.TableSpec, replaceCreateTableTableSpec) - - case *CreateView: - a.apply(node, n.Columns, replaceCreateViewColumns) - a.apply(node, n.Select, replaceCreateViewSelect) - a.apply(node, n.ViewName, replaceCreateViewViewName) - - case *CurTimeFuncExpr: - a.apply(node, n.Fsp, replaceCurTimeFuncExprFsp) - a.apply(node, n.Name, replaceCurTimeFuncExprName) - - case *DDL: - a.apply(node, n.FromTables, replaceDDLFromTables) - a.apply(node, n.OptLike, replaceDDLOptLike) - a.apply(node, n.PartitionSpec, replaceDDLPartitionSpec) - 
a.apply(node, n.Table, replaceDDLTable) - a.apply(node, n.TableSpec, replaceDDLTableSpec) - a.apply(node, n.ToTables, replaceDDLToTables) - - case *Default: - - case *Delete: - a.apply(node, n.Comments, replaceDeleteComments) - a.apply(node, n.Limit, replaceDeleteLimit) - a.apply(node, n.OrderBy, replaceDeleteOrderBy) - a.apply(node, n.Partitions, replaceDeletePartitions) - a.apply(node, n.TableExprs, replaceDeleteTableExprs) - a.apply(node, n.Targets, replaceDeleteTargets) - a.apply(node, n.Where, replaceDeleteWhere) - - case *DerivedTable: - a.apply(node, n.Select, replaceDerivedTableSelect) - - case *DropColumn: - a.apply(node, n.Name, replaceDropColumnName) - - case *DropDatabase: - - case *DropKey: - - case *DropTable: - a.apply(node, n.FromTables, replaceDropTableFromTables) - - case *DropView: - a.apply(node, n.FromTables, replaceDropViewFromTables) - - case *ExistsExpr: - a.apply(node, n.Subquery, replaceExistsExprSubquery) - - case *Explain: - a.apply(node, n.Statement, replaceExplainStatement) - - case Exprs: - replacer := replaceExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *Force: - - case *ForeignKeyDefinition: - a.apply(node, n.OnDelete, replaceForeignKeyDefinitionOnDelete) - a.apply(node, n.OnUpdate, replaceForeignKeyDefinitionOnUpdate) - a.apply(node, n.ReferencedColumns, replaceForeignKeyDefinitionReferencedColumns) - a.apply(node, n.ReferencedTable, replaceForeignKeyDefinitionReferencedTable) - a.apply(node, n.Source, replaceForeignKeyDefinitionSource) - - case *FuncExpr: - a.apply(node, n.Exprs, replaceFuncExprExprs) - a.apply(node, n.Name, replaceFuncExprName) - a.apply(node, n.Qualifier, replaceFuncExprQualifier) - - case GroupBy: - replacer := replaceGroupByItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *GroupConcatExpr: - a.apply(node, n.Exprs, 
replaceGroupConcatExprExprs) - a.apply(node, n.Limit, replaceGroupConcatExprLimit) - a.apply(node, n.OrderBy, replaceGroupConcatExprOrderBy) - - case *IndexDefinition: - a.apply(node, n.Info, replaceIndexDefinitionInfo) - - case *IndexHints: - replacerIndexes := replaceIndexHintsIndexes(0) - replacerIndexesB := &replacerIndexes - for _, item := range n.Indexes { - a.apply(node, item, replacerIndexesB.replace) - replacerIndexesB.inc() - } - - case *IndexInfo: - a.apply(node, n.ConstraintName, replaceIndexInfoConstraintName) - a.apply(node, n.Name, replaceIndexInfoName) - - case *Insert: - a.apply(node, n.Columns, replaceInsertColumns) - a.apply(node, n.Comments, replaceInsertComments) - a.apply(node, n.OnDup, replaceInsertOnDup) - a.apply(node, n.Partitions, replaceInsertPartitions) - a.apply(node, n.Rows, replaceInsertRows) - a.apply(node, n.Table, replaceInsertTable) - - case *IntervalExpr: - a.apply(node, n.Expr, replaceIntervalExprExpr) - - case *IsExpr: - a.apply(node, n.Expr, replaceIsExprExpr) - - case IsolationLevel: - - case JoinCondition: - a.apply(node, n.On, replaceJoinConditionOn) - a.apply(node, n.Using, replaceJoinConditionUsing) - - case *JoinTableExpr: - a.apply(node, n.Condition, replaceJoinTableExprCondition) - a.apply(node, n.LeftExpr, replaceJoinTableExprLeftExpr) - a.apply(node, n.RightExpr, replaceJoinTableExprRightExpr) - - case *KeyState: - - case *Limit: - a.apply(node, n.Offset, replaceLimitOffset) - a.apply(node, n.Rowcount, replaceLimitRowcount) - - case ListArg: - - case *Literal: - - case *Load: - - case *LockOption: - - case *LockTables: - - case *MatchExpr: - a.apply(node, n.Columns, replaceMatchExprColumns) - a.apply(node, n.Expr, replaceMatchExprExpr) - - case *ModifyColumn: - a.apply(node, n.After, replaceModifyColumnAfter) - a.apply(node, n.First, replaceModifyColumnFirst) - a.apply(node, n.NewColDefinition, replaceModifyColumnNewColDefinition) - - case Nextval: - a.apply(node, n.Expr, replaceNextvalExpr) - - case *NotExpr: - 
a.apply(node, n.Expr, replaceNotExprExpr) - - case *NullVal: - - case OnDup: - replacer := replaceOnDupItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *OptLike: - a.apply(node, n.LikeTable, replaceOptLikeLikeTable) - - case *OrExpr: - a.apply(node, n.Left, replaceOrExprLeft) - a.apply(node, n.Right, replaceOrExprRight) - - case *Order: - a.apply(node, n.Expr, replaceOrderExpr) - - case OrderBy: - replacer := replaceOrderByItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *OrderByOption: - a.apply(node, n.Cols, replaceOrderByOptionCols) - - case *OtherAdmin: - - case *OtherRead: - - case *ParenSelect: - a.apply(node, n.Select, replaceParenSelectSelect) - - case *ParenTableExpr: - a.apply(node, n.Exprs, replaceParenTableExprExprs) - - case *PartitionDefinition: - a.apply(node, n.Limit, replacePartitionDefinitionLimit) - a.apply(node, n.Name, replacePartitionDefinitionName) - - case *PartitionSpec: - replacerDefinitions := replacePartitionSpecDefinitions(0) - replacerDefinitionsB := &replacerDefinitions - for _, item := range n.Definitions { - a.apply(node, item, replacerDefinitionsB.replace) - replacerDefinitionsB.inc() - } - a.apply(node, n.Names, replacePartitionSpecNames) - a.apply(node, n.Number, replacePartitionSpecNumber) - a.apply(node, n.TableName, replacePartitionSpecTableName) - - case Partitions: - replacer := replacePartitionsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *RangeCond: - a.apply(node, n.From, replaceRangeCondFrom) - a.apply(node, n.Left, replaceRangeCondLeft) - a.apply(node, n.To, replaceRangeCondTo) - - case ReferenceAction: - - case *Release: - a.apply(node, n.Name, replaceReleaseName) - - case *RenameIndex: - - case *RenameTable: - a.apply(node, n.Table, 
replaceRenameTableTable) - - case *Rollback: - - case *SRollback: - a.apply(node, n.Name, replaceSRollbackName) - - case *Savepoint: - a.apply(node, n.Name, replaceSavepointName) - - case *Select: - a.apply(node, n.Comments, replaceSelectComments) - a.apply(node, n.From, replaceSelectFrom) - a.apply(node, n.GroupBy, replaceSelectGroupBy) - a.apply(node, n.Having, replaceSelectHaving) - a.apply(node, n.Into, replaceSelectInto) - a.apply(node, n.Limit, replaceSelectLimit) - a.apply(node, n.OrderBy, replaceSelectOrderBy) - a.apply(node, n.SelectExprs, replaceSelectSelectExprs) - a.apply(node, n.Where, replaceSelectWhere) - - case SelectExprs: - replacer := replaceSelectExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *SelectInto: - - case *Set: - a.apply(node, n.Comments, replaceSetComments) - a.apply(node, n.Exprs, replaceSetExprs) - - case *SetExpr: - a.apply(node, n.Expr, replaceSetExprExpr) - a.apply(node, n.Name, replaceSetExprName) - - case SetExprs: - replacer := replaceSetExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *SetTransaction: - replacerCharacteristics := replaceSetTransactionCharacteristics(0) - replacerCharacteristicsB := &replacerCharacteristics - for _, item := range n.Characteristics { - a.apply(node, item, replacerCharacteristicsB.replace) - replacerCharacteristicsB.inc() - } - a.apply(node, n.Comments, replaceSetTransactionComments) - - case *Show: - a.apply(node, n.Internal, replaceShowInternal) - - case *ShowBasic: - a.apply(node, n.Filter, replaceShowBasicFilter) - - case *ShowColumns: - a.apply(node, n.Filter, replaceShowColumnsFilter) - a.apply(node, n.Table, replaceShowColumnsTable) - - case *ShowFilter: - a.apply(node, n.Filter, replaceShowFilterFilter) - - case *ShowLegacy: - a.apply(node, n.OnTable, replaceShowLegacyOnTable) - a.apply(node, 
n.ShowCollationFilterOpt, replaceShowLegacyShowCollationFilterOpt) - a.apply(node, n.Table, replaceShowLegacyTable) - - case *ShowTableStatus: - a.apply(node, n.Filter, replaceShowTableStatusFilter) - - case *StarExpr: - a.apply(node, n.TableName, replaceStarExprTableName) - - case *Stream: - a.apply(node, n.Comments, replaceStreamComments) - a.apply(node, n.SelectExpr, replaceStreamSelectExpr) - a.apply(node, n.Table, replaceStreamTable) - - case *Subquery: - a.apply(node, n.Select, replaceSubquerySelect) - - case *SubstrExpr: - a.apply(node, n.From, replaceSubstrExprFrom) - a.apply(node, n.Name, replaceSubstrExprName) - a.apply(node, n.StrVal, replaceSubstrExprStrVal) - a.apply(node, n.To, replaceSubstrExprTo) - - case TableExprs: - replacer := replaceTableExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case TableIdent: - - case TableName: - a.apply(node, n.Name, replaceTableNameName) - a.apply(node, n.Qualifier, replaceTableNameQualifier) - - case TableNames: - replacer := replaceTableNamesItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case TableOptions: - - case *TableSpec: - replacerColumns := replaceTableSpecColumns(0) - replacerColumnsB := &replacerColumns - for _, item := range n.Columns { - a.apply(node, item, replacerColumnsB.replace) - replacerColumnsB.inc() - } - replacerConstraints := replaceTableSpecConstraints(0) - replacerConstraintsB := &replacerConstraints - for _, item := range n.Constraints { - a.apply(node, item, replacerConstraintsB.replace) - replacerConstraintsB.inc() - } - replacerIndexes := replaceTableSpecIndexes(0) - replacerIndexesB := &replacerIndexes - for _, item := range n.Indexes { - a.apply(node, item, replacerIndexesB.replace) - replacerIndexesB.inc() - } - a.apply(node, n.Options, replaceTableSpecOptions) - - case *TablespaceOperation: - - case 
*TimestampFuncExpr: - a.apply(node, n.Expr1, replaceTimestampFuncExprExpr1) - a.apply(node, n.Expr2, replaceTimestampFuncExprExpr2) - - case *UnaryExpr: - a.apply(node, n.Expr, replaceUnaryExprExpr) - - case *Union: - a.apply(node, n.FirstStatement, replaceUnionFirstStatement) - a.apply(node, n.Limit, replaceUnionLimit) - a.apply(node, n.OrderBy, replaceUnionOrderBy) - replacerUnionSelects := replaceUnionUnionSelects(0) - replacerUnionSelectsB := &replacerUnionSelects - for _, item := range n.UnionSelects { - a.apply(node, item, replacerUnionSelectsB.replace) - replacerUnionSelectsB.inc() - } - - case *UnionSelect: - a.apply(node, n.Statement, replaceUnionSelectStatement) - - case *UnlockTables: - - case *Update: - a.apply(node, n.Comments, replaceUpdateComments) - a.apply(node, n.Exprs, replaceUpdateExprs) - a.apply(node, n.Limit, replaceUpdateLimit) - a.apply(node, n.OrderBy, replaceUpdateOrderBy) - a.apply(node, n.TableExprs, replaceUpdateTableExprs) - a.apply(node, n.Where, replaceUpdateWhere) - - case *UpdateExpr: - a.apply(node, n.Expr, replaceUpdateExprExpr) - a.apply(node, n.Name, replaceUpdateExprName) - - case UpdateExprs: - replacer := replaceUpdateExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *Use: - a.apply(node, n.DBName, replaceUseDBName) - - case *VStream: - a.apply(node, n.Comments, replaceVStreamComments) - a.apply(node, n.Limit, replaceVStreamLimit) - a.apply(node, n.SelectExpr, replaceVStreamSelectExpr) - a.apply(node, n.Table, replaceVStreamTable) - a.apply(node, n.Where, replaceVStreamWhere) - - case ValTuple: - replacer := replaceValTupleItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - } - - case *Validation: - - case Values: - replacer := replaceValuesItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - 
replacerRef.inc() - } - - case *ValuesFuncExpr: - a.apply(node, n.Name, replaceValuesFuncExprName) - - case VindexParam: - a.apply(node, n.Key, replaceVindexParamKey) - - case *VindexSpec: - a.apply(node, n.Name, replaceVindexSpecName) - replacerParams := replaceVindexSpecParams(0) - replacerParamsB := &replacerParams - for _, item := range n.Params { - a.apply(node, item, replacerParamsB.replace) - replacerParamsB.inc() - } - a.apply(node, n.Type, replaceVindexSpecType) - - case *When: - a.apply(node, n.Cond, replaceWhenCond) - a.apply(node, n.Val, replaceWhenVal) - - case *Where: - a.apply(node, n.Expr, replaceWhereExpr) - - case *XorExpr: - a.apply(node, n.Left, replaceXorExprLeft) - a.apply(node, n.Right, replaceXorExprRight) - - default: - panic("unknown ast type " + reflect.TypeOf(node).String()) - } - - if a.post != nil && !a.post(&a.cursor) { - panic(abort) - } - - a.cursor = saved -} - -func isNilValue(i interface{}) bool { - valueOf := reflect.ValueOf(i) - kind := valueOf.Kind() - isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice - return isNullable && valueOf.IsNil() -} diff --git a/go/vt/sqlparser/rewriter_api.go b/go/vt/sqlparser/rewriter_api.go index 47c85e0473b..f79e3432b6d 100644 --- a/go/vt/sqlparser/rewriter_api.go +++ b/go/vt/sqlparser/rewriter_api.go @@ -36,25 +36,18 @@ package sqlparser // func Rewrite(node SQLNode, pre, post ApplyFunc) (result SQLNode) { parent := &struct{ SQLNode }{node} - defer func() { - if r := recover(); r != nil && r != abort { - panic(r) - } - result = parent.SQLNode - }() - - a := &application{ - pre: pre, - post: post, - cursor: Cursor{}, - } // this is the root-replacer, used when the user replaces the root of the ast replacer := func(newNode SQLNode, _ SQLNode) { parent.SQLNode = newNode } - a.apply(parent, node, replacer) + a := &application{ + pre: pre, + post: post, + } + + a.rewriteSQLNode(parent, node, replacer) return parent.SQLNode } @@ -67,8 +60,6 @@ func Rewrite(node 
SQLNode, pre, post ApplyFunc) (result SQLNode) { // See Rewrite for details. type ApplyFunc func(*Cursor) bool -var abort = new(int) // singleton, to signal termination of Apply - // A Cursor describes a node encountered during Apply. // Information about the node and its parent is available // from the Node and Parent methods. @@ -90,3 +81,11 @@ func (c *Cursor) Replace(newNode SQLNode) { c.replacer(newNode, c.parent) c.node = newNode } + +type replacerFunc func(newNode, parent SQLNode) + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cur Cursor +} diff --git a/go/vt/sqlparser/rewriter_test.go b/go/vt/sqlparser/rewriter_test.go new file mode 100644 index 00000000000..6887da8c1a8 --- /dev/null +++ b/go/vt/sqlparser/rewriter_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkVisitLargeExpression(b *testing.B) { + gen := newGenerator(1, 5) + exp := gen.expression() + + depth := 0 + for i := 0; i < b.N; i++ { + _ = Rewrite(exp, func(cursor *Cursor) bool { + depth++ + return true + }, func(cursor *Cursor) bool { + depth-- + return true + }) + } +} + +func TestChangeValueTypeGivesError(t *testing.T) { + parse, err := Parse("select * from a join b on a.id = b.id") + require.NoError(t, err) + + defer func() { + if r := recover(); r != nil { + require.Equal(t, "[BUG] tried to replace 'On' on 'JoinCondition'", r) + } + }() + _ = Rewrite(parse, func(cursor *Cursor) bool { + _, ok := cursor.Node().(*ComparisonExpr) + if ok { + cursor.Replace(&NullVal{}) // this is not a valid replacement because the container is a value type + } + return true + }, nil) + +} diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index 24ea77f6b03..c6b79430f96 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -1,25 +1,28 @@ -// Code generated by goyacc -o sql.go sql.y. DO NOT EDIT. +// Code generated by goyacc -fast-append -o sql.go sql.y. DO NOT EDIT. 
//line sql.y:18 package sqlparser -import __yyfmt__ "fmt" +import ( + __yyfmt__ "fmt" + __yyunsafe__ "unsafe" +) //line sql.y:18 -func setParseTree(yylex interface{}, stmt Statement) { +func setParseTree(yylex yyLexer, stmt Statement) { yylex.(*Tokenizer).ParseTree = stmt } -func setAllowComments(yylex interface{}, allow bool) { +func setAllowComments(yylex yyLexer, allow bool) { yylex.(*Tokenizer).AllowComments = allow } -func setDDL(yylex interface{}, node Statement) { +func setDDL(yylex yyLexer, node Statement) { yylex.(*Tokenizer).partialDDL = node } -func incNesting(yylex interface{}) bool { +func incNesting(yylex yyLexer) bool { yylex.(*Tokenizer).nesting++ if yylex.(*Tokenizer).nesting == 200 { return true @@ -27,116 +30,19 @@ func incNesting(yylex interface{}) bool { return false } -func decNesting(yylex interface{}) { +func decNesting(yylex yyLexer) { yylex.(*Tokenizer).nesting-- } // skipToEnd forces the lexer to end prematurely. Not all SQL statements // are supported by the Parser, thus calling skipToEnd will make the lexer // return EOF early. 
-func skipToEnd(yylex interface{}) { +func skipToEnd(yylex yyLexer) { yylex.(*Tokenizer).SkipToEnd = true } -//line sql.y:53 -type yySymType struct { - yys int - empty struct{} - statement Statement - selStmt SelectStatement - ddl *DDL - ins *Insert - byt byte - bytes []byte - bytes2 [][]byte - str string - strs []string - selectExprs SelectExprs - selectExpr SelectExpr - columns Columns - partitions Partitions - colName *ColName - tableExprs TableExprs - tableExpr TableExpr - joinCondition JoinCondition - tableName TableName - tableNames TableNames - indexHints *IndexHints - expr Expr - exprs Exprs - boolVal BoolVal - boolean bool - literal *Literal - colTuple ColTuple - values Values - valTuple ValTuple - subquery *Subquery - derivedTable *DerivedTable - whens []*When - when *When - orderBy OrderBy - order *Order - limit *Limit - updateExprs UpdateExprs - setExprs SetExprs - updateExpr *UpdateExpr - setExpr *SetExpr - characteristic Characteristic - characteristics []Characteristic - colIdent ColIdent - tableIdent TableIdent - convertType *ConvertType - aliasedTableName *AliasedTableExpr - TableSpec *TableSpec - columnType ColumnType - colKeyOpt ColumnKeyOption - optVal Expr - LengthScaleOption LengthScaleOption - columnDefinition *ColumnDefinition - columnDefinitions []*ColumnDefinition - indexDefinition *IndexDefinition - indexInfo *IndexInfo - indexOption *IndexOption - indexOptions []*IndexOption - indexColumn *IndexColumn - indexColumns []*IndexColumn - constraintDefinition *ConstraintDefinition - constraintInfo ConstraintInfo - ReferenceAction ReferenceAction - partDefs []*PartitionDefinition - partDef *PartitionDefinition - partSpec *PartitionSpec - partSpecs []*PartitionSpec - vindexParam VindexParam - vindexParams []VindexParam - showFilter *ShowFilter - optLike *OptLike - isolationLevel IsolationLevel - insertAction InsertAction - scope Scope - ignore Ignore - lock Lock - joinType JoinType - comparisonExprOperator ComparisonExprOperator - isExprOperator 
IsExprOperator - matchExprOption MatchExprOption - orderDirection OrderDirection - explainType ExplainType - selectInto *SelectInto - createIndex *CreateIndex - createDatabase *CreateDatabase - alterDatabase *AlterDatabase - collateAndCharset CollateAndCharset - collateAndCharsets []CollateAndCharset - createTable *CreateTable - tableAndLockTypes []*TableAndLockType - tableAndLockType *TableAndLockType - lockType LockType - alterTable *AlterTable - alterOption AlterOption - alterOptions []AlterOption - tableOption *TableOption - tableOptions TableOptions +func bindVariable(yylex yyLexer, bvar string) { + yylex.(*Tokenizer).BindVars[bvar] = struct{}{} } const LEX_ERROR = 57346 @@ -171,404 +77,427 @@ const LOCK = 57374 const UNLOCK = 57375 const KEYS = 57376 const DO = 57377 -const DISTINCTROW = 57378 -const PARSER = 57379 -const OUTFILE = 57380 -const S3 = 57381 -const DATA = 57382 -const LOAD = 57383 -const LINES = 57384 -const TERMINATED = 57385 -const ESCAPED = 57386 -const ENCLOSED = 57387 -const DUMPFILE = 57388 -const CSV = 57389 -const HEADER = 57390 -const MANIFEST = 57391 -const OVERWRITE = 57392 -const STARTING = 57393 -const OPTIONALLY = 57394 -const VALUES = 57395 -const LAST_INSERT_ID = 57396 -const NEXT = 57397 -const VALUE = 57398 -const SHARE = 57399 -const MODE = 57400 -const SQL_NO_CACHE = 57401 -const SQL_CACHE = 57402 -const SQL_CALC_FOUND_ROWS = 57403 -const JOIN = 57404 -const STRAIGHT_JOIN = 57405 -const LEFT = 57406 -const RIGHT = 57407 -const INNER = 57408 -const OUTER = 57409 -const CROSS = 57410 -const NATURAL = 57411 -const USE = 57412 -const FORCE = 57413 -const ON = 57414 -const USING = 57415 -const INPLACE = 57416 -const COPY = 57417 -const ALGORITHM = 57418 -const NONE = 57419 -const SHARED = 57420 -const EXCLUSIVE = 57421 -const ID = 57422 -const AT_ID = 57423 -const AT_AT_ID = 57424 -const HEX = 57425 -const STRING = 57426 -const INTEGRAL = 57427 -const FLOAT = 57428 -const HEXNUM = 57429 -const VALUE_ARG = 57430 -const LIST_ARG = 
57431 -const COMMENT = 57432 -const COMMENT_KEYWORD = 57433 -const BIT_LITERAL = 57434 -const COMPRESSION = 57435 -const NULL = 57436 -const TRUE = 57437 -const FALSE = 57438 -const OFF = 57439 -const DISCARD = 57440 -const IMPORT = 57441 -const ENABLE = 57442 -const DISABLE = 57443 -const TABLESPACE = 57444 -const OR = 57445 -const XOR = 57446 -const AND = 57447 -const NOT = 57448 -const BETWEEN = 57449 -const CASE = 57450 -const WHEN = 57451 -const THEN = 57452 -const ELSE = 57453 -const END = 57454 -const LE = 57455 -const GE = 57456 -const NE = 57457 -const NULL_SAFE_EQUAL = 57458 -const IS = 57459 -const LIKE = 57460 -const REGEXP = 57461 -const IN = 57462 -const SHIFT_LEFT = 57463 -const SHIFT_RIGHT = 57464 -const DIV = 57465 -const MOD = 57466 -const UNARY = 57467 -const COLLATE = 57468 -const BINARY = 57469 -const UNDERSCORE_BINARY = 57470 -const UNDERSCORE_UTF8MB4 = 57471 -const UNDERSCORE_UTF8 = 57472 -const UNDERSCORE_LATIN1 = 57473 -const INTERVAL = 57474 -const JSON_EXTRACT_OP = 57475 -const JSON_UNQUOTE_EXTRACT_OP = 57476 -const CREATE = 57477 -const ALTER = 57478 -const DROP = 57479 -const RENAME = 57480 -const ANALYZE = 57481 -const ADD = 57482 -const FLUSH = 57483 -const CHANGE = 57484 -const MODIFY = 57485 -const SCHEMA = 57486 -const TABLE = 57487 -const INDEX = 57488 -const VIEW = 57489 -const TO = 57490 -const IGNORE = 57491 -const IF = 57492 -const UNIQUE = 57493 -const PRIMARY = 57494 -const COLUMN = 57495 -const SPATIAL = 57496 -const FULLTEXT = 57497 -const KEY_BLOCK_SIZE = 57498 -const CHECK = 57499 -const INDEXES = 57500 -const ACTION = 57501 -const CASCADE = 57502 -const CONSTRAINT = 57503 -const FOREIGN = 57504 -const NO = 57505 -const REFERENCES = 57506 -const RESTRICT = 57507 -const SHOW = 57508 -const DESCRIBE = 57509 -const EXPLAIN = 57510 -const DATE = 57511 -const ESCAPE = 57512 -const REPAIR = 57513 -const OPTIMIZE = 57514 -const TRUNCATE = 57515 -const COALESCE = 57516 -const EXCHANGE = 57517 -const REBUILD = 57518 -const 
PARTITIONING = 57519 -const REMOVE = 57520 -const MAXVALUE = 57521 -const PARTITION = 57522 -const REORGANIZE = 57523 -const LESS = 57524 -const THAN = 57525 -const PROCEDURE = 57526 -const TRIGGER = 57527 -const VINDEX = 57528 -const VINDEXES = 57529 -const DIRECTORY = 57530 -const NAME = 57531 -const UPGRADE = 57532 -const STATUS = 57533 -const VARIABLES = 57534 -const WARNINGS = 57535 -const CASCADED = 57536 -const DEFINER = 57537 -const OPTION = 57538 -const SQL = 57539 -const UNDEFINED = 57540 -const SEQUENCE = 57541 -const MERGE = 57542 -const TEMPTABLE = 57543 -const INVOKER = 57544 -const SECURITY = 57545 -const FIRST = 57546 -const AFTER = 57547 -const LAST = 57548 -const BEGIN = 57549 -const START = 57550 -const TRANSACTION = 57551 -const COMMIT = 57552 -const ROLLBACK = 57553 -const SAVEPOINT = 57554 -const RELEASE = 57555 -const WORK = 57556 -const BIT = 57557 -const TINYINT = 57558 -const SMALLINT = 57559 -const MEDIUMINT = 57560 -const INT = 57561 -const INTEGER = 57562 -const BIGINT = 57563 -const INTNUM = 57564 -const REAL = 57565 -const DOUBLE = 57566 -const FLOAT_TYPE = 57567 -const DECIMAL = 57568 -const NUMERIC = 57569 -const TIME = 57570 -const TIMESTAMP = 57571 -const DATETIME = 57572 -const YEAR = 57573 -const CHAR = 57574 -const VARCHAR = 57575 -const BOOL = 57576 -const CHARACTER = 57577 -const VARBINARY = 57578 -const NCHAR = 57579 -const TEXT = 57580 -const TINYTEXT = 57581 -const MEDIUMTEXT = 57582 -const LONGTEXT = 57583 -const BLOB = 57584 -const TINYBLOB = 57585 -const MEDIUMBLOB = 57586 -const LONGBLOB = 57587 -const JSON = 57588 -const ENUM = 57589 -const GEOMETRY = 57590 -const POINT = 57591 -const LINESTRING = 57592 -const POLYGON = 57593 -const GEOMETRYCOLLECTION = 57594 -const MULTIPOINT = 57595 -const MULTILINESTRING = 57596 -const MULTIPOLYGON = 57597 -const NULLX = 57598 -const AUTO_INCREMENT = 57599 -const APPROXNUM = 57600 -const SIGNED = 57601 -const UNSIGNED = 57602 -const ZEROFILL = 57603 -const COLLATION = 57604 -const 
DATABASES = 57605 -const SCHEMAS = 57606 -const TABLES = 57607 -const VITESS_METADATA = 57608 -const VSCHEMA = 57609 -const FULL = 57610 -const PROCESSLIST = 57611 -const COLUMNS = 57612 -const FIELDS = 57613 -const ENGINES = 57614 -const PLUGINS = 57615 -const EXTENDED = 57616 -const KEYSPACES = 57617 -const VITESS_KEYSPACES = 57618 -const VITESS_SHARDS = 57619 -const VITESS_TABLETS = 57620 -const CODE = 57621 -const PRIVILEGES = 57622 -const FUNCTION = 57623 -const NAMES = 57624 -const CHARSET = 57625 -const GLOBAL = 57626 -const SESSION = 57627 -const ISOLATION = 57628 -const LEVEL = 57629 -const READ = 57630 -const WRITE = 57631 -const ONLY = 57632 -const REPEATABLE = 57633 -const COMMITTED = 57634 -const UNCOMMITTED = 57635 -const SERIALIZABLE = 57636 -const CURRENT_TIMESTAMP = 57637 -const DATABASE = 57638 -const CURRENT_DATE = 57639 -const CURRENT_TIME = 57640 -const LOCALTIME = 57641 -const LOCALTIMESTAMP = 57642 -const CURRENT_USER = 57643 -const UTC_DATE = 57644 -const UTC_TIME = 57645 -const UTC_TIMESTAMP = 57646 -const REPLACE = 57647 -const CONVERT = 57648 -const CAST = 57649 -const SUBSTR = 57650 -const SUBSTRING = 57651 -const GROUP_CONCAT = 57652 -const SEPARATOR = 57653 -const TIMESTAMPADD = 57654 -const TIMESTAMPDIFF = 57655 -const MATCH = 57656 -const AGAINST = 57657 -const BOOLEAN = 57658 -const LANGUAGE = 57659 -const WITH = 57660 -const QUERY = 57661 -const EXPANSION = 57662 -const WITHOUT = 57663 -const VALIDATION = 57664 -const UNUSED = 57665 -const ARRAY = 57666 -const CUME_DIST = 57667 -const DESCRIPTION = 57668 -const DENSE_RANK = 57669 -const EMPTY = 57670 -const EXCEPT = 57671 -const FIRST_VALUE = 57672 -const GROUPING = 57673 -const GROUPS = 57674 -const JSON_TABLE = 57675 -const LAG = 57676 -const LAST_VALUE = 57677 -const LATERAL = 57678 -const LEAD = 57679 -const MEMBER = 57680 -const NTH_VALUE = 57681 -const NTILE = 57682 -const OF = 57683 -const OVER = 57684 -const PERCENT_RANK = 57685 -const RANK = 57686 -const RECURSIVE = 57687 
-const ROW_NUMBER = 57688 -const SYSTEM = 57689 -const WINDOW = 57690 -const ACTIVE = 57691 -const ADMIN = 57692 -const BUCKETS = 57693 -const CLONE = 57694 -const COMPONENT = 57695 -const DEFINITION = 57696 -const ENFORCED = 57697 -const EXCLUDE = 57698 -const FOLLOWING = 57699 -const GEOMCOLLECTION = 57700 -const GET_MASTER_PUBLIC_KEY = 57701 -const HISTOGRAM = 57702 -const HISTORY = 57703 -const INACTIVE = 57704 -const INVISIBLE = 57705 -const LOCKED = 57706 -const MASTER_COMPRESSION_ALGORITHMS = 57707 -const MASTER_PUBLIC_KEY_PATH = 57708 -const MASTER_TLS_CIPHERSUITES = 57709 -const MASTER_ZSTD_COMPRESSION_LEVEL = 57710 -const NESTED = 57711 -const NETWORK_NAMESPACE = 57712 -const NOWAIT = 57713 -const NULLS = 57714 -const OJ = 57715 -const OLD = 57716 -const OPTIONAL = 57717 -const ORDINALITY = 57718 -const ORGANIZATION = 57719 -const OTHERS = 57720 -const PATH = 57721 -const PERSIST = 57722 -const PERSIST_ONLY = 57723 -const PRECEDING = 57724 -const PRIVILEGE_CHECKS_USER = 57725 -const PROCESS = 57726 -const RANDOM = 57727 -const REFERENCE = 57728 -const REQUIRE_ROW_FORMAT = 57729 -const RESOURCE = 57730 -const RESPECT = 57731 -const RESTART = 57732 -const RETAIN = 57733 -const REUSE = 57734 -const ROLE = 57735 -const SECONDARY = 57736 -const SECONDARY_ENGINE = 57737 -const SECONDARY_LOAD = 57738 -const SECONDARY_UNLOAD = 57739 -const SKIP = 57740 -const SRID = 57741 -const THREAD_PRIORITY = 57742 -const TIES = 57743 -const UNBOUNDED = 57744 -const VCPU = 57745 -const VISIBLE = 57746 -const FORMAT = 57747 -const TREE = 57748 -const VITESS = 57749 -const TRADITIONAL = 57750 -const LOCAL = 57751 -const LOW_PRIORITY = 57752 -const AVG_ROW_LENGTH = 57753 -const CONNECTION = 57754 -const CHECKSUM = 57755 -const DELAY_KEY_WRITE = 57756 -const ENCRYPTION = 57757 -const ENGINE = 57758 -const INSERT_METHOD = 57759 -const MAX_ROWS = 57760 -const MIN_ROWS = 57761 -const PACK_KEYS = 57762 -const PASSWORD = 57763 -const FIXED = 57764 -const DYNAMIC = 57765 -const 
COMPRESSED = 57766 -const REDUNDANT = 57767 -const COMPACT = 57768 -const ROW_FORMAT = 57769 -const STATS_AUTO_RECALC = 57770 -const STATS_PERSISTENT = 57771 -const STATS_SAMPLE_PAGES = 57772 -const STORAGE = 57773 -const MEMORY = 57774 -const DISK = 57775 +const CALL = 57378 +const DISTINCTROW = 57379 +const PARSER = 57380 +const OUTFILE = 57381 +const S3 = 57382 +const DATA = 57383 +const LOAD = 57384 +const LINES = 57385 +const TERMINATED = 57386 +const ESCAPED = 57387 +const ENCLOSED = 57388 +const DUMPFILE = 57389 +const CSV = 57390 +const HEADER = 57391 +const MANIFEST = 57392 +const OVERWRITE = 57393 +const STARTING = 57394 +const OPTIONALLY = 57395 +const VALUES = 57396 +const LAST_INSERT_ID = 57397 +const NEXT = 57398 +const VALUE = 57399 +const SHARE = 57400 +const MODE = 57401 +const SQL_NO_CACHE = 57402 +const SQL_CACHE = 57403 +const SQL_CALC_FOUND_ROWS = 57404 +const JOIN = 57405 +const STRAIGHT_JOIN = 57406 +const LEFT = 57407 +const RIGHT = 57408 +const INNER = 57409 +const OUTER = 57410 +const CROSS = 57411 +const NATURAL = 57412 +const USE = 57413 +const FORCE = 57414 +const ON = 57415 +const USING = 57416 +const INPLACE = 57417 +const COPY = 57418 +const ALGORITHM = 57419 +const NONE = 57420 +const SHARED = 57421 +const EXCLUSIVE = 57422 +const ID = 57423 +const AT_ID = 57424 +const AT_AT_ID = 57425 +const HEX = 57426 +const STRING = 57427 +const INTEGRAL = 57428 +const FLOAT = 57429 +const HEXNUM = 57430 +const VALUE_ARG = 57431 +const LIST_ARG = 57432 +const COMMENT = 57433 +const COMMENT_KEYWORD = 57434 +const BIT_LITERAL = 57435 +const COMPRESSION = 57436 +const NULL = 57437 +const TRUE = 57438 +const FALSE = 57439 +const OFF = 57440 +const DISCARD = 57441 +const IMPORT = 57442 +const ENABLE = 57443 +const DISABLE = 57444 +const TABLESPACE = 57445 +const OR = 57446 +const XOR = 57447 +const AND = 57448 +const NOT = 57449 +const BETWEEN = 57450 +const CASE = 57451 +const WHEN = 57452 +const THEN = 57453 +const ELSE = 57454 +const END = 57455 
+const LE = 57456 +const GE = 57457 +const NE = 57458 +const NULL_SAFE_EQUAL = 57459 +const IS = 57460 +const LIKE = 57461 +const REGEXP = 57462 +const IN = 57463 +const SHIFT_LEFT = 57464 +const SHIFT_RIGHT = 57465 +const DIV = 57466 +const MOD = 57467 +const UNARY = 57468 +const COLLATE = 57469 +const BINARY = 57470 +const UNDERSCORE_BINARY = 57471 +const UNDERSCORE_UTF8MB4 = 57472 +const UNDERSCORE_UTF8 = 57473 +const UNDERSCORE_LATIN1 = 57474 +const INTERVAL = 57475 +const JSON_EXTRACT_OP = 57476 +const JSON_UNQUOTE_EXTRACT_OP = 57477 +const CREATE = 57478 +const ALTER = 57479 +const DROP = 57480 +const RENAME = 57481 +const ANALYZE = 57482 +const ADD = 57483 +const FLUSH = 57484 +const CHANGE = 57485 +const MODIFY = 57486 +const REVERT = 57487 +const SCHEMA = 57488 +const TABLE = 57489 +const INDEX = 57490 +const VIEW = 57491 +const TO = 57492 +const IGNORE = 57493 +const IF = 57494 +const UNIQUE = 57495 +const PRIMARY = 57496 +const COLUMN = 57497 +const SPATIAL = 57498 +const FULLTEXT = 57499 +const KEY_BLOCK_SIZE = 57500 +const CHECK = 57501 +const INDEXES = 57502 +const ACTION = 57503 +const CASCADE = 57504 +const CONSTRAINT = 57505 +const FOREIGN = 57506 +const NO = 57507 +const REFERENCES = 57508 +const RESTRICT = 57509 +const SHOW = 57510 +const DESCRIBE = 57511 +const EXPLAIN = 57512 +const DATE = 57513 +const ESCAPE = 57514 +const REPAIR = 57515 +const OPTIMIZE = 57516 +const TRUNCATE = 57517 +const COALESCE = 57518 +const EXCHANGE = 57519 +const REBUILD = 57520 +const PARTITIONING = 57521 +const REMOVE = 57522 +const MAXVALUE = 57523 +const PARTITION = 57524 +const REORGANIZE = 57525 +const LESS = 57526 +const THAN = 57527 +const PROCEDURE = 57528 +const TRIGGER = 57529 +const VINDEX = 57530 +const VINDEXES = 57531 +const DIRECTORY = 57532 +const NAME = 57533 +const UPGRADE = 57534 +const STATUS = 57535 +const VARIABLES = 57536 +const WARNINGS = 57537 +const CASCADED = 57538 +const DEFINER = 57539 +const OPTION = 57540 +const SQL = 57541 +const 
UNDEFINED = 57542 +const SEQUENCE = 57543 +const MERGE = 57544 +const TEMPORARY = 57545 +const TEMPTABLE = 57546 +const INVOKER = 57547 +const SECURITY = 57548 +const FIRST = 57549 +const AFTER = 57550 +const LAST = 57551 +const VITESS_MIGRATION = 57552 +const CANCEL = 57553 +const RETRY = 57554 +const COMPLETE = 57555 +const BEGIN = 57556 +const START = 57557 +const TRANSACTION = 57558 +const COMMIT = 57559 +const ROLLBACK = 57560 +const SAVEPOINT = 57561 +const RELEASE = 57562 +const WORK = 57563 +const BIT = 57564 +const TINYINT = 57565 +const SMALLINT = 57566 +const MEDIUMINT = 57567 +const INT = 57568 +const INTEGER = 57569 +const BIGINT = 57570 +const INTNUM = 57571 +const REAL = 57572 +const DOUBLE = 57573 +const FLOAT_TYPE = 57574 +const DECIMAL = 57575 +const NUMERIC = 57576 +const TIME = 57577 +const TIMESTAMP = 57578 +const DATETIME = 57579 +const YEAR = 57580 +const CHAR = 57581 +const VARCHAR = 57582 +const BOOL = 57583 +const CHARACTER = 57584 +const VARBINARY = 57585 +const NCHAR = 57586 +const TEXT = 57587 +const TINYTEXT = 57588 +const MEDIUMTEXT = 57589 +const LONGTEXT = 57590 +const BLOB = 57591 +const TINYBLOB = 57592 +const MEDIUMBLOB = 57593 +const LONGBLOB = 57594 +const JSON = 57595 +const ENUM = 57596 +const GEOMETRY = 57597 +const POINT = 57598 +const LINESTRING = 57599 +const POLYGON = 57600 +const GEOMETRYCOLLECTION = 57601 +const MULTIPOINT = 57602 +const MULTILINESTRING = 57603 +const MULTIPOLYGON = 57604 +const NULLX = 57605 +const AUTO_INCREMENT = 57606 +const APPROXNUM = 57607 +const SIGNED = 57608 +const UNSIGNED = 57609 +const ZEROFILL = 57610 +const COLLATION = 57611 +const DATABASES = 57612 +const SCHEMAS = 57613 +const TABLES = 57614 +const VITESS_METADATA = 57615 +const VSCHEMA = 57616 +const FULL = 57617 +const PROCESSLIST = 57618 +const COLUMNS = 57619 +const FIELDS = 57620 +const ENGINES = 57621 +const PLUGINS = 57622 +const EXTENDED = 57623 +const KEYSPACES = 57624 +const VITESS_KEYSPACES = 57625 +const VITESS_SHARDS = 
57626 +const VITESS_TABLETS = 57627 +const VITESS_MIGRATIONS = 57628 +const CODE = 57629 +const PRIVILEGES = 57630 +const FUNCTION = 57631 +const OPEN = 57632 +const TRIGGERS = 57633 +const EVENT = 57634 +const USER = 57635 +const NAMES = 57636 +const CHARSET = 57637 +const GLOBAL = 57638 +const SESSION = 57639 +const ISOLATION = 57640 +const LEVEL = 57641 +const READ = 57642 +const WRITE = 57643 +const ONLY = 57644 +const REPEATABLE = 57645 +const COMMITTED = 57646 +const UNCOMMITTED = 57647 +const SERIALIZABLE = 57648 +const CURRENT_TIMESTAMP = 57649 +const DATABASE = 57650 +const CURRENT_DATE = 57651 +const CURRENT_TIME = 57652 +const LOCALTIME = 57653 +const LOCALTIMESTAMP = 57654 +const CURRENT_USER = 57655 +const UTC_DATE = 57656 +const UTC_TIME = 57657 +const UTC_TIMESTAMP = 57658 +const REPLACE = 57659 +const CONVERT = 57660 +const CAST = 57661 +const SUBSTR = 57662 +const SUBSTRING = 57663 +const GROUP_CONCAT = 57664 +const SEPARATOR = 57665 +const TIMESTAMPADD = 57666 +const TIMESTAMPDIFF = 57667 +const MATCH = 57668 +const AGAINST = 57669 +const BOOLEAN = 57670 +const LANGUAGE = 57671 +const WITH = 57672 +const QUERY = 57673 +const EXPANSION = 57674 +const WITHOUT = 57675 +const VALIDATION = 57676 +const UNUSED = 57677 +const ARRAY = 57678 +const CUME_DIST = 57679 +const DESCRIPTION = 57680 +const DENSE_RANK = 57681 +const EMPTY = 57682 +const EXCEPT = 57683 +const FIRST_VALUE = 57684 +const GROUPING = 57685 +const GROUPS = 57686 +const JSON_TABLE = 57687 +const LAG = 57688 +const LAST_VALUE = 57689 +const LATERAL = 57690 +const LEAD = 57691 +const MEMBER = 57692 +const NTH_VALUE = 57693 +const NTILE = 57694 +const OF = 57695 +const OVER = 57696 +const PERCENT_RANK = 57697 +const RANK = 57698 +const RECURSIVE = 57699 +const ROW_NUMBER = 57700 +const SYSTEM = 57701 +const WINDOW = 57702 +const ACTIVE = 57703 +const ADMIN = 57704 +const BUCKETS = 57705 +const CLONE = 57706 +const COMPONENT = 57707 +const DEFINITION = 57708 +const ENFORCED = 57709 +const 
EXCLUDE = 57710 +const FOLLOWING = 57711 +const GEOMCOLLECTION = 57712 +const GET_MASTER_PUBLIC_KEY = 57713 +const HISTOGRAM = 57714 +const HISTORY = 57715 +const INACTIVE = 57716 +const INVISIBLE = 57717 +const LOCKED = 57718 +const MASTER_COMPRESSION_ALGORITHMS = 57719 +const MASTER_PUBLIC_KEY_PATH = 57720 +const MASTER_TLS_CIPHERSUITES = 57721 +const MASTER_ZSTD_COMPRESSION_LEVEL = 57722 +const NESTED = 57723 +const NETWORK_NAMESPACE = 57724 +const NOWAIT = 57725 +const NULLS = 57726 +const OJ = 57727 +const OLD = 57728 +const OPTIONAL = 57729 +const ORDINALITY = 57730 +const ORGANIZATION = 57731 +const OTHERS = 57732 +const PATH = 57733 +const PERSIST = 57734 +const PERSIST_ONLY = 57735 +const PRECEDING = 57736 +const PRIVILEGE_CHECKS_USER = 57737 +const PROCESS = 57738 +const RANDOM = 57739 +const REFERENCE = 57740 +const REQUIRE_ROW_FORMAT = 57741 +const RESOURCE = 57742 +const RESPECT = 57743 +const RESTART = 57744 +const RETAIN = 57745 +const REUSE = 57746 +const ROLE = 57747 +const SECONDARY = 57748 +const SECONDARY_ENGINE = 57749 +const SECONDARY_LOAD = 57750 +const SECONDARY_UNLOAD = 57751 +const SKIP = 57752 +const SRID = 57753 +const THREAD_PRIORITY = 57754 +const TIES = 57755 +const UNBOUNDED = 57756 +const VCPU = 57757 +const VISIBLE = 57758 +const FORMAT = 57759 +const TREE = 57760 +const VITESS = 57761 +const TRADITIONAL = 57762 +const LOCAL = 57763 +const LOW_PRIORITY = 57764 +const NO_WRITE_TO_BINLOG = 57765 +const LOGS = 57766 +const ERROR = 57767 +const GENERAL = 57768 +const HOSTS = 57769 +const OPTIMIZER_COSTS = 57770 +const USER_RESOURCES = 57771 +const SLOW = 57772 +const CHANNEL = 57773 +const RELAY = 57774 +const EXPORT = 57775 +const AVG_ROW_LENGTH = 57776 +const CONNECTION = 57777 +const CHECKSUM = 57778 +const DELAY_KEY_WRITE = 57779 +const ENCRYPTION = 57780 +const ENGINE = 57781 +const INSERT_METHOD = 57782 +const MAX_ROWS = 57783 +const MIN_ROWS = 57784 +const PACK_KEYS = 57785 +const PASSWORD = 57786 +const FIXED = 57787 +const 
DYNAMIC = 57788 +const COMPRESSED = 57789 +const REDUNDANT = 57790 +const COMPACT = 57791 +const ROW_FORMAT = 57792 +const STATS_AUTO_RECALC = 57793 +const STATS_PERSISTENT = 57794 +const STATS_SAMPLE_PAGES = 57795 +const STORAGE = 57796 +const MEMORY = 57797 +const DISK = 57798 var yyToknames = [...]string{ "$end", @@ -606,6 +535,7 @@ var yyToknames = [...]string{ "UNLOCK", "KEYS", "DO", + "CALL", "DISTINCTROW", "PARSER", "OUTFILE", @@ -731,6 +661,7 @@ var yyToknames = [...]string{ "FLUSH", "CHANGE", "MODIFY", + "REVERT", "SCHEMA", "TABLE", "INDEX", @@ -788,12 +719,17 @@ var yyToknames = [...]string{ "UNDEFINED", "SEQUENCE", "MERGE", + "TEMPORARY", "TEMPTABLE", "INVOKER", "SECURITY", "FIRST", "AFTER", "LAST", + "VITESS_MIGRATION", + "CANCEL", + "RETRY", + "COMPLETE", "BEGIN", "START", "TRANSACTION", @@ -866,9 +802,14 @@ var yyToknames = [...]string{ "VITESS_KEYSPACES", "VITESS_SHARDS", "VITESS_TABLETS", + "VITESS_MIGRATIONS", "CODE", "PRIVILEGES", "FUNCTION", + "OPEN", + "TRIGGERS", + "EVENT", + "USER", "NAMES", "CHARSET", "GLOBAL", @@ -998,6 +939,17 @@ var yyToknames = [...]string{ "TRADITIONAL", "LOCAL", "LOW_PRIORITY", + "NO_WRITE_TO_BINLOG", + "LOGS", + "ERROR", + "GENERAL", + "HOSTS", + "OPTIMIZER_COSTS", + "USER_RESOURCES", + "SLOW", + "CHANNEL", + "RELAY", + "EXPORT", "AVG_ROW_LENGTH", "CONNECTION", "CHECKSUM", @@ -1035,2835 +987,3032 @@ var yyExca = [...]int{ -1, 1, 1, -1, -2, 0, - -1, 42, - 163, 911, - -2, 89, - -1, 43, - 1, 107, - 451, 107, - -2, 113, -1, 44, - 142, 113, - 252, 113, - 300, 113, - -2, 325, - -1, 51, - 34, 463, - 163, 463, - 175, 463, - 208, 477, - 209, 477, - -2, 465, - -1, 56, - 165, 487, - -2, 485, - -1, 80, - 55, 530, - -2, 538, - -1, 104, - 1, 108, - 451, 108, - -2, 113, - -1, 114, - 168, 230, - 169, 230, - -2, 319, - -1, 133, - 142, 113, - 252, 113, - 300, 113, - -2, 334, - -1, 550, - 149, 922, - -2, 918, - -1, 551, - 149, 923, - -2, 919, - -1, 568, - 55, 531, - -2, 543, - -1, 569, - 55, 532, - -2, 544, - -1, 589, - 117, 1249, - -2, 
82, - -1, 590, - 117, 1136, - -2, 83, - -1, 596, - 117, 1184, - -2, 896, - -1, 730, - 117, 1078, - -2, 893, - -1, 762, - 174, 36, - 179, 36, - -2, 241, - -1, 841, - 1, 372, - 451, 372, - -2, 113, - -1, 1057, - 1, 268, - 451, 268, - -2, 113, - -1, 1130, - 168, 230, - 169, 230, - -2, 319, - -1, 1139, - 174, 37, - 179, 37, + 164, 938, + -2, 91, + -1, 45, + 1, 112, + 474, 112, + -2, 118, + -1, 46, + 143, 118, + 259, 118, + 312, 118, + -2, 326, + -1, 53, + 34, 472, + 165, 472, + 177, 472, + 210, 486, + 211, 486, + -2, 474, + -1, 58, + 167, 496, + -2, 494, + -1, 84, + 56, 564, + -2, 572, + -1, 109, + 1, 113, + 474, 113, + -2, 118, + -1, 119, + 170, 231, + 171, 231, + -2, 320, + -1, 138, + 143, 118, + 259, 118, + 312, 118, + -2, 335, + -1, 579, + 150, 959, + -2, 955, + -1, 580, + 150, 960, + -2, 956, + -1, 599, + 56, 565, + -2, 577, + -1, 600, + 56, 566, + -2, 578, + -1, 621, + 118, 1303, + -2, 84, + -1, 622, + 118, 1185, + -2, 85, + -1, 628, + 118, 1235, + -2, 932, + -1, 766, + 118, 1122, + -2, 929, + -1, 799, + 176, 38, + 181, 38, -2, 242, - -1, 1333, - 149, 925, - -2, 921, - -1, 1423, - 73, 64, - 81, 64, - -2, 68, - -1, 1444, + -1, 880, + 1, 373, + 474, 373, + -2, 118, + -1, 1120, 1, 269, - 451, 269, - -2, 113, - -1, 1831, - 5, 790, - 18, 790, - 20, 790, - 32, 790, - 82, 790, - -2, 569, - -1, 2046, - 45, 864, - -2, 862, + 474, 269, + -2, 118, + -1, 1198, + 170, 231, + 171, 231, + -2, 320, + -1, 1207, + 176, 39, + 181, 39, + -2, 243, + -1, 1420, + 150, 964, + -2, 958, + -1, 1512, + 74, 66, + 82, 66, + -2, 70, + -1, 1533, + 1, 270, + 474, 270, + -2, 118, + -1, 1945, + 5, 825, + 18, 825, + 20, 825, + 32, 825, + 83, 825, + -2, 604, + -1, 2160, + 46, 900, + -2, 894, } const yyPrivate = 57344 -const yyLast = 26967 +const yyLast = 28950 var yyAct = [...]int{ - 550, 2134, 2121, 1878, 2046, 1749, 2098, 2059, 493, 1718, - 1991, 1967, 1647, 79, 3, 1506, 1441, 1615, 1812, 522, - 1811, 508, 1370, 1875, 561, 960, 1005, 1648, 1356, 1808, - 1476, 491, 1722, 1012, 1461, 1481, 734, 
1705, 1983, 1706, - 1823, 142, 1420, 1770, 853, 894, 1575, 173, 1327, 892, - 185, 1319, 458, 185, 1504, 1634, 792, 1248, 474, 1261, - 185, 128, 757, 1483, 594, 77, 1698, 1042, 1049, 1409, - 1137, 1402, 570, 1010, 1372, 1015, 1035, 495, 1032, 999, - 1353, 1296, 555, 1553, 741, 1144, 31, 738, 474, 484, - 1227, 474, 185, 474, 1039, 1472, 1033, 763, 1484, 1385, - 758, 759, 742, 1048, 1022, 1046, 75, 1425, 564, 172, - 1129, 1155, 1264, 111, 760, 112, 834, 770, 973, 145, - 1462, 105, 106, 481, 74, 976, 8, 7, 6, 1741, - 1740, 1993, 1535, 591, 1367, 1368, 1214, 2090, 1282, 1613, - 2043, 1946, 2020, 2019, 1854, 1962, 796, 795, 1963, 2140, - 2095, 2133, 1488, 735, 2070, 113, 2124, 76, 576, 580, - 1879, 1523, 556, 107, 2094, 2069, 1787, 1908, 185, 749, - 1614, 1678, 436, 1486, 1677, 797, 1837, 1679, 185, 863, - 847, 1838, 1839, 185, 1435, 482, 483, 794, 1542, 1436, - 1437, 890, 1541, 588, 174, 175, 176, 811, 462, 80, - 808, 809, 554, 812, 813, 814, 815, 553, 595, 818, - 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, - 829, 830, 831, 832, 107, 753, 751, 750, 752, 1689, - 773, 861, 1455, 774, 82, 83, 84, 85, 86, 87, - 33, 872, 873, 68, 37, 38, 461, 1939, 1114, 798, - 799, 800, 1485, 1050, 449, 1051, 1369, 171, 2074, 805, - 472, 1899, 1897, 450, 470, 1281, 810, 864, 476, 889, - 166, 1723, 535, 447, 541, 542, 539, 540, 1505, 538, - 537, 536, 1538, 97, 1745, 1751, 1330, 1228, 2123, 543, - 544, 1746, 107, 835, 888, 108, 1233, 130, 1283, 1284, - 1285, 174, 175, 176, 842, 150, 869, 462, 174, 175, - 176, 1753, 444, 1754, 67, 102, 178, 179, 180, 862, - 1550, 456, 2032, 922, 921, 931, 932, 924, 925, 926, - 927, 928, 929, 930, 923, 462, 140, 933, 102, 94, - 817, 129, 102, 167, 1236, 98, 1237, 1238, 99, 100, - 1234, 816, 2091, 867, 868, 461, 1752, 865, 866, 147, - 1230, 148, 2016, 462, 1957, 772, 117, 118, 139, 138, - 165, 874, 781, 1507, 1204, 875, 872, 873, 2138, 779, - 1403, 790, 789, 461, 788, 787, 786, 881, 785, 883, - 437, 438, 439, 1232, 454, 455, 465, 784, 783, 778, - 451, 453, 
466, 440, 441, 468, 467, 1853, 443, 442, - 754, 461, 446, 463, 1487, 1205, 1123, 1206, 134, 115, - 141, 122, 114, 1540, 135, 136, 880, 882, 151, 2068, - 791, 845, 1958, 886, 1231, 1557, 185, 739, 156, 123, - 739, 765, 766, 846, 737, 2141, 2110, 578, 746, 772, - 876, 879, 739, 126, 124, 119, 120, 121, 125, 474, - 474, 474, 1426, 116, 782, 170, 2075, 101, 104, 2008, - 462, 780, 127, 1143, 1142, 582, 1755, 474, 474, 1616, - 1618, 772, 856, 857, 858, 859, 860, 2060, 1529, 1241, - 101, 898, 904, 801, 101, 772, 1715, 1537, 1796, 771, - 1795, 772, 891, 1794, 747, 775, 765, 1733, 435, 895, - 896, 485, 177, 1549, 1771, 776, 1548, 2050, 461, 1525, - 945, 946, 1928, 878, 2136, 1836, 1639, 2137, 1583, 2135, - 2033, 1555, 1594, 777, 1515, 1431, 1554, 464, 877, 143, - 1026, 958, 851, 1442, 459, 1216, 1215, 1217, 1218, 1219, - 923, 1591, 933, 933, 185, 1674, 69, 1773, 1555, 460, - 1262, 1381, 885, 1554, 807, 913, 90, 1278, 943, 1789, - 772, 1003, 2024, 793, 887, 1617, 1821, 1265, 1229, 474, - 870, 1303, 185, 771, 185, 185, 1052, 474, 137, 1002, - 765, 768, 769, 474, 739, 1301, 1302, 1300, 762, 766, - 131, 908, 855, 132, 840, 907, 905, 906, 2009, 2007, - 961, 91, 848, 849, 1775, 771, 1779, 761, 1774, 1354, - 1772, 775, 765, 910, 1687, 1777, 1031, 1703, 1118, 771, - 1000, 776, 591, 839, 1776, 771, 765, 768, 769, 913, - 739, 841, 1524, 1016, 762, 766, 2053, 1778, 1780, 1522, - 1019, 911, 912, 910, 975, 978, 980, 982, 983, 985, - 987, 988, 979, 981, 2142, 984, 986, 1520, 989, 913, - 945, 946, 1354, 565, 1601, 1263, 997, 922, 921, 931, - 932, 924, 925, 926, 927, 928, 929, 930, 923, 945, - 946, 933, 1266, 144, 149, 146, 152, 153, 154, 155, - 157, 158, 159, 160, 771, 1452, 806, 595, 781, 161, - 162, 163, 164, 836, 854, 837, 779, 1453, 838, 922, - 921, 931, 932, 924, 925, 926, 927, 928, 929, 930, - 923, 2143, 1590, 933, 185, 1517, 1576, 1945, 1110, 926, - 927, 928, 929, 930, 923, 2125, 185, 933, 1119, 1120, - 924, 925, 926, 927, 928, 929, 930, 923, 1004, 1521, - 933, 474, 745, 1139, 
1291, 1293, 1294, 911, 912, 910, - 169, 1148, 1517, 2126, 1944, 1152, 1292, 1223, 474, 474, - 67, 474, 1149, 474, 474, 913, 474, 474, 474, 474, - 474, 474, 1299, 174, 175, 176, 1519, 1135, 174, 175, - 176, 474, 1321, 912, 910, 185, 1188, 1183, 1184, 1121, - 1122, 1568, 1569, 1570, 2115, 911, 912, 910, 1128, 1859, - 913, 1201, 931, 932, 924, 925, 926, 927, 928, 929, - 930, 923, 474, 913, 933, 1222, 1702, 1185, 1386, 1387, - 185, 1147, 2116, 1798, 1701, 1491, 185, 1109, 1224, 185, - 1247, 1221, 185, 1694, 1014, 1146, 1211, 1209, 1322, 748, - 1208, 1116, 1207, 185, 744, 185, 1125, 1191, 1192, 1126, - 1199, 1124, 1138, 1197, 1198, 1193, 1190, 474, 474, 185, - 474, 474, 185, 474, 474, 1189, 1145, 1145, 1164, 2128, - 1157, 1799, 1158, 2127, 1160, 1162, 1047, 1911, 1166, 1168, - 1170, 1172, 1174, 174, 175, 176, 1253, 1681, 1255, 1220, - 1257, 1258, 1259, 1260, 1210, 2117, 1250, 2106, 2084, 911, - 912, 910, 1186, 1980, 1942, 1916, 1268, 1269, 1800, 1271, - 1272, 1267, 1274, 1275, 1320, 914, 1297, 913, 1711, 581, - 1242, 1699, 1565, 1323, 922, 921, 931, 932, 924, 925, - 926, 927, 928, 929, 930, 923, 1533, 474, 933, 1532, - 1251, 107, 1212, 751, 750, 1200, 1589, 1196, 174, 175, - 176, 485, 1499, 1195, 1588, 1342, 1345, 1324, 1325, 1194, - 971, 1355, 1383, 1748, 586, 911, 912, 910, 1335, 1336, - 565, 474, 474, 1791, 1298, 1337, 2014, 1276, 2013, 911, - 912, 910, 185, 913, 1331, 174, 175, 176, 76, 1497, - 1008, 1011, 1864, 2109, 474, 1877, 1332, 913, 174, 175, - 176, 185, 1202, 1333, 474, 583, 584, 1376, 185, 1725, - 185, 961, 1377, 174, 175, 176, 1714, 1388, 185, 185, - 1864, 2066, 1361, 1362, 1382, 474, 1864, 2051, 474, 2037, - 565, 1421, 511, 510, 513, 514, 515, 516, 1450, 474, - 1820, 512, 1923, 517, 1864, 2022, 1334, 1960, 565, 911, - 912, 910, 1331, 921, 931, 932, 924, 925, 926, 927, - 928, 929, 930, 923, 1400, 565, 933, 913, 1635, 1396, - 591, 1333, 78, 591, 1635, 1446, 1517, 565, 33, 1445, - 1926, 565, 1864, 1869, 1851, 1850, 33, 1463, 1464, 1465, - 1847, 1848, 1847, 
1846, 474, 1394, 565, 551, 1426, 1742, - 1427, 1449, 1496, 1498, 1113, 1727, 1720, 1721, 1398, 1406, - 565, 1642, 33, 1424, 1427, 474, 909, 565, 1668, 1478, - 1518, 474, 1113, 1112, 909, 1148, 1426, 1148, 1429, 1433, - 1432, 1058, 1057, 1643, 1809, 1516, 1406, 1394, 2023, 1448, - 1447, 1395, 1820, 1820, 1947, 595, 1864, 186, 595, 1405, - 186, 1503, 67, 558, 1849, 475, 1406, 186, 1434, 1998, - 67, 1428, 1606, 1605, 1394, 474, 1517, 1320, 1500, 1430, - 1384, 1365, 1320, 1320, 1517, 1428, 1240, 1479, 1044, 756, - 755, 1474, 1475, 1426, 2058, 475, 67, 1492, 475, 186, - 475, 1948, 1949, 1950, 1490, 1513, 1489, 1514, 1495, 1406, - 1179, 1526, 67, 1969, 1876, 1509, 1479, 185, 1934, 1512, - 1394, 185, 185, 185, 185, 185, 1508, 1115, 1528, 1477, - 1527, 185, 185, 1530, 1531, 185, 1747, 67, 773, 1338, - 1339, 774, 1510, 1344, 1347, 1348, 1473, 1467, 1466, 1226, - 1140, 1145, 1136, 185, 185, 185, 1111, 1180, 1181, 1182, - 1708, 92, 171, 1824, 1825, 1750, 1970, 185, 1951, 1360, - 185, 474, 1363, 1364, 1707, 186, 488, 1488, 2130, 1910, - 1411, 1414, 1415, 1416, 1412, 186, 1413, 1417, 2122, 1842, - 186, 1827, 1809, 1252, 1176, 1716, 1559, 1279, 1244, 1830, - 1013, 1829, 1563, 1656, 1456, 1655, 1457, 1458, 1459, 1460, - 1297, 1536, 1952, 1953, 2112, 1659, 1657, 2093, 1708, 1558, - 1660, 1658, 1468, 1469, 1470, 1471, 922, 921, 931, 932, - 924, 925, 926, 927, 928, 929, 930, 923, 1177, 1178, - 933, 1661, 1578, 1415, 1416, 1801, 1579, 1286, 1287, 1288, - 1289, 571, 1585, 1624, 1927, 1867, 185, 1586, 1587, 1633, - 1632, 2080, 2077, 1593, 185, 572, 1596, 1597, 1298, 2114, - 2097, 2099, 1571, 2105, 1603, 2104, 1604, 1622, 96, 1607, - 1608, 1609, 1610, 1611, 2047, 1623, 185, 2045, 1017, 1018, - 574, 1239, 573, 1621, 552, 1712, 1350, 185, 185, 185, - 185, 185, 1340, 1341, 1649, 1628, 1644, 1584, 1707, 185, - 1351, 556, 803, 185, 802, 897, 185, 185, 1006, 1735, - 185, 185, 185, 1600, 168, 1734, 1666, 181, 1637, 1686, - 1007, 108, 1000, 1680, 2055, 1612, 2054, 1996, 1620, 1664, - 1665, 
1511, 1154, 1153, 1141, 1921, 1379, 1640, 1627, 1386, - 1387, 1493, 1243, 1693, 2015, 1964, 1419, 1631, 1669, 1638, - 1636, 562, 1671, 559, 560, 1630, 1919, 2119, 1692, 2118, - 1695, 1696, 1697, 2102, 2081, 1650, 1683, 1662, 1653, 474, - 1920, 1863, 1690, 1691, 1501, 1667, 1250, 563, 1672, 78, - 1440, 1804, 474, 1675, 1651, 1652, 1635, 1654, 474, 1684, - 1595, 474, 1592, 1148, 1724, 2132, 2131, 558, 474, 1027, - 1728, 1411, 1414, 1415, 1416, 1412, 571, 1413, 1417, 1020, - 1739, 1824, 1825, 2132, 1700, 2048, 1940, 1380, 185, 76, - 572, 81, 1710, 73, 1, 1738, 1709, 445, 1366, 998, - 457, 2120, 1213, 186, 1203, 1880, 1966, 474, 185, 1480, - 1737, 1730, 1128, 568, 569, 574, 1870, 573, 1494, 1704, - 1482, 764, 133, 1332, 1443, 1444, 475, 475, 475, 2062, - 1333, 1736, 89, 1729, 474, 732, 88, 767, 884, 1502, - 1320, 2006, 1961, 1688, 475, 475, 1454, 1938, 1841, 1685, - 2052, 1064, 1062, 1063, 1061, 1066, 1756, 1065, 1060, 1764, - 1765, 1280, 1769, 471, 1418, 1580, 1581, 1053, 1768, 1760, - 474, 1021, 1758, 804, 1759, 1852, 1451, 1277, 1534, 1767, - 185, 1766, 1788, 452, 1782, 871, 1598, 448, 941, 1629, - 474, 1781, 1676, 592, 585, 1815, 474, 474, 95, 2103, - 2078, 1649, 2076, 2044, 1992, 1810, 2079, 2042, 2113, 2096, - 1378, 1009, 1918, 1813, 1803, 1807, 1599, 970, 1352, 185, - 1036, 186, 494, 1290, 1816, 509, 506, 507, 1389, 1641, - 915, 492, 486, 1028, 1410, 1767, 1408, 1407, 1245, 521, - 1040, 1826, 1822, 1034, 1828, 1831, 475, 1393, 1539, 186, - 1744, 186, 186, 1819, 475, 1905, 567, 1844, 1845, 93, - 475, 1833, 1349, 1860, 2031, 1907, 185, 185, 566, 59, - 36, 474, 478, 1832, 1840, 1834, 2089, 1835, 900, 575, - 30, 1797, 1866, 29, 185, 28, 23, 22, 21, 184, - 20, 19, 469, 25, 1856, 1855, 18, 17, 1871, 184, - 16, 1881, 474, 474, 474, 103, 185, 1865, 1818, 46, - 43, 41, 110, 1868, 1857, 1858, 109, 1873, 44, 1874, - 40, 843, 579, 579, 27, 26, 15, 14, 13, 12, - 11, 184, 10, 947, 948, 949, 950, 951, 952, 953, - 954, 955, 956, 1890, 9, 1889, 5, 4, 903, 1891, - 24, 959, 1602, 
1886, 1887, 2, 1895, 0, 0, 0, - 1900, 1901, 0, 922, 921, 931, 932, 924, 925, 926, - 927, 928, 929, 930, 923, 0, 1915, 933, 0, 1649, - 1625, 1626, 1011, 0, 1917, 0, 0, 0, 0, 0, - 1922, 0, 0, 1924, 1925, 0, 0, 1929, 1931, 0, - 0, 186, 0, 1930, 0, 0, 0, 184, 0, 0, - 0, 0, 0, 186, 474, 474, 1936, 184, 0, 0, - 0, 0, 184, 1955, 0, 0, 0, 474, 475, 0, - 474, 1941, 0, 1943, 166, 1954, 1965, 0, 0, 1937, - 0, 0, 0, 0, 1973, 475, 475, 0, 475, 1959, - 475, 475, 0, 475, 475, 475, 475, 475, 475, 108, - 0, 0, 0, 474, 474, 474, 185, 1971, 475, 150, - 0, 0, 186, 0, 0, 1968, 0, 474, 1972, 474, - 0, 0, 1979, 0, 0, 474, 0, 0, 1989, 1999, - 0, 1984, 1997, 1813, 2004, 0, 1995, 1813, 2001, 475, - 0, 1990, 1987, 1988, 0, 2003, 0, 186, 2011, 185, - 2012, 2005, 2010, 186, 0, 0, 186, 0, 0, 186, - 474, 185, 0, 147, 0, 148, 2018, 0, 2021, 2025, - 186, 0, 186, 0, 165, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 475, 475, 186, 475, 475, 186, - 475, 475, 0, 2027, 2028, 2029, 2030, 2041, 2034, 0, - 2035, 2036, 2038, 0, 0, 0, 2039, 2040, 1813, 2049, - 1892, 1893, 0, 1894, 474, 474, 1896, 0, 1898, 2056, - 0, 0, 0, 0, 0, 2061, 0, 0, 474, 0, - 0, 0, 151, 0, 0, 0, 0, 0, 0, 1790, - 0, 474, 156, 2073, 0, 0, 0, 1649, 0, 474, - 2067, 2082, 0, 0, 2085, 0, 0, 0, 2088, 1968, - 2063, 0, 2092, 0, 475, 0, 0, 0, 0, 0, - 0, 2101, 2100, 1805, 922, 921, 931, 932, 924, 925, - 926, 927, 928, 929, 930, 923, 2111, 0, 933, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 475, 475, - 0, 0, 0, 0, 0, 2107, 2108, 0, 0, 186, - 0, 0, 0, 2129, 0, 184, 0, 0, 0, 0, - 0, 475, 0, 0, 2139, 0, 0, 0, 186, 0, - 0, 475, 0, 0, 0, 186, 0, 186, 0, 0, - 0, 0, 0, 143, 0, 186, 186, 0, 0, 0, - 0, 917, 475, 920, 0, 475, 0, 0, 1904, 934, - 935, 936, 937, 938, 939, 940, 475, 918, 919, 916, - 922, 921, 931, 932, 924, 925, 926, 927, 928, 929, - 930, 923, 0, 0, 933, 0, 0, 0, 0, 0, - 0, 0, 1295, 0, 0, 1304, 1305, 1306, 1307, 1308, - 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, - 1903, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 475, 0, 0, 0, 0, 0, 0, 0, 1909, - 0, 0, 0, 184, 0, 0, 
0, 0, 0, 0, - 0, 0, 475, 0, 0, 0, 0, 579, 475, 0, - 0, 0, 485, 0, 1357, 0, 0, 0, 0, 1932, - 0, 184, 1933, 184, 1043, 1935, 922, 921, 931, 932, - 924, 925, 926, 927, 928, 929, 930, 923, 0, 0, - 933, 0, 1902, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 475, 0, 0, 0, 0, 144, 149, 146, - 152, 153, 154, 155, 157, 158, 159, 160, 0, 0, - 0, 0, 0, 161, 162, 163, 164, 0, 922, 921, - 931, 932, 924, 925, 926, 927, 928, 929, 930, 923, - 0, 0, 933, 0, 186, 0, 0, 0, 186, 186, - 186, 186, 186, 0, 0, 0, 0, 0, 186, 186, - 0, 0, 186, 0, 166, 0, 0, 1994, 485, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 186, 186, 186, 520, 0, 0, 0, 0, 0, 108, - 0, 0, 0, 0, 186, 0, 0, 186, 475, 150, - 922, 921, 931, 932, 924, 925, 926, 927, 928, 929, - 930, 923, 0, 0, 933, 0, 0, 0, 0, 0, - 0, 0, 0, 184, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 184, 0, 0, 0, 0, - 1682, 473, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 147, 0, 148, 0, 0, 0, 0, - 0, 0, 0, 1151, 165, 0, 0, 0, 0, 0, - 0, 593, 0, 0, 736, 1761, 743, 0, 0, 0, - 0, 0, 0, 186, 0, 0, 0, 0, 1151, 1151, - 0, 186, 0, 0, 184, 922, 921, 931, 932, 924, - 925, 926, 927, 928, 929, 930, 923, 0, 0, 933, - 0, 0, 0, 186, 0, 0, 0, 0, 0, 1001, - 0, 0, 151, 0, 186, 186, 186, 186, 186, 184, - 0, 0, 156, 0, 0, 184, 186, 0, 184, 0, - 186, 1249, 0, 186, 186, 0, 0, 186, 186, 186, - 0, 0, 184, 0, 184, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 184, 183, - 0, 184, 0, 1577, 0, 0, 0, 0, 0, 477, - 0, 0, 0, 0, 0, 0, 0, 0, 1572, 1573, - 1574, 0, 0, 922, 921, 931, 932, 924, 925, 926, - 927, 928, 929, 930, 923, 0, 475, 933, 0, 0, - 0, 740, 0, 0, 0, 0, 0, 0, 0, 475, - 0, 0, 0, 0, 0, 475, 0, 0, 475, 0, - 0, 0, 0, 143, 0, 475, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 579, 1249, 0, - 0, 0, 579, 579, 0, 186, 579, 579, 579, 0, - 0, 0, 1151, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 475, 186, 0, 0, 0, 0, - 0, 0, 579, 579, 579, 579, 579, 833, 0, 0, - 0, 1374, 0, 0, 0, 0, 0, 844, 0, 0, - 0, 475, 850, 0, 0, 0, 0, 0, 0, 0, - 184, 0, 0, 0, 0, 0, 1249, 184, 0, 184, - 0, 0, 0, 0, 0, 0, 0, 184, 184, 0, - 0, 0, 0, 0, 0, 0, 0, 475, 0, 0, - 0, 
0, 0, 0, 0, 0, 0, 186, 0, 523, - 32, 0, 0, 0, 0, 0, 0, 475, 0, 0, - 0, 0, 0, 475, 475, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 32, 0, 0, 0, 186, 144, 149, 146, - 152, 153, 154, 155, 157, 158, 159, 160, 0, 0, - 0, 0, 0, 161, 162, 163, 164, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 557, 0, 0, 0, 0, - 0, 0, 0, 186, 186, 0, 0, 0, 475, 0, - 0, 0, 593, 593, 593, 0, 0, 0, 0, 0, - 0, 186, 0, 0, 0, 0, 0, 0, 0, 0, - 899, 901, 0, 1762, 1763, 0, 0, 0, 0, 475, - 475, 475, 0, 186, 0, 0, 0, 0, 1783, 1784, - 0, 1785, 1786, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1792, 1793, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 184, 0, 0, 0, - 184, 184, 184, 184, 184, 0, 0, 0, 0, 0, - 184, 184, 0, 0, 184, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1560, 1561, 184, 852, 0, 0, 0, 0, - 0, 0, 1024, 0, 0, 0, 184, 0, 0, 184, - 593, 0, 0, 1843, 0, 0, 1054, 0, 0, 0, - 0, 475, 475, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 475, 0, 0, 475, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 579, 579, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 475, 475, 475, 186, 0, 0, 0, 0, 0, 579, - 0, 0, 0, 0, 475, 0, 475, 0, 1888, 0, - 0, 0, 475, 0, 0, 184, 0, 0, 0, 0, - 0, 0, 0, 1374, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 186, 0, 0, 0, - 0, 0, 0, 0, 579, 184, 0, 475, 186, 0, - 0, 0, 0, 0, 0, 1151, 184, 184, 184, 184, - 184, 1030, 0, 0, 1041, 0, 0, 0, 1663, 0, - 0, 0, 184, 0, 0, 184, 184, 0, 0, 184, - 1673, 1249, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 475, 475, 0, 736, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 475, 0, 1150, 0, 0, - 0, 1156, 1156, 0, 1156, 0, 1156, 1156, 475, 1165, - 1156, 1156, 1156, 1156, 1156, 0, 475, 0, 0, 0, - 0, 0, 1150, 1150, 736, 0, 0, 0, 0, 1974, - 1975, 1976, 1977, 1978, 0, 1151, 0, 1981, 1982, 0, - 0, 0, 0, 0, 0, 1249, 0, 0, 893, 893, - 893, 0, 0, 0, 0, 1225, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 184, 32, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 942, - 944, 0, 0, 0, 0, 0, 0, 184, 0, 0, - 0, 0, 0, 1059, 0, 0, 0, 0, 0, 0, - 593, 593, 0, 
593, 593, 1117, 593, 593, 0, 0, - 957, 0, 579, 0, 962, 963, 964, 965, 966, 967, - 968, 969, 0, 972, 974, 977, 977, 977, 974, 977, - 977, 974, 977, 990, 991, 992, 993, 994, 995, 996, - 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 184, - 0, 0, 0, 0, 1187, 0, 0, 0, 0, 0, - 0, 0, 1151, 1037, 0, 0, 0, 0, 0, 0, - 1326, 0, 593, 0, 2086, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1150, 0, 184, 1235, - 0, 0, 0, 0, 0, 1041, 0, 0, 1246, 0, - 0, 0, 0, 0, 1358, 1359, 0, 166, 0, 0, - 0, 0, 1254, 0, 1256, 0, 0, 0, 1717, 0, - 0, 0, 0, 0, 0, 0, 0, 1390, 1270, 0, - 0, 1273, 108, 0, 130, 184, 184, 1024, 0, 0, - 593, 0, 150, 1151, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 184, 0, 0, 0, 0, 593, 0, - 0, 593, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 736, 140, 0, 184, 0, 0, 129, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 147, 0, 148, 0, - 0, 0, 0, 1131, 1132, 139, 138, 165, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 743, 0, 0, - 0, 0, 0, 166, 0, 0, 0, 0, 0, 0, - 1151, 0, 0, 0, 1127, 0, 0, 0, 736, 0, - 0, 0, 0, 0, 743, 134, 1133, 141, 108, 1130, - 130, 135, 136, 0, 0, 151, 0, 0, 150, 0, - 1397, 0, 0, 0, 0, 156, 0, 1401, 0, 1404, - 0, 0, 0, 0, 0, 0, 0, 0, 1423, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 736, 140, - 0, 0, 0, 0, 129, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 147, 0, 148, 0, 0, 0, 0, 1131, - 1132, 139, 138, 165, 0, 1374, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 893, 893, 0, 893, - 893, 0, 893, 893, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 143, 0, 184, 0, - 0, 134, 1133, 141, 0, 1130, 0, 135, 136, 0, - 184, 151, 0, 0, 1567, 0, 0, 0, 0, 0, - 0, 156, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 137, 0, 0, 0, 33, - 34, 35, 68, 37, 38, 0, 0, 131, 0, 0, - 132, 0, 0, 0, 0, 0, 0, 0, 0, 72, - 0, 0, 0, 0, 39, 65, 66, 0, 63, 0, - 0, 0, 0, 0, 64, 0, 0, 0, 1151, 0, - 0, 0, 0, 0, 0, 0, 1041, 0, 0, 0, - 1543, 1544, 1545, 1546, 1547, 0, 0, 0, 0, 1081, - 1551, 1552, 0, 52, 1556, 0, 0, 0, 0, 0, - 0, 0, 143, 67, 0, 0, 0, 0, 0, 0, - 0, 
0, 0, 0, 1562, 0, 0, 1422, 0, 1150, - 0, 0, 0, 0, 0, 0, 1564, 0, 0, 1566, - 144, 149, 146, 152, 153, 154, 155, 157, 158, 159, - 160, 0, 0, 0, 0, 0, 161, 162, 163, 164, - 0, 137, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 131, 0, 0, 132, 0, 0, 0, - 0, 0, 0, 0, 0, 42, 45, 48, 47, 50, - 0, 62, 0, 0, 0, 0, 0, 0, 0, 0, + 579, 2247, 2236, 1997, 2191, 1858, 2213, 2173, 1746, 1827, + 2161, 2110, 2086, 551, 1925, 1713, 1548, 2102, 1021, 939, + 1530, 1926, 1994, 537, 1457, 1597, 1922, 1733, 1068, 1747, + 592, 1831, 83, 3, 520, 892, 1563, 1812, 769, 1568, + 829, 147, 522, 1811, 517, 1183, 1937, 1810, 1320, 180, + 1673, 1647, 180, 1884, 485, 180, 1595, 1075, 626, 1205, + 501, 1509, 180, 1414, 1570, 133, 1112, 1804, 1105, 1406, + 180, 794, 1491, 1078, 1498, 919, 1098, 601, 1095, 1073, + 1459, 1060, 1440, 33, 513, 586, 1096, 524, 1383, 957, + 1102, 81, 501, 1182, 807, 501, 180, 501, 776, 1212, + 781, 800, 773, 797, 795, 796, 1295, 1111, 1559, 1474, + 1514, 623, 777, 1085, 79, 1325, 886, 110, 1223, 150, + 1172, 1549, 1109, 1197, 116, 937, 111, 1180, 871, 1034, + 78, 8, 508, 7, 6, 1177, 1037, 1850, 1849, 1626, + 117, 2112, 1872, 1873, 84, 1372, 182, 183, 184, 1454, + 1455, 1371, 1370, 1369, 1368, 1367, 511, 1282, 512, 1360, + 608, 612, 2205, 587, 1711, 112, 770, 2157, 1971, 2065, + 118, 2134, 2133, 180, 2081, 460, 831, 2082, 2253, 833, + 834, 86, 87, 88, 89, 90, 91, 832, 2210, 845, + 846, 2246, 849, 850, 851, 852, 509, 2184, 855, 856, + 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 627, 620, 811, 958, 1184, 80, 2239, + 1998, 1614, 2209, 2183, 810, 1663, 1901, 788, 2029, 112, + 786, 1633, 1712, 177, 789, 1632, 787, 1951, 1573, 1952, + 1953, 842, 958, 835, 836, 837, 1525, 1526, 785, 1871, + 2146, 983, 982, 992, 993, 985, 986, 987, 988, 989, + 990, 991, 984, 1661, 107, 994, 453, 454, 1113, 848, + 1114, 1456, 847, 1524, 171, 926, 564, 928, 570, 571, + 568, 569, 968, 567, 566, 565, 912, 1515, 489, 790, + 899, 900, 35, 572, 573, 72, 39, 40, 112, 113, + 1777, 905, 911, 1776, 
583, 582, 1778, 1794, 968, 935, + 155, 1542, 171, 1860, 925, 927, 2188, 2020, 1572, 107, + 172, 105, 2018, 1826, 1361, 1362, 1363, 499, 1359, 503, + 104, 182, 183, 184, 877, 497, 585, 113, 897, 135, + 1443, 488, 898, 899, 900, 1306, 1304, 1305, 155, 1832, + 1854, 1781, 1596, 1629, 1301, 1308, 1272, 1309, 1855, 1310, + 1296, 956, 918, 2238, 152, 872, 153, 71, 916, 917, + 914, 915, 932, 881, 1863, 170, 1641, 964, 854, 145, + 913, 1300, 853, 176, 134, 489, 107, 2206, 99, 1861, + 1862, 1298, 934, 102, 2130, 906, 101, 100, 1273, 2076, + 1274, 818, 152, 964, 153, 489, 1598, 1492, 1302, 1199, + 1200, 144, 143, 170, 816, 827, 1417, 924, 826, 825, + 923, 929, 1299, 824, 595, 823, 106, 822, 821, 820, + 815, 791, 1191, 828, 156, 1515, 922, 2077, 488, 109, + 774, 2254, 1970, 105, 161, 803, 774, 175, 2225, 1646, + 772, 909, 180, 774, 885, 1211, 1210, 180, 488, 802, + 180, 139, 1201, 146, 614, 1198, 887, 140, 141, 2147, + 1714, 1716, 156, 1181, 878, 1910, 1864, 1631, 930, 1620, + 1313, 106, 161, 944, 838, 1820, 501, 501, 501, 1885, + 1574, 809, 1628, 1909, 819, 809, 1908, 895, 2182, 901, + 902, 903, 904, 931, 501, 501, 489, 817, 784, 783, + 1773, 782, 1842, 963, 960, 961, 962, 967, 969, 966, + 936, 965, 809, 884, 780, 459, 451, 2251, 959, 2168, + 2189, 1640, 1649, 1887, 1639, 2049, 1616, 1648, 950, 963, + 960, 961, 962, 967, 969, 966, 1649, 965, 106, 844, + 148, 1648, 1662, 1950, 959, 809, 1738, 933, 1681, 488, + 2174, 1006, 1007, 1606, 1520, 1089, 1019, 1715, 876, 890, + 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, + 1531, 984, 908, 180, 994, 994, 1470, 1355, 148, 1284, + 1283, 1285, 1286, 1287, 910, 1889, 920, 1893, 974, 1888, + 888, 1886, 1791, 1786, 896, 809, 1891, 1004, 2138, 830, + 1935, 501, 941, 942, 180, 1890, 180, 180, 880, 501, + 73, 1066, 182, 183, 184, 501, 1408, 808, 1892, 1894, + 1326, 808, 1692, 812, 802, 1297, 1022, 953, 623, 951, + 952, 894, 142, 813, 1065, 809, 1787, 1115, 873, 954, + 874, 1689, 879, 875, 136, 973, 971, 137, 808, 1061, + 1615, 
814, 94, 1903, 1441, 802, 805, 806, 1789, 774, + 1580, 1784, 974, 799, 803, 2249, 1613, 1094, 2250, 1079, + 2248, 1611, 1409, 1785, 818, 182, 183, 184, 971, 1799, + 1472, 808, 798, 843, 1036, 1039, 1041, 1043, 1044, 1046, + 1048, 1049, 1040, 1042, 974, 1045, 1047, 95, 1050, 816, + 1475, 1476, 921, 1441, 1058, 1699, 1082, 1390, 1608, 2240, + 149, 154, 151, 157, 158, 159, 160, 162, 163, 164, + 165, 1388, 1389, 1387, 1067, 1955, 166, 167, 168, 169, + 627, 808, 1612, 1792, 1790, 1800, 1327, 2241, 802, 805, + 806, 2255, 774, 1471, 893, 2234, 799, 803, 149, 154, + 151, 157, 158, 159, 160, 162, 163, 164, 165, 180, + 174, 1006, 1007, 1173, 166, 167, 168, 169, 972, 973, + 971, 808, 2064, 1185, 1186, 1187, 2230, 812, 802, 1608, + 1006, 1007, 972, 973, 971, 71, 974, 813, 501, 2063, + 1207, 987, 988, 989, 990, 991, 984, 1386, 1216, 994, + 974, 1110, 1220, 1610, 2231, 501, 501, 1976, 501, 2256, + 501, 501, 1912, 501, 501, 501, 501, 501, 501, 1687, + 1189, 1190, 1378, 1380, 1381, 1808, 1203, 1686, 501, 1666, + 1667, 1668, 180, 1256, 1379, 985, 986, 987, 988, 989, + 990, 991, 984, 1217, 1788, 994, 1291, 1809, 1269, 1196, + 1289, 1807, 972, 973, 971, 1577, 2026, 1215, 779, 501, + 1913, 613, 972, 973, 971, 1292, 1253, 180, 1251, 1252, + 974, 972, 973, 971, 972, 973, 971, 180, 1259, 1260, + 974, 180, 1905, 1277, 1265, 1266, 1276, 1279, 610, 974, + 1213, 1213, 974, 618, 1275, 1267, 1214, 180, 1179, 1261, + 1258, 1257, 1232, 1188, 180, 1290, 1193, 1194, 1857, 1288, + 1206, 180, 180, 180, 180, 180, 180, 180, 180, 180, + 501, 501, 501, 1192, 1225, 2233, 1226, 2232, 1228, 1230, + 2221, 1330, 1234, 1236, 1238, 1240, 1242, 2219, 1334, 1322, + 1336, 1337, 1338, 1339, 2099, 1341, 1278, 180, 2061, 1688, + 1254, 615, 616, 2037, 514, 1958, 182, 183, 184, 1356, + 1780, 1914, 1817, 1328, 1329, 983, 982, 992, 993, 985, + 986, 987, 988, 989, 990, 991, 984, 1333, 1805, 994, + 1657, 1384, 1624, 1623, 1340, 1407, 1323, 1280, 788, 1268, + 112, 1314, 1264, 1263, 1410, 1319, 1262, 787, 540, 539, + 542, 
543, 544, 545, 1064, 1077, 596, 541, 501, 546, + 2128, 1382, 2127, 1332, 1391, 1392, 1393, 1394, 1395, 1396, + 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1418, + 35, 1411, 1412, 972, 973, 971, 1996, 1366, 182, 183, + 184, 1834, 501, 501, 1819, 1351, 1352, 1353, 182, 183, + 184, 974, 1590, 180, 80, 1385, 1983, 2224, 1419, 1539, + 1429, 1432, 1983, 2180, 1983, 2169, 1442, 501, 1420, 1934, + 1424, 82, 1464, 1444, 180, 2151, 596, 501, 2117, 1022, + 1734, 180, 1734, 180, 1983, 2136, 1465, 2079, 596, 1608, + 596, 180, 180, 182, 183, 184, 1477, 1588, 501, 1418, + 1484, 501, 182, 183, 184, 71, 1270, 1516, 1448, 1449, + 2047, 596, 501, 1983, 1988, 1968, 1967, 623, 1964, 1965, + 623, 1964, 1963, 1510, 1516, 1421, 1483, 596, 1489, 1515, + 1851, 2172, 596, 1176, 1836, 1829, 1830, 1483, 1420, 1495, + 596, 1494, 1550, 1551, 1552, 970, 596, 1485, 35, 1495, + 1535, 1934, 1534, 1609, 983, 982, 992, 993, 985, 986, + 987, 988, 989, 990, 991, 984, 1767, 501, 994, 1517, + 1483, 180, 2044, 1741, 1515, 501, 2066, 1519, 35, 180, + 1587, 1589, 1513, 1538, 1176, 1175, 1517, 1121, 1120, 970, + 1487, 1923, 1495, 501, 1515, 2137, 1742, 1983, 1565, 501, + 1934, 1966, 1495, 1216, 1518, 1216, 1522, 1523, 1608, 1704, + 1703, 1571, 589, 1607, 1674, 1483, 1537, 1536, 1608, 627, + 1521, 1591, 627, 71, 2067, 2068, 2069, 1473, 1814, 1452, + 1364, 1594, 992, 993, 985, 986, 987, 988, 989, 990, + 991, 984, 1312, 501, 994, 1407, 1107, 793, 792, 71, + 1407, 1407, 2088, 71, 1543, 1995, 1544, 1545, 1546, 1547, + 2055, 1178, 1604, 1247, 1605, 1566, 580, 1561, 1562, 1583, + 1584, 1585, 1555, 1556, 1557, 1558, 1578, 1576, 1564, 1575, + 1856, 1617, 1601, 1560, 1554, 180, 811, 71, 2070, 180, + 180, 180, 180, 180, 1553, 810, 1213, 1600, 1599, 1566, + 1618, 1603, 1294, 180, 180, 180, 180, 1208, 1204, 1174, + 180, 1248, 1249, 1250, 1619, 181, 180, 96, 181, 1621, + 1622, 181, 1813, 180, 1244, 177, 502, 1859, 181, 2089, + 596, 1184, 2243, 2071, 2072, 2237, 181, 982, 992, 993, + 985, 986, 987, 988, 989, 990, 
991, 984, 180, 501, + 994, 1422, 1423, 1941, 1652, 1653, 1938, 1939, 502, 1655, + 2032, 502, 181, 502, 1923, 1825, 1656, 1814, 1824, 1245, + 1246, 1823, 1581, 1357, 1315, 1627, 983, 982, 992, 993, + 985, 986, 987, 988, 989, 990, 991, 984, 1758, 1756, + 994, 1384, 1944, 1759, 1757, 1644, 1760, 1466, 1504, 1505, + 1943, 1755, 1754, 2227, 2208, 975, 1915, 983, 982, 992, + 993, 985, 986, 987, 988, 989, 990, 991, 984, 1723, + 1076, 994, 2048, 1670, 1671, 1672, 1986, 1732, 2162, 2164, + 1425, 1426, 1731, 2229, 1431, 1434, 1435, 2165, 2193, 181, + 98, 514, 2212, 180, 1660, 1683, 2192, 2214, 2196, 1721, + 1032, 180, 1500, 1503, 1504, 1505, 1501, 1722, 1502, 1506, + 1447, 2159, 1311, 1450, 1451, 1385, 1669, 1500, 1503, 1504, + 1505, 1501, 103, 1502, 1506, 180, 581, 1938, 1939, 1818, + 840, 839, 1071, 1074, 1720, 1437, 180, 180, 180, 180, + 180, 452, 1069, 2007, 1813, 587, 1727, 1682, 180, 1870, + 1438, 943, 180, 1844, 1070, 180, 180, 1843, 1739, 180, + 180, 180, 1698, 113, 1743, 2115, 1960, 606, 602, 1061, + 173, 1959, 1779, 455, 1710, 1468, 1602, 1748, 1222, 1221, + 1209, 1718, 2042, 603, 1765, 1821, 1736, 1475, 1476, 1318, + 1798, 2129, 2083, 1726, 1508, 590, 591, 1735, 1307, 1665, + 593, 1795, 1796, 1737, 2220, 2218, 1080, 1081, 605, 1797, + 604, 1801, 1802, 1803, 1749, 1782, 1768, 1752, 2217, 1322, + 1770, 1761, 180, 1750, 1751, 1766, 1753, 1730, 1771, 2197, + 1774, 2195, 82, 501, 2041, 1729, 1982, 1592, 594, 501, + 2040, 1918, 501, 1734, 1216, 2245, 2244, 1783, 1693, 501, + 80, 1837, 1690, 1816, 1090, 1571, 1083, 2245, 2166, 606, + 602, 1848, 1806, 1957, 1469, 589, 1839, 85, 77, 180, + 1, 472, 1453, 1059, 1815, 603, 484, 2235, 1281, 1833, + 1847, 1271, 1999, 2085, 1989, 1569, 801, 180, 138, 1532, + 1846, 1533, 2176, 93, 767, 1419, 92, 1196, 599, 600, + 605, 804, 604, 1838, 907, 1420, 1593, 2080, 1793, 1541, + 1127, 1125, 1126, 1124, 1845, 1129, 1128, 1123, 1358, 498, + 1507, 178, 501, 1116, 1084, 841, 462, 1969, 1407, 1354, + 1625, 468, 1002, 1866, 1728, 1775, 1865, 
624, 617, 1929, + 2190, 1881, 2158, 2160, 2111, 2163, 2156, 2228, 2211, 1540, + 1467, 1072, 1883, 2039, 1917, 1697, 1031, 1439, 501, 1876, + 1877, 1874, 1099, 523, 1463, 1377, 538, 1868, 181, 180, + 1869, 535, 536, 181, 1897, 1898, 181, 1899, 1900, 501, + 1882, 1478, 1896, 1740, 1880, 501, 501, 976, 1906, 1907, + 1924, 1676, 521, 515, 1902, 1677, 1927, 1895, 1881, 1091, + 1499, 1497, 502, 502, 502, 1496, 1684, 1685, 180, 1316, + 1921, 1103, 1691, 1940, 1933, 1694, 1695, 1936, 1097, 1482, + 502, 502, 1630, 1701, 1748, 1702, 1853, 955, 1705, 1706, + 1707, 1708, 1709, 1324, 1946, 598, 1948, 510, 1949, 1942, + 97, 1436, 2145, 1664, 1719, 2028, 597, 61, 38, 505, + 2204, 946, 607, 32, 31, 30, 1961, 1962, 29, 1977, + 28, 180, 23, 1954, 180, 180, 180, 22, 21, 1947, + 501, 1956, 1678, 1679, 20, 19, 1911, 25, 18, 17, + 16, 108, 48, 180, 45, 43, 115, 114, 46, 1972, + 1763, 1764, 42, 1696, 1974, 1975, 1973, 882, 27, 181, + 2000, 501, 501, 501, 1932, 180, 26, 1990, 1984, 1373, + 1374, 1375, 1376, 15, 2008, 1985, 1987, 1993, 14, 1992, + 13, 12, 1571, 11, 10, 9, 5, 502, 4, 949, + 181, 550, 181, 181, 24, 502, 1020, 2, 0, 0, + 0, 502, 0, 0, 2005, 2006, 0, 0, 0, 0, + 0, 0, 0, 2011, 0, 0, 0, 0, 0, 0, + 2009, 0, 0, 0, 1427, 1428, 2016, 0, 0, 0, + 2038, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 179, 0, 0, 458, 0, 0, 496, 0, 0, 0, + 0, 2043, 0, 458, 0, 0, 0, 0, 0, 0, + 0, 458, 514, 0, 2052, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 2058, 0, 1748, 611, 611, + 2060, 2059, 2062, 501, 501, 0, 0, 458, 0, 0, + 0, 2051, 2074, 0, 0, 0, 501, 0, 0, 501, + 0, 2073, 0, 0, 2057, 2084, 0, 0, 0, 0, + 0, 0, 0, 0, 1529, 2092, 0, 0, 2087, 1878, + 1879, 2013, 2014, 0, 2015, 0, 0, 2017, 0, 2019, + 0, 2091, 0, 0, 501, 501, 501, 180, 0, 0, + 2090, 0, 0, 0, 0, 181, 0, 0, 501, 0, + 501, 0, 2106, 2107, 2109, 0, 501, 0, 1927, 2098, + 2114, 2108, 1927, 2120, 458, 2123, 2116, 0, 2093, 2094, + 2095, 2096, 2097, 1567, 502, 0, 2100, 2101, 180, 2118, + 0, 2125, 2122, 2126, 0, 1930, 0, 0, 2124, 501, + 180, 502, 502, 0, 502, 0, 502, 502, 
2139, 502, + 502, 502, 502, 502, 502, 2132, 1945, 0, 0, 0, + 0, 0, 0, 0, 502, 0, 2135, 0, 181, 0, + 0, 0, 0, 0, 2155, 0, 0, 0, 0, 0, + 0, 1927, 2167, 0, 0, 0, 0, 0, 501, 501, + 0, 0, 0, 0, 0, 502, 0, 0, 0, 0, + 2175, 0, 501, 181, 0, 0, 0, 2087, 2177, 0, + 0, 0, 2170, 181, 0, 2025, 0, 181, 2187, 501, + 0, 2194, 0, 501, 0, 2198, 0, 0, 2200, 0, + 0, 0, 2203, 181, 0, 0, 2207, 0, 0, 0, + 181, 0, 0, 0, 0, 2216, 2215, 181, 181, 181, + 181, 181, 181, 181, 181, 181, 502, 502, 502, 1748, + 2031, 2226, 0, 0, 0, 171, 2201, 0, 0, 0, + 0, 2010, 0, 0, 0, 2012, 0, 0, 0, 0, + 0, 0, 0, 181, 0, 0, 2021, 2022, 2242, 0, + 113, 0, 0, 0, 0, 0, 0, 2252, 0, 0, + 0, 155, 2036, 0, 0, 0, 0, 983, 982, 992, + 993, 985, 986, 987, 988, 989, 990, 991, 984, 2045, + 2046, 994, 0, 2050, 983, 982, 992, 993, 985, 986, + 987, 988, 989, 990, 991, 984, 0, 0, 994, 0, + 0, 0, 0, 0, 502, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 152, 0, 153, 0, 0, + 0, 0, 0, 0, 0, 0, 170, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 502, 502, + 2078, 0, 0, 0, 0, 0, 0, 0, 0, 181, + 1700, 0, 0, 458, 0, 0, 0, 0, 458, 0, + 0, 458, 0, 502, 0, 0, 0, 0, 0, 0, + 181, 0, 0, 502, 0, 0, 0, 181, 0, 181, + 1724, 1725, 1074, 0, 2103, 156, 0, 181, 181, 0, + 0, 0, 0, 0, 502, 161, 0, 502, 0, 0, + 0, 978, 0, 981, 0, 0, 0, 0, 502, 995, + 996, 997, 998, 999, 1000, 1001, 0, 979, 980, 977, + 983, 982, 992, 993, 985, 986, 987, 988, 989, 990, + 991, 984, 0, 0, 994, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 2141, 2142, 2143, 2144, 2024, 2148, + 0, 2149, 2150, 2152, 0, 0, 0, 2153, 2154, 0, + 0, 0, 0, 502, 0, 0, 0, 181, 0, 0, + 0, 502, 0, 0, 0, 181, 0, 0, 0, 0, + 0, 0, 0, 0, 458, 0, 0, 0, 0, 502, + 0, 0, 0, 0, 0, 502, 2181, 0, 0, 1875, + 611, 148, 552, 34, 0, 0, 0, 0, 0, 0, + 0, 2023, 0, 0, 0, 458, 0, 458, 1106, 983, + 982, 992, 993, 985, 986, 987, 988, 989, 990, 991, + 984, 0, 0, 994, 0, 0, 0, 34, 0, 502, + 983, 982, 992, 993, 985, 986, 987, 988, 989, 990, + 991, 984, 2222, 2223, 994, 0, 0, 983, 982, 992, + 993, 985, 986, 987, 988, 989, 990, 991, 984, 0, + 0, 994, 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 181, 588, 0, 0, 181, 181, 181, 181, 181, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 181, + 181, 181, 181, 0, 0, 0, 181, 0, 1904, 0, + 0, 0, 181, 0, 0, 0, 0, 0, 0, 181, + 983, 982, 992, 993, 985, 986, 987, 988, 989, 990, + 991, 984, 0, 0, 994, 1675, 0, 0, 0, 0, + 0, 0, 0, 1919, 181, 502, 0, 0, 0, 0, + 0, 0, 0, 549, 0, 983, 982, 992, 993, 985, + 986, 987, 988, 989, 990, 991, 984, 0, 0, 994, + 458, 149, 154, 151, 157, 158, 159, 160, 162, 163, + 164, 165, 0, 0, 0, 0, 0, 166, 167, 168, + 169, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1219, 500, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1219, 1219, 181, + 0, 0, 0, 458, 0, 625, 0, 181, 771, 0, + 778, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 181, 0, 0, 0, 0, 0, 0, 458, 0, + 0, 0, 181, 181, 181, 181, 181, 0, 458, 0, + 0, 0, 1321, 0, 181, 0, 0, 0, 181, 0, + 0, 181, 181, 0, 0, 181, 181, 181, 458, 0, + 0, 0, 0, 0, 2030, 458, 0, 0, 0, 0, + 0, 0, 1342, 1343, 458, 458, 458, 458, 458, 458, + 458, 0, 0, 0, 0, 0, 0, 514, 0, 0, + 0, 0, 0, 0, 2053, 0, 0, 2054, 0, 0, + 2056, 0, 0, 0, 0, 0, 0, 0, 458, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 181, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 502, + 0, 0, 0, 0, 0, 502, 0, 0, 502, 0, + 0, 0, 0, 0, 0, 502, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 181, 0, 0, 0, 0, + 611, 1321, 0, 0, 0, 611, 611, 0, 0, 611, + 611, 611, 0, 181, 0, 1219, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2113, + 514, 0, 0, 0, 0, 611, 611, 611, 611, 611, + 0, 0, 0, 0, 1461, 0, 0, 0, 502, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 458, 0, 0, 938, 938, + 938, 1321, 458, 0, 458, 0, 0, 0, 0, 0, + 0, 0, 458, 458, 502, 0, 0, 0, 34, 0, + 0, 0, 0, 0, 0, 181, 0, 0, 0, 0, + 0, 1003, 1005, 0, 0, 502, 0, 0, 0, 0, + 0, 502, 502, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1018, 0, 181, 0, 1023, 1024, 1025, 1026, + 1027, 1028, 1029, 1030, 0, 1033, 1035, 1038, 1038, 1038, + 1035, 1038, 1038, 1035, 1038, 1051, 1052, 1053, 1054, 1055, + 1056, 
1057, 458, 0, 0, 0, 0, 1063, 0, 0, + 1586, 34, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 181, 0, 0, + 181, 181, 181, 0, 0, 0, 502, 0, 1100, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 181, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 625, + 625, 625, 0, 0, 0, 0, 0, 502, 502, 502, + 0, 181, 0, 0, 0, 0, 0, 945, 947, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1062, 458, 0, 0, 0, + 458, 458, 458, 458, 458, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 458, 458, 458, 458, 0, 0, + 0, 1650, 0, 0, 0, 0, 0, 458, 0, 0, + 0, 0, 0, 0, 458, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 457, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 504, 0, 458, + 0, 0, 0, 0, 1087, 584, 0, 0, 0, 502, + 502, 0, 625, 0, 0, 0, 0, 0, 1117, 0, + 0, 0, 502, 0, 0, 502, 0, 0, 0, 0, + 0, 775, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 611, 611, 0, + 502, 502, 502, 181, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 502, 0, 502, 0, 611, 0, + 0, 0, 502, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 458, 0, 0, 0, 0, 0, + 0, 0, 1461, 0, 181, 0, 0, 0, 870, 0, + 0, 0, 0, 0, 0, 502, 181, 0, 0, 0, + 0, 0, 0, 0, 0, 611, 458, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1219, 458, 458, 458, + 458, 458, 0, 0, 0, 0, 0, 0, 0, 1762, + 0, 0, 0, 458, 0, 0, 458, 458, 0, 0, + 458, 1772, 1321, 0, 502, 502, 0, 0, 0, 0, + 0, 0, 938, 938, 938, 0, 0, 0, 502, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 771, 0, 0, 0, 502, 0, 0, 0, 502, + 0, 0, 0, 0, 1218, 0, 0, 0, 1224, 1224, + 0, 1224, 0, 1224, 1224, 0, 1233, 1224, 1224, 1224, + 1224, 1224, 0, 458, 0, 0, 0, 0, 0, 1218, + 1218, 771, 0, 0, 0, 0, 0, 0, 1219, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1321, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1293, 0, 0, 0, 0, 0, 0, 0, + 458, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 458, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 611, 625, 625, 625, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 171, 0, + 0, 0, 0, 1511, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 113, 0, 
135, 0, 0, 0, 0, + 458, 0, 0, 0, 155, 0, 0, 0, 0, 0, + 0, 0, 0, 1219, 0, 0, 0, 883, 0, 0, + 0, 0, 889, 0, 0, 891, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 145, 0, 0, 0, 458, + 134, 1413, 0, 625, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1218, 152, 0, + 153, 0, 0, 0, 0, 122, 123, 144, 143, 170, + 0, 0, 0, 0, 0, 1445, 1446, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 458, 0, 0, 458, 458, 458, 0, 0, + 1479, 0, 0, 0, 1219, 0, 0, 0, 0, 0, + 1087, 0, 0, 625, 458, 0, 0, 139, 120, 146, + 127, 119, 0, 140, 141, 0, 0, 0, 156, 0, + 0, 625, 0, 0, 625, 0, 458, 0, 161, 128, + 0, 0, 0, 0, 0, 771, 0, 0, 0, 0, + 0, 0, 0, 131, 129, 124, 125, 126, 130, 0, + 0, 0, 0, 121, 0, 0, 0, 0, 0, 0, + 0, 0, 132, 0, 0, 0, 0, 0, 0, 1093, + 0, 0, 1104, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 778, 0, 0, 0, 0, 0, 1219, 0, 1582, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 171, 771, 0, 0, 0, + 0, 0, 778, 0, 0, 0, 1195, 0, 0, 0, + 0, 0, 0, 0, 148, 0, 0, 0, 0, 0, + 113, 0, 135, 0, 0, 0, 0, 0, 0, 0, + 0, 155, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 771, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1680, 145, 0, 588, 0, 0, 134, 142, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1461, 0, + 136, 0, 0, 137, 0, 152, 0, 153, 0, 0, + 0, 0, 1199, 1200, 144, 143, 170, 0, 182, 183, + 184, 1717, 0, 0, 1122, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 458, + 0, 0, 0, 0, 0, 0, 0, 1100, 0, 0, + 0, 458, 0, 0, 1744, 1745, 0, 0, 1100, 1100, + 1100, 1100, 1100, 0, 139, 1201, 146, 0, 1198, 0, + 140, 141, 1659, 0, 1511, 156, 0, 1100, 477, 0, + 0, 1100, 0, 0, 0, 161, 0, 476, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1255, 474, 0, + 0, 0, 0, 0, 149, 154, 151, 157, 158, 159, + 160, 162, 163, 164, 165, 0, 0, 0, 0, 0, + 166, 167, 168, 169, 0, 0, 0, 0, 1219, 0, + 0, 0, 1303, 0, 0, 0, 0, 471, 0, 0, + 0, 0, 1317, 0, 0, 0, 483, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1069, 1713, 0, 0, 0, 51, 71, 70, 0, - 0, 60, 61, 49, 0, 1719, 0, 0, 0, 1150, - 0, 1726, 0, 0, 1719, 0, 0, 0, 0, 593, - 0, 1731, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1082, 0, 
0, 53, 54, 0, - 55, 56, 57, 58, 0, 0, 144, 149, 146, 152, - 153, 154, 155, 157, 158, 159, 160, 0, 0, 0, - 593, 0, 161, 162, 163, 164, 1670, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1095, 1098, 1099, 1100, 1101, 1102, 1103, 593, 1104, 1105, - 1106, 1107, 1108, 1083, 1084, 1085, 1086, 1067, 1068, 1096, - 0, 1070, 0, 1071, 1072, 1073, 1074, 1075, 1076, 1077, - 1078, 1079, 1080, 1087, 1088, 1089, 1090, 1091, 1092, 1093, - 1094, 0, 0, 1156, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 69, 0, 0, 0, 0, - 0, 0, 0, 593, 0, 0, 1150, 0, 0, 1817, - 1156, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1743, 0, 0, - 0, 1097, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1757, 0, 0, - 0, 1582, 0, 0, 557, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 736, 0, 0, 1150, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1619, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1882, 1883, 1884, 0, 0, - 0, 0, 0, 0, 0, 1037, 0, 0, 0, 1802, - 0, 0, 1645, 1646, 0, 0, 1037, 1037, 1037, 1037, - 1037, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1422, 0, 0, 1037, 0, 0, 0, 1037, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1150, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1861, 1862, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1719, 1956, 0, - 0, 0, 0, 1872, 0, 0, 0, 0, 0, 0, - 1719, 0, 0, 593, 0, 0, 0, 0, 1732, 0, - 0, 0, 0, 0, 0, 1885, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1985, 1985, 1985, 0, - 0, 0, 0, 0, 0, 0, 893, 0, 0, 0, - 2000, 0, 2002, 0, 0, 0, 0, 0, 1719, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1719, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1814, 0, 32, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 593, 593, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1037, 0, - 0, 2071, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1150, 0, 2083, 0, 0, 0, 0, 0, - 0, 0, 
1719, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2017, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2026, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1906, 0, 0, - 0, 0, 0, 0, 1912, 1913, 1914, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1814, 0, 32, 0, 1814, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1814, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 714, 701, 32, 2057, 652, 717, 623, - 641, 726, 643, 646, 684, 604, 665, 321, 638, 0, - 627, 600, 634, 601, 625, 654, 237, 658, 622, 703, - 668, 716, 280, 0, 628, 334, 686, 370, 223, 289, - 287, 396, 246, 240, 236, 222, 265, 294, 332, 387, - 326, 723, 284, 675, 0, 379, 306, 0, 0, 0, - 656, 706, 663, 697, 651, 685, 612, 674, 718, 639, - 682, 719, 270, 221, 192, 318, 380, 249, 0, 0, - 0, 174, 175, 176, 0, 2064, 2065, 0, 0, 0, - 0, 0, 213, 0, 219, 679, 713, 636, 681, 233, - 268, 239, 232, 394, 683, 729, 599, 676, 0, 602, - 605, 725, 709, 631, 632, 0, 0, 0, 0, 0, - 0, 0, 655, 664, 694, 649, 0, 0, 0, 0, - 0, 0, 0, 0, 629, 0, 673, 0, 0, 0, - 608, 603, 0, 0, 0, 0, 653, 0, 0, 0, - 611, 0, 630, 695, 0, 597, 256, 606, 307, 699, - 708, 650, 421, 712, 648, 647, 715, 690, 609, 705, - 642, 279, 607, 276, 188, 202, 0, 640, 317, 355, - 360, 704, 626, 635, 224, 633, 358, 330, 409, 209, - 247, 352, 335, 356, 672, 688, 357, 285, 398, 347, - 408, 
422, 423, 231, 311, 415, 391, 419, 431, 203, - 228, 324, 384, 412, 376, 304, 395, 275, 375, 254, - 191, 283, 195, 386, 406, 214, 368, 0, 0, 0, - 197, 404, 383, 301, 272, 273, 196, 0, 351, 235, - 252, 226, 320, 401, 402, 225, 433, 204, 418, 199, - 205, 417, 313, 397, 405, 302, 293, 198, 403, 300, - 292, 278, 245, 261, 345, 288, 346, 262, 309, 308, - 310, 0, 193, 0, 381, 413, 434, 211, 621, 700, - 393, 427, 430, 0, 348, 212, 253, 244, 344, 251, - 281, 426, 428, 429, 210, 342, 259, 312, 206, 264, - 377, 277, 286, 692, 728, 329, 359, 215, 411, 378, - 616, 620, 614, 615, 666, 667, 617, 720, 721, 722, - 696, 610, 0, 618, 619, 0, 702, 710, 711, 671, - 187, 200, 282, 724, 349, 250, 432, 416, 414, 598, - 613, 230, 624, 0, 0, 637, 644, 645, 657, 659, - 660, 661, 662, 670, 677, 678, 680, 687, 689, 691, - 693, 698, 707, 727, 189, 190, 201, 208, 217, 229, - 242, 248, 257, 260, 263, 266, 267, 269, 274, 291, - 295, 296, 297, 298, 314, 315, 316, 319, 322, 323, - 325, 327, 328, 331, 337, 338, 339, 340, 341, 343, - 350, 354, 361, 362, 363, 364, 365, 366, 367, 371, - 372, 373, 374, 382, 385, 399, 400, 410, 420, 424, - 258, 407, 425, 0, 290, 669, 194, 220, 207, 227, - 241, 243, 271, 299, 305, 333, 336, 255, 238, 218, - 353, 216, 369, 388, 389, 390, 392, 303, 234, 714, - 701, 0, 0, 652, 717, 623, 641, 726, 643, 646, - 684, 604, 665, 321, 638, 0, 627, 600, 634, 601, - 625, 654, 237, 658, 622, 703, 668, 716, 280, 0, - 628, 334, 686, 370, 223, 289, 287, 396, 246, 240, - 236, 222, 265, 294, 332, 387, 326, 723, 284, 675, - 0, 379, 306, 0, 0, 0, 656, 706, 663, 697, - 651, 685, 612, 674, 718, 639, 682, 719, 270, 221, - 192, 318, 380, 249, 0, 0, 0, 174, 175, 176, - 0, 0, 0, 0, 0, 0, 0, 0, 213, 0, - 219, 679, 713, 636, 681, 233, 268, 239, 232, 394, - 683, 729, 599, 676, 0, 602, 605, 725, 709, 631, - 632, 0, 0, 0, 0, 0, 0, 0, 655, 664, - 694, 649, 0, 0, 0, 0, 0, 0, 1806, 0, - 629, 0, 673, 0, 0, 0, 608, 603, 0, 0, - 0, 0, 653, 0, 0, 0, 611, 0, 630, 695, - 0, 597, 256, 606, 307, 699, 708, 650, 
421, 712, - 648, 647, 715, 690, 609, 705, 642, 279, 607, 276, - 188, 202, 0, 640, 317, 355, 360, 704, 626, 635, - 224, 633, 358, 330, 409, 209, 247, 352, 335, 356, - 672, 688, 357, 285, 398, 347, 408, 422, 423, 231, - 311, 415, 391, 419, 431, 203, 228, 324, 384, 412, - 376, 304, 395, 275, 375, 254, 191, 283, 195, 386, - 406, 214, 368, 0, 0, 0, 197, 404, 383, 301, - 272, 273, 196, 0, 351, 235, 252, 226, 320, 401, - 402, 225, 433, 204, 418, 199, 205, 417, 313, 397, - 405, 302, 293, 198, 403, 300, 292, 278, 245, 261, - 345, 288, 346, 262, 309, 308, 310, 0, 193, 0, - 381, 413, 434, 211, 621, 700, 393, 427, 430, 0, - 348, 212, 253, 244, 344, 251, 281, 426, 428, 429, - 210, 342, 259, 312, 206, 264, 377, 277, 286, 692, - 728, 329, 359, 215, 411, 378, 616, 620, 614, 615, - 666, 667, 617, 720, 721, 722, 696, 610, 0, 618, - 619, 0, 702, 710, 711, 671, 187, 200, 282, 724, - 349, 250, 432, 416, 414, 598, 613, 230, 624, 0, - 0, 637, 644, 645, 657, 659, 660, 661, 662, 670, - 677, 678, 680, 687, 689, 691, 693, 698, 707, 727, - 189, 190, 201, 208, 217, 229, 242, 248, 257, 260, - 263, 266, 267, 269, 274, 291, 295, 296, 297, 298, - 314, 315, 316, 319, 322, 323, 325, 327, 328, 331, - 337, 338, 339, 340, 341, 343, 350, 354, 361, 362, - 363, 364, 365, 366, 367, 371, 372, 373, 374, 382, - 385, 399, 400, 410, 420, 424, 258, 407, 425, 0, - 290, 669, 194, 220, 207, 227, 241, 243, 271, 299, - 305, 333, 336, 255, 238, 218, 353, 216, 369, 388, - 389, 390, 392, 303, 234, 714, 701, 0, 0, 652, - 717, 623, 641, 726, 643, 646, 684, 604, 665, 321, - 638, 0, 627, 600, 634, 601, 625, 654, 237, 658, - 622, 703, 668, 716, 280, 0, 628, 334, 686, 370, - 223, 289, 287, 396, 246, 240, 236, 222, 265, 294, - 332, 387, 326, 723, 284, 675, 0, 379, 306, 0, - 0, 0, 656, 706, 663, 697, 651, 685, 612, 674, - 718, 639, 682, 719, 270, 221, 192, 318, 380, 249, - 67, 0, 0, 174, 175, 176, 0, 0, 0, 0, - 0, 0, 0, 0, 213, 0, 219, 679, 713, 636, - 681, 233, 268, 239, 232, 394, 683, 729, 599, 676, - 0, 602, 605, 725, 709, 
631, 632, 0, 0, 0, - 0, 0, 0, 0, 655, 664, 694, 649, 0, 0, - 0, 0, 0, 0, 0, 0, 629, 0, 673, 0, - 0, 0, 608, 603, 0, 0, 0, 0, 653, 0, - 0, 0, 611, 0, 630, 695, 0, 597, 256, 606, - 307, 699, 708, 650, 421, 712, 648, 647, 715, 690, - 609, 705, 642, 279, 607, 276, 188, 202, 0, 640, - 317, 355, 360, 704, 626, 635, 224, 633, 358, 330, - 409, 209, 247, 352, 335, 356, 672, 688, 357, 285, - 398, 347, 408, 422, 423, 231, 311, 415, 391, 419, - 431, 203, 228, 324, 384, 412, 376, 304, 395, 275, - 375, 254, 191, 283, 195, 386, 406, 214, 368, 0, - 0, 0, 197, 404, 383, 301, 272, 273, 196, 0, - 351, 235, 252, 226, 320, 401, 402, 225, 433, 204, - 418, 199, 205, 417, 313, 397, 405, 302, 293, 198, - 403, 300, 292, 278, 245, 261, 345, 288, 346, 262, - 309, 308, 310, 0, 193, 0, 381, 413, 434, 211, - 621, 700, 393, 427, 430, 0, 348, 212, 253, 244, - 344, 251, 281, 426, 428, 429, 210, 342, 259, 312, - 206, 264, 377, 277, 286, 692, 728, 329, 359, 215, - 411, 378, 616, 620, 614, 615, 666, 667, 617, 720, - 721, 722, 696, 610, 0, 618, 619, 0, 702, 710, - 711, 671, 187, 200, 282, 724, 349, 250, 432, 416, - 414, 598, 613, 230, 624, 0, 0, 637, 644, 645, - 657, 659, 660, 661, 662, 670, 677, 678, 680, 687, - 689, 691, 693, 698, 707, 727, 189, 190, 201, 208, - 217, 229, 242, 248, 257, 260, 263, 266, 267, 269, - 274, 291, 295, 296, 297, 298, 314, 315, 316, 319, - 322, 323, 325, 327, 328, 331, 337, 338, 339, 340, - 341, 343, 350, 354, 361, 362, 363, 364, 365, 366, - 367, 371, 372, 373, 374, 382, 385, 399, 400, 410, - 420, 424, 258, 407, 425, 0, 290, 669, 194, 220, - 207, 227, 241, 243, 271, 299, 305, 333, 336, 255, - 238, 218, 353, 216, 369, 388, 389, 390, 392, 303, - 234, 714, 701, 0, 0, 652, 717, 623, 641, 726, - 643, 646, 684, 604, 665, 321, 638, 0, 627, 600, - 634, 601, 625, 654, 237, 658, 622, 703, 668, 716, - 280, 0, 628, 334, 686, 370, 223, 289, 287, 396, - 246, 240, 236, 222, 265, 294, 332, 387, 326, 723, - 284, 675, 0, 379, 306, 0, 0, 0, 656, 706, - 663, 697, 651, 685, 612, 674, 718, 639, 
682, 719, - 270, 221, 192, 318, 380, 249, 0, 0, 0, 174, - 175, 176, 0, 0, 0, 0, 0, 0, 0, 0, - 213, 0, 219, 679, 713, 636, 681, 233, 268, 239, - 232, 394, 683, 729, 599, 676, 0, 602, 605, 725, - 709, 631, 632, 0, 0, 0, 0, 0, 0, 0, - 655, 664, 694, 649, 0, 0, 0, 0, 0, 0, - 1674, 0, 629, 0, 673, 0, 0, 0, 608, 603, - 0, 0, 0, 0, 653, 0, 0, 0, 611, 0, - 630, 695, 0, 597, 256, 606, 307, 699, 708, 650, - 421, 712, 648, 647, 715, 690, 609, 705, 642, 279, - 607, 276, 188, 202, 0, 640, 317, 355, 360, 704, - 626, 635, 224, 633, 358, 330, 409, 209, 247, 352, - 335, 356, 672, 688, 357, 285, 398, 347, 408, 422, - 423, 231, 311, 415, 391, 419, 431, 203, 228, 324, - 384, 412, 376, 304, 395, 275, 375, 254, 191, 283, - 195, 386, 406, 214, 368, 0, 0, 0, 197, 404, - 383, 301, 272, 273, 196, 0, 351, 235, 252, 226, - 320, 401, 402, 225, 433, 204, 418, 199, 205, 417, - 313, 397, 405, 302, 293, 198, 403, 300, 292, 278, - 245, 261, 345, 288, 346, 262, 309, 308, 310, 0, - 193, 0, 381, 413, 434, 211, 621, 700, 393, 427, - 430, 0, 348, 212, 253, 244, 344, 251, 281, 426, - 428, 429, 210, 342, 259, 312, 206, 264, 377, 277, - 286, 692, 728, 329, 359, 215, 411, 378, 616, 620, - 614, 615, 666, 667, 617, 720, 721, 722, 696, 610, - 0, 618, 619, 0, 702, 710, 711, 671, 187, 200, - 282, 724, 349, 250, 432, 416, 414, 598, 613, 230, - 624, 0, 0, 637, 644, 645, 657, 659, 660, 661, - 662, 670, 677, 678, 680, 687, 689, 691, 693, 698, - 707, 727, 189, 190, 201, 208, 217, 229, 242, 248, - 257, 260, 263, 266, 267, 269, 274, 291, 295, 296, - 297, 298, 314, 315, 316, 319, 322, 323, 325, 327, - 328, 331, 337, 338, 339, 340, 341, 343, 350, 354, - 361, 362, 363, 364, 365, 366, 367, 371, 372, 373, - 374, 382, 385, 399, 400, 410, 420, 424, 258, 407, - 425, 0, 290, 669, 194, 220, 207, 227, 241, 243, - 271, 299, 305, 333, 336, 255, 238, 218, 353, 216, - 369, 388, 389, 390, 392, 303, 234, 714, 701, 0, - 0, 652, 717, 623, 641, 726, 643, 646, 684, 604, - 665, 321, 638, 0, 627, 600, 634, 601, 625, 654, - 237, 658, 622, 
703, 668, 716, 280, 0, 628, 334, - 686, 370, 223, 289, 287, 396, 246, 240, 236, 222, - 265, 294, 332, 387, 326, 723, 284, 675, 0, 379, - 306, 0, 0, 0, 656, 706, 663, 697, 651, 685, - 612, 674, 718, 639, 682, 719, 270, 221, 192, 318, - 380, 249, 0, 0, 0, 174, 175, 176, 0, 0, - 0, 0, 0, 0, 0, 0, 213, 0, 219, 679, - 713, 636, 681, 233, 268, 239, 232, 394, 683, 729, - 599, 676, 0, 602, 605, 725, 709, 631, 632, 0, - 0, 0, 0, 0, 0, 0, 655, 664, 694, 649, - 0, 0, 0, 0, 0, 0, 1399, 0, 629, 0, - 673, 0, 0, 0, 608, 603, 0, 0, 0, 0, - 653, 0, 0, 0, 611, 0, 630, 695, 0, 597, - 256, 606, 307, 699, 708, 650, 421, 712, 648, 647, - 715, 690, 609, 705, 642, 279, 607, 276, 188, 202, - 0, 640, 317, 355, 360, 704, 626, 635, 224, 633, - 358, 330, 409, 209, 247, 352, 335, 356, 672, 688, - 357, 285, 398, 347, 408, 422, 423, 231, 311, 415, - 391, 419, 431, 203, 228, 324, 384, 412, 376, 304, - 395, 275, 375, 254, 191, 283, 195, 386, 406, 214, - 368, 0, 0, 0, 197, 404, 383, 301, 272, 273, - 196, 0, 351, 235, 252, 226, 320, 401, 402, 225, - 433, 204, 418, 199, 205, 417, 313, 397, 405, 302, - 293, 198, 403, 300, 292, 278, 245, 261, 345, 288, - 346, 262, 309, 308, 310, 0, 193, 0, 381, 413, - 434, 211, 621, 700, 393, 427, 430, 0, 348, 212, - 253, 244, 344, 251, 281, 426, 428, 429, 210, 342, - 259, 312, 206, 264, 377, 277, 286, 692, 728, 329, - 359, 215, 411, 378, 616, 620, 614, 615, 666, 667, - 617, 720, 721, 722, 696, 610, 0, 618, 619, 0, - 702, 710, 711, 671, 187, 200, 282, 724, 349, 250, - 432, 416, 414, 598, 613, 230, 624, 0, 0, 637, - 644, 645, 657, 659, 660, 661, 662, 670, 677, 678, - 680, 687, 689, 691, 693, 698, 707, 727, 189, 190, - 201, 208, 217, 229, 242, 248, 257, 260, 263, 266, - 267, 269, 274, 291, 295, 296, 297, 298, 314, 315, - 316, 319, 322, 323, 325, 327, 328, 331, 337, 338, - 339, 340, 341, 343, 350, 354, 361, 362, 363, 364, - 365, 366, 367, 371, 372, 373, 374, 382, 385, 399, - 400, 410, 420, 424, 258, 407, 425, 0, 290, 669, - 194, 220, 207, 227, 241, 243, 271, 299, 305, 333, 
- 336, 255, 238, 218, 353, 216, 369, 388, 389, 390, - 392, 303, 234, 714, 701, 0, 0, 652, 717, 623, - 641, 726, 643, 646, 684, 604, 665, 321, 638, 0, - 627, 600, 634, 601, 625, 654, 237, 658, 622, 703, - 668, 716, 280, 0, 628, 334, 686, 370, 223, 289, - 287, 396, 246, 240, 236, 222, 265, 294, 332, 387, - 326, 723, 284, 675, 0, 379, 306, 0, 0, 0, - 656, 706, 663, 697, 651, 685, 612, 674, 718, 639, - 682, 719, 270, 221, 192, 318, 380, 249, 0, 0, - 0, 174, 175, 176, 0, 0, 0, 0, 0, 0, - 0, 0, 213, 0, 219, 679, 713, 636, 681, 233, - 268, 239, 232, 394, 683, 729, 599, 676, 0, 602, - 605, 725, 709, 631, 632, 0, 0, 0, 0, 0, - 0, 0, 655, 664, 694, 649, 0, 0, 0, 0, - 0, 0, 0, 0, 629, 0, 673, 0, 0, 0, - 608, 603, 0, 0, 0, 0, 653, 0, 0, 0, - 611, 0, 630, 695, 0, 597, 256, 606, 307, 699, - 708, 650, 421, 712, 648, 647, 715, 690, 609, 705, - 642, 279, 607, 276, 188, 202, 0, 640, 317, 355, - 360, 704, 626, 635, 224, 633, 358, 330, 409, 209, - 247, 352, 335, 356, 672, 688, 357, 285, 398, 347, - 408, 422, 423, 231, 311, 415, 391, 419, 431, 203, - 228, 324, 384, 412, 376, 304, 395, 275, 375, 254, - 191, 283, 195, 386, 406, 214, 368, 0, 0, 0, - 197, 404, 383, 301, 272, 273, 196, 0, 351, 235, - 252, 226, 320, 401, 402, 225, 433, 204, 418, 199, - 205, 417, 313, 397, 405, 302, 293, 198, 403, 300, - 292, 278, 245, 261, 345, 288, 346, 262, 309, 308, - 310, 0, 193, 0, 381, 413, 434, 211, 621, 700, - 393, 427, 430, 0, 348, 212, 253, 244, 344, 251, - 281, 426, 428, 429, 210, 342, 259, 312, 206, 264, - 377, 277, 286, 692, 728, 329, 359, 215, 411, 378, - 616, 620, 614, 615, 666, 667, 617, 720, 721, 722, - 696, 610, 0, 618, 619, 0, 702, 710, 711, 671, - 187, 200, 282, 724, 349, 250, 432, 416, 414, 598, - 613, 230, 624, 0, 0, 637, 644, 645, 657, 659, - 660, 661, 662, 670, 677, 678, 680, 687, 689, 691, - 693, 698, 707, 727, 189, 190, 201, 208, 217, 229, - 242, 248, 257, 260, 263, 266, 267, 269, 274, 291, - 295, 296, 297, 298, 314, 315, 316, 319, 322, 323, - 325, 327, 328, 331, 337, 338, 339, 340, 
341, 343, - 350, 354, 361, 362, 363, 364, 365, 366, 367, 371, - 372, 373, 374, 382, 385, 399, 400, 410, 420, 424, - 258, 407, 425, 0, 290, 669, 194, 220, 207, 227, - 241, 243, 271, 299, 305, 333, 336, 255, 238, 218, - 353, 216, 369, 388, 389, 390, 392, 303, 234, 714, - 701, 0, 0, 652, 717, 623, 641, 726, 643, 646, - 684, 604, 665, 321, 638, 0, 627, 600, 634, 601, - 625, 654, 237, 658, 622, 703, 668, 716, 280, 0, - 628, 334, 686, 370, 223, 289, 287, 396, 246, 240, - 236, 222, 265, 294, 332, 387, 326, 723, 284, 675, - 0, 379, 306, 0, 0, 0, 656, 706, 663, 697, - 651, 685, 612, 674, 718, 639, 682, 719, 270, 221, - 192, 318, 380, 249, 0, 0, 0, 174, 175, 176, - 0, 0, 0, 0, 0, 0, 0, 0, 213, 0, - 219, 679, 713, 636, 681, 233, 268, 239, 232, 394, - 683, 729, 599, 676, 0, 602, 605, 725, 709, 631, - 632, 0, 0, 0, 0, 0, 0, 0, 655, 664, - 694, 649, 0, 0, 0, 0, 0, 0, 0, 0, - 629, 0, 673, 0, 0, 0, 608, 603, 0, 0, - 0, 0, 653, 0, 0, 0, 611, 0, 630, 695, - 0, 597, 256, 606, 307, 699, 708, 650, 421, 712, - 648, 647, 715, 690, 609, 705, 642, 279, 607, 276, - 188, 202, 0, 640, 317, 355, 360, 704, 626, 635, - 224, 633, 358, 330, 409, 209, 247, 352, 335, 356, - 672, 688, 357, 285, 398, 347, 408, 422, 423, 231, - 311, 415, 391, 419, 431, 203, 228, 324, 384, 412, - 376, 304, 395, 275, 375, 254, 191, 283, 195, 386, - 406, 214, 368, 0, 0, 0, 197, 404, 383, 301, - 272, 273, 196, 0, 351, 235, 252, 226, 320, 401, - 402, 225, 433, 204, 418, 199, 731, 417, 313, 397, - 405, 302, 293, 198, 403, 300, 292, 278, 245, 261, - 345, 288, 346, 262, 309, 308, 310, 0, 193, 0, - 381, 413, 434, 211, 621, 700, 393, 427, 430, 0, - 348, 212, 253, 244, 344, 251, 281, 426, 428, 429, - 210, 342, 259, 596, 730, 590, 589, 277, 286, 692, - 728, 329, 359, 215, 411, 378, 616, 620, 614, 615, - 666, 667, 617, 720, 721, 722, 696, 610, 0, 618, - 619, 0, 702, 710, 711, 671, 187, 200, 282, 724, - 349, 250, 432, 416, 414, 598, 613, 230, 624, 0, - 0, 637, 644, 645, 657, 659, 660, 661, 662, 670, - 677, 678, 680, 687, 689, 691, 
693, 698, 707, 727, - 189, 190, 201, 208, 217, 229, 242, 248, 257, 260, - 263, 266, 267, 269, 274, 291, 295, 296, 297, 298, - 314, 315, 316, 319, 322, 323, 325, 327, 328, 331, - 337, 338, 339, 340, 341, 343, 350, 354, 361, 362, - 363, 364, 365, 366, 367, 371, 372, 373, 374, 382, - 385, 399, 400, 410, 420, 424, 258, 407, 425, 0, - 290, 669, 194, 220, 207, 227, 241, 243, 271, 299, - 305, 333, 336, 255, 238, 218, 353, 216, 369, 388, - 389, 390, 392, 303, 234, 714, 701, 0, 0, 652, - 717, 623, 641, 726, 643, 646, 684, 604, 665, 321, - 638, 0, 627, 600, 634, 601, 625, 654, 237, 658, - 622, 703, 668, 716, 280, 0, 628, 334, 686, 370, - 223, 289, 287, 396, 246, 240, 236, 222, 265, 294, - 332, 387, 326, 723, 284, 675, 0, 379, 306, 0, - 0, 0, 656, 706, 663, 697, 651, 685, 612, 674, - 718, 639, 682, 719, 270, 221, 192, 318, 380, 249, - 0, 0, 0, 174, 175, 176, 0, 0, 0, 0, - 0, 0, 0, 0, 213, 0, 219, 679, 713, 636, - 681, 233, 268, 239, 232, 394, 683, 729, 599, 676, - 0, 602, 605, 725, 709, 631, 632, 0, 0, 0, - 0, 0, 0, 0, 655, 664, 694, 649, 0, 0, - 0, 0, 0, 0, 0, 0, 629, 0, 673, 0, - 0, 0, 608, 603, 0, 0, 0, 0, 653, 0, - 0, 0, 611, 0, 630, 695, 0, 597, 256, 606, - 307, 699, 708, 650, 421, 712, 648, 647, 715, 690, - 609, 705, 642, 279, 607, 276, 188, 202, 0, 640, - 317, 355, 360, 704, 626, 635, 224, 633, 358, 330, - 409, 209, 247, 352, 335, 356, 672, 688, 357, 285, - 398, 347, 408, 422, 423, 231, 311, 415, 391, 419, - 431, 203, 228, 324, 384, 412, 376, 304, 395, 275, - 375, 254, 191, 283, 195, 386, 1045, 214, 368, 0, - 0, 0, 197, 404, 383, 301, 272, 273, 196, 0, - 351, 235, 252, 226, 320, 401, 402, 225, 433, 204, - 418, 199, 731, 417, 313, 397, 405, 302, 293, 198, - 403, 300, 292, 278, 245, 261, 345, 288, 346, 262, - 309, 308, 310, 0, 193, 0, 381, 413, 434, 211, - 621, 700, 393, 427, 430, 0, 348, 212, 253, 244, - 344, 251, 281, 426, 428, 429, 210, 342, 259, 596, - 730, 590, 589, 277, 286, 692, 728, 329, 359, 215, - 411, 378, 616, 620, 614, 615, 666, 667, 617, 720, - 721, 722, 
696, 610, 0, 618, 619, 0, 702, 710, - 711, 671, 187, 200, 282, 724, 349, 250, 432, 416, - 414, 598, 613, 230, 624, 0, 0, 637, 644, 645, - 657, 659, 660, 661, 662, 670, 677, 678, 680, 687, - 689, 691, 693, 698, 707, 727, 189, 190, 201, 208, - 217, 229, 242, 248, 257, 260, 263, 266, 267, 269, - 274, 291, 295, 296, 297, 298, 314, 315, 316, 319, - 322, 323, 325, 327, 328, 331, 337, 338, 339, 340, - 341, 343, 350, 354, 361, 362, 363, 364, 365, 366, - 367, 371, 372, 373, 374, 382, 385, 399, 400, 410, - 420, 424, 258, 407, 425, 0, 290, 669, 194, 220, - 207, 227, 241, 243, 271, 299, 305, 333, 336, 255, - 238, 218, 353, 216, 369, 388, 389, 390, 392, 303, - 234, 714, 701, 0, 0, 652, 717, 623, 641, 726, - 643, 646, 684, 604, 665, 321, 638, 0, 627, 600, - 634, 601, 625, 654, 237, 658, 622, 703, 668, 716, - 280, 0, 628, 334, 686, 370, 223, 289, 287, 396, - 246, 240, 236, 222, 265, 294, 332, 387, 326, 723, - 284, 675, 0, 379, 306, 0, 0, 0, 656, 706, - 663, 697, 651, 685, 612, 674, 718, 639, 682, 719, - 270, 221, 192, 318, 380, 249, 0, 0, 0, 174, - 175, 176, 0, 0, 0, 0, 0, 0, 0, 0, - 213, 0, 219, 679, 713, 636, 681, 233, 268, 239, - 232, 394, 683, 729, 599, 676, 0, 602, 605, 725, - 709, 631, 632, 0, 0, 0, 0, 0, 0, 0, - 655, 664, 694, 649, 0, 0, 0, 0, 0, 0, - 0, 0, 629, 0, 673, 0, 0, 0, 608, 603, - 0, 0, 0, 0, 653, 0, 0, 0, 611, 0, - 630, 695, 0, 597, 256, 606, 307, 699, 708, 650, - 421, 712, 648, 647, 715, 690, 609, 705, 642, 279, - 607, 276, 188, 202, 0, 640, 317, 355, 360, 704, - 626, 635, 224, 633, 358, 330, 409, 209, 247, 352, - 335, 356, 672, 688, 357, 285, 398, 347, 408, 422, - 423, 231, 311, 415, 391, 419, 431, 203, 228, 324, - 384, 412, 376, 304, 395, 275, 375, 254, 191, 283, - 195, 386, 587, 214, 368, 0, 0, 0, 197, 404, - 383, 301, 272, 273, 196, 0, 351, 235, 252, 226, - 320, 401, 402, 225, 433, 204, 418, 199, 731, 417, - 313, 397, 405, 302, 293, 198, 403, 300, 292, 278, - 245, 261, 345, 288, 346, 262, 309, 308, 310, 0, - 193, 0, 381, 413, 434, 211, 621, 700, 393, 427, - 
430, 0, 348, 212, 253, 244, 344, 251, 281, 426, - 428, 429, 210, 342, 259, 596, 730, 590, 589, 277, - 286, 692, 728, 329, 359, 215, 411, 378, 616, 620, - 614, 615, 666, 667, 617, 720, 721, 722, 696, 610, - 0, 618, 619, 0, 702, 710, 711, 671, 187, 200, - 282, 724, 349, 250, 432, 416, 414, 598, 613, 230, - 624, 0, 0, 637, 644, 645, 657, 659, 660, 661, - 662, 670, 677, 678, 680, 687, 689, 691, 693, 698, - 707, 727, 189, 190, 201, 208, 217, 229, 242, 248, - 257, 260, 263, 266, 267, 269, 274, 291, 295, 296, - 297, 298, 314, 315, 316, 319, 322, 323, 325, 327, - 328, 331, 337, 338, 339, 340, 341, 343, 350, 354, - 361, 362, 363, 364, 365, 366, 367, 371, 372, 373, - 374, 382, 385, 399, 400, 410, 420, 424, 258, 407, - 425, 0, 290, 669, 194, 220, 207, 227, 241, 243, - 271, 299, 305, 333, 336, 255, 238, 218, 353, 216, - 369, 388, 389, 390, 392, 303, 234, 321, 0, 0, - 1328, 0, 490, 0, 0, 0, 237, 0, 489, 0, - 0, 0, 280, 0, 1329, 334, 0, 370, 223, 289, - 287, 396, 246, 240, 236, 222, 265, 294, 332, 387, - 326, 533, 284, 0, 0, 379, 306, 0, 0, 0, - 0, 0, 524, 525, 0, 0, 0, 0, 0, 0, - 0, 0, 270, 221, 192, 318, 380, 249, 67, 0, - 0, 174, 175, 176, 511, 510, 513, 514, 515, 516, - 0, 0, 213, 512, 219, 517, 518, 519, 0, 233, - 268, 239, 232, 394, 0, 0, 0, 487, 504, 0, - 532, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 501, 502, 577, 0, 0, 0, 548, 0, 503, 0, - 0, 496, 497, 499, 498, 500, 505, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 256, 0, 307, 547, - 0, 0, 421, 0, 0, 545, 0, 0, 0, 0, - 0, 279, 0, 276, 188, 202, 0, 0, 317, 355, - 360, 0, 0, 0, 224, 0, 358, 330, 409, 209, - 247, 352, 335, 356, 0, 0, 357, 285, 398, 347, - 408, 422, 423, 231, 311, 415, 391, 419, 431, 203, - 228, 324, 384, 412, 376, 304, 395, 275, 375, 254, - 191, 283, 195, 386, 406, 214, 368, 0, 0, 0, - 197, 404, 383, 301, 272, 273, 196, 0, 351, 235, - 252, 226, 320, 401, 402, 225, 433, 204, 418, 199, - 205, 417, 313, 397, 405, 302, 293, 198, 403, 300, - 292, 278, 245, 261, 345, 288, 346, 262, 309, 308, - 310, 0, 193, 
0, 381, 413, 434, 211, 0, 0, - 393, 427, 430, 0, 348, 212, 253, 244, 344, 251, - 281, 426, 428, 429, 210, 342, 259, 312, 206, 264, - 377, 277, 286, 0, 0, 329, 359, 215, 411, 378, - 535, 546, 541, 542, 539, 540, 534, 538, 537, 536, - 549, 526, 527, 528, 529, 531, 0, 543, 544, 530, - 187, 200, 282, 0, 349, 250, 432, 416, 414, 0, - 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1331, 0, 0, 0, 0, 0, 0, 1335, + 0, 0, 1841, 0, 0, 0, 0, 0, 1344, 1345, + 1346, 1347, 1348, 1349, 1350, 0, 0, 0, 0, 0, + 0, 148, 0, 489, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1218, 0, + 0, 0, 1104, 0, 0, 0, 0, 0, 0, 0, + 461, 463, 464, 0, 480, 482, 490, 0, 0, 0, + 478, 479, 491, 465, 466, 495, 494, 481, 0, 470, + 467, 469, 475, 0, 0, 142, 488, 473, 492, 0, + 0, 0, 0, 0, 0, 0, 0, 136, 0, 0, + 137, 0, 0, 0, 0, 0, 0, 0, 1144, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 189, 190, 201, 208, 217, 229, - 242, 248, 257, 260, 263, 266, 267, 269, 274, 291, - 295, 296, 297, 298, 314, 315, 316, 319, 322, 323, - 325, 327, 328, 331, 337, 338, 339, 340, 341, 343, - 350, 354, 361, 362, 363, 364, 365, 366, 367, 371, - 372, 373, 374, 382, 385, 399, 400, 410, 420, 424, - 258, 407, 425, 0, 290, 0, 194, 220, 207, 227, - 241, 243, 271, 299, 305, 333, 336, 255, 238, 218, - 353, 216, 369, 388, 389, 390, 392, 303, 234, 321, - 0, 0, 0, 0, 490, 0, 0, 0, 237, 0, - 489, 0, 0, 0, 280, 0, 0, 334, 0, 370, - 223, 289, 287, 396, 246, 240, 236, 222, 265, 294, - 332, 387, 326, 533, 284, 0, 0, 379, 306, 0, - 0, 0, 0, 0, 524, 525, 0, 0, 0, 0, - 0, 0, 1438, 0, 270, 221, 192, 318, 380, 249, - 67, 0, 0, 174, 175, 176, 511, 510, 513, 514, - 515, 516, 0, 0, 213, 512, 219, 517, 518, 519, - 1439, 233, 268, 239, 232, 394, 0, 0, 0, 487, - 504, 0, 532, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 501, 502, 0, 0, 0, 0, 548, 0, - 503, 0, 0, 496, 497, 499, 498, 500, 505, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 256, 0, - 307, 547, 0, 0, 421, 0, 0, 545, 0, 0, - 0, 0, 0, 279, 0, 276, 188, 202, 0, 0, - 317, 355, 360, 0, 0, 0, 224, 0, 358, 
330, - 409, 209, 247, 352, 335, 356, 0, 0, 357, 285, - 398, 347, 408, 422, 423, 231, 311, 415, 391, 419, - 431, 203, 228, 324, 384, 412, 376, 304, 395, 275, - 375, 254, 191, 283, 195, 386, 406, 214, 368, 0, - 0, 0, 197, 404, 383, 301, 272, 273, 196, 0, - 351, 235, 252, 226, 320, 401, 402, 225, 433, 204, - 418, 199, 205, 417, 313, 397, 405, 302, 293, 198, - 403, 300, 292, 278, 245, 261, 345, 288, 346, 262, - 309, 308, 310, 0, 193, 0, 381, 413, 434, 211, - 0, 0, 393, 427, 430, 0, 348, 212, 253, 244, - 344, 251, 281, 426, 428, 429, 210, 342, 259, 312, - 206, 264, 377, 277, 286, 0, 0, 329, 359, 215, - 411, 378, 535, 546, 541, 542, 539, 540, 534, 538, - 537, 536, 549, 526, 527, 528, 529, 531, 0, 543, - 544, 530, 187, 200, 282, 0, 349, 250, 432, 416, - 414, 0, 0, 230, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 189, 190, 201, 208, - 217, 229, 242, 248, 257, 260, 263, 266, 267, 269, - 274, 291, 295, 296, 297, 298, 314, 315, 316, 319, - 322, 323, 325, 327, 328, 331, 337, 338, 339, 340, - 341, 343, 350, 354, 361, 362, 363, 364, 365, 366, - 367, 371, 372, 373, 374, 382, 385, 399, 400, 410, - 420, 424, 258, 407, 425, 0, 290, 0, 194, 220, - 207, 227, 241, 243, 271, 299, 305, 333, 336, 255, - 238, 218, 353, 216, 369, 388, 389, 390, 392, 303, - 234, 321, 0, 0, 0, 0, 490, 0, 0, 0, - 237, 0, 489, 0, 0, 0, 280, 0, 0, 334, - 0, 370, 223, 289, 287, 396, 246, 240, 236, 222, - 265, 294, 332, 387, 326, 533, 284, 0, 0, 379, - 306, 0, 0, 0, 0, 0, 524, 525, 0, 0, - 0, 0, 0, 0, 0, 0, 270, 221, 192, 318, - 380, 249, 67, 0, 565, 174, 175, 176, 511, 510, - 513, 514, 515, 516, 0, 0, 213, 512, 219, 517, - 518, 519, 0, 233, 268, 239, 232, 394, 0, 0, - 0, 487, 504, 0, 532, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 501, 502, 0, 0, 0, 0, - 548, 0, 503, 0, 0, 496, 497, 499, 498, 500, - 505, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 256, 0, 307, 547, 0, 0, 421, 0, 0, 545, - 0, 0, 0, 0, 0, 279, 0, 276, 188, 202, - 0, 0, 317, 355, 360, 0, 0, 0, 224, 0, - 358, 330, 409, 
209, 247, 352, 335, 356, 0, 0, - 357, 285, 398, 347, 408, 422, 423, 231, 311, 415, - 391, 419, 431, 203, 228, 324, 384, 412, 376, 304, - 395, 275, 375, 254, 191, 283, 195, 386, 406, 214, - 368, 0, 0, 0, 197, 404, 383, 301, 272, 273, - 196, 0, 351, 235, 252, 226, 320, 401, 402, 225, - 433, 204, 418, 199, 205, 417, 313, 397, 405, 302, - 293, 198, 403, 300, 292, 278, 245, 261, 345, 288, - 346, 262, 309, 308, 310, 0, 193, 0, 381, 413, - 434, 211, 0, 0, 393, 427, 430, 0, 348, 212, - 253, 244, 344, 251, 281, 426, 428, 429, 210, 342, - 259, 312, 206, 264, 377, 277, 286, 0, 0, 329, - 359, 215, 411, 378, 535, 546, 541, 542, 539, 540, - 534, 538, 537, 536, 549, 526, 527, 528, 529, 531, - 0, 543, 544, 530, 187, 200, 282, 0, 349, 250, - 432, 416, 414, 0, 0, 230, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 189, 190, - 201, 208, 217, 229, 242, 248, 257, 260, 263, 266, - 267, 269, 274, 291, 295, 296, 297, 298, 314, 315, - 316, 319, 322, 323, 325, 327, 328, 331, 337, 338, - 339, 340, 341, 343, 350, 354, 361, 362, 363, 364, - 365, 366, 367, 371, 372, 373, 374, 382, 385, 399, - 400, 410, 420, 424, 258, 407, 425, 0, 290, 0, - 194, 220, 207, 227, 241, 243, 271, 299, 305, 333, - 336, 255, 238, 218, 353, 216, 369, 388, 389, 390, - 392, 303, 234, 321, 0, 0, 0, 0, 490, 0, - 0, 0, 237, 0, 489, 0, 0, 0, 280, 0, - 0, 334, 0, 370, 223, 289, 287, 396, 246, 240, - 236, 222, 265, 294, 332, 387, 326, 533, 284, 0, - 0, 379, 306, 0, 0, 0, 0, 0, 524, 525, - 0, 0, 0, 0, 0, 0, 0, 0, 270, 221, - 192, 318, 380, 249, 67, 0, 0, 174, 175, 176, - 511, 510, 513, 514, 515, 516, 0, 0, 213, 512, - 219, 517, 518, 519, 0, 233, 268, 239, 232, 394, - 0, 0, 0, 487, 504, 0, 532, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 501, 502, 577, 0, - 0, 0, 548, 0, 503, 0, 0, 496, 497, 499, - 498, 500, 505, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 256, 0, 307, 547, 0, 0, 421, 0, - 0, 545, 0, 0, 0, 0, 0, 279, 0, 276, - 188, 202, 0, 0, 317, 355, 360, 0, 0, 0, - 224, 0, 358, 330, 409, 209, 247, 352, 
335, 356, - 0, 0, 357, 285, 398, 347, 408, 422, 423, 231, - 311, 415, 391, 419, 431, 203, 228, 324, 384, 412, - 376, 304, 395, 275, 375, 254, 191, 283, 195, 386, - 406, 214, 368, 0, 0, 0, 197, 404, 383, 301, - 272, 273, 196, 0, 351, 235, 252, 226, 320, 401, - 402, 225, 433, 204, 418, 199, 205, 417, 313, 397, - 405, 302, 293, 198, 403, 300, 292, 278, 245, 261, - 345, 288, 346, 262, 309, 308, 310, 0, 193, 0, - 381, 413, 434, 211, 0, 0, 393, 427, 430, 0, - 348, 212, 253, 244, 344, 251, 281, 426, 428, 429, - 210, 342, 259, 312, 206, 264, 377, 277, 286, 0, - 0, 329, 359, 215, 411, 378, 535, 546, 541, 542, - 539, 540, 534, 538, 537, 536, 549, 526, 527, 528, - 529, 531, 0, 543, 544, 530, 187, 200, 282, 0, - 349, 250, 432, 416, 414, 0, 0, 230, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 189, 190, 201, 208, 217, 229, 242, 248, 257, 260, - 263, 266, 267, 269, 274, 291, 295, 296, 297, 298, - 314, 315, 316, 319, 322, 323, 325, 327, 328, 331, - 337, 338, 339, 340, 341, 343, 350, 354, 361, 362, - 363, 364, 365, 366, 367, 371, 372, 373, 374, 382, - 385, 399, 400, 410, 420, 424, 258, 407, 425, 0, - 290, 0, 194, 220, 207, 227, 241, 243, 271, 299, - 305, 333, 336, 255, 238, 218, 353, 216, 369, 388, - 389, 390, 392, 303, 234, 321, 0, 0, 0, 0, - 490, 0, 0, 0, 237, 0, 489, 0, 0, 0, - 280, 0, 0, 334, 0, 370, 223, 289, 287, 396, - 246, 240, 236, 222, 265, 294, 332, 387, 326, 533, - 284, 0, 0, 379, 306, 0, 0, 0, 0, 0, - 524, 525, 0, 0, 0, 0, 0, 0, 0, 0, - 270, 221, 192, 318, 380, 249, 67, 0, 0, 174, - 175, 176, 511, 1346, 513, 514, 515, 516, 0, 0, - 213, 512, 219, 517, 518, 519, 0, 233, 268, 239, - 232, 394, 0, 0, 0, 487, 504, 0, 532, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 501, 502, - 577, 0, 0, 0, 548, 0, 503, 0, 0, 496, - 497, 499, 498, 500, 505, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 256, 0, 307, 547, 0, 0, - 421, 0, 0, 545, 0, 0, 0, 0, 0, 279, - 0, 276, 188, 202, 0, 0, 317, 355, 360, 0, - 0, 0, 224, 0, 358, 330, 409, 209, 247, 352, - 335, 356, 
0, 0, 357, 285, 398, 347, 408, 422, - 423, 231, 311, 415, 391, 419, 431, 203, 228, 324, - 384, 412, 376, 304, 395, 275, 375, 254, 191, 283, - 195, 386, 406, 214, 368, 0, 0, 0, 197, 404, - 383, 301, 272, 273, 196, 0, 351, 235, 252, 226, - 320, 401, 402, 225, 433, 204, 418, 199, 205, 417, - 313, 397, 405, 302, 293, 198, 403, 300, 292, 278, - 245, 261, 345, 288, 346, 262, 309, 308, 310, 0, - 193, 0, 381, 413, 434, 211, 0, 0, 393, 427, - 430, 0, 348, 212, 253, 244, 344, 251, 281, 426, - 428, 429, 210, 342, 259, 312, 206, 264, 377, 277, - 286, 0, 0, 329, 359, 215, 411, 378, 535, 546, - 541, 542, 539, 540, 534, 538, 537, 536, 549, 526, - 527, 528, 529, 531, 0, 543, 544, 530, 187, 200, - 282, 0, 349, 250, 432, 416, 414, 0, 0, 230, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 189, 190, 201, 208, 217, 229, 242, 248, - 257, 260, 263, 266, 267, 269, 274, 291, 295, 296, - 297, 298, 314, 315, 316, 319, 322, 323, 325, 327, - 328, 331, 337, 338, 339, 340, 341, 343, 350, 354, - 361, 362, 363, 364, 365, 366, 367, 371, 372, 373, - 374, 382, 385, 399, 400, 410, 420, 424, 258, 407, - 425, 0, 290, 0, 194, 220, 207, 227, 241, 243, - 271, 299, 305, 333, 336, 255, 238, 218, 353, 216, - 369, 388, 389, 390, 392, 303, 234, 321, 0, 0, - 0, 0, 490, 0, 0, 0, 237, 0, 489, 0, - 0, 0, 280, 0, 0, 334, 0, 370, 223, 289, - 287, 396, 246, 240, 236, 222, 265, 294, 332, 387, - 326, 533, 284, 0, 0, 379, 306, 0, 0, 0, - 0, 0, 524, 525, 0, 0, 0, 0, 0, 0, - 0, 0, 270, 221, 192, 318, 380, 249, 67, 0, - 0, 174, 175, 176, 511, 1343, 513, 514, 515, 516, - 0, 0, 213, 512, 219, 517, 518, 519, 0, 233, - 268, 239, 232, 394, 0, 0, 0, 487, 504, 0, - 532, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 501, 502, 577, 0, 0, 0, 548, 0, 503, 0, - 0, 496, 497, 499, 498, 500, 505, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 256, 0, 307, 547, - 0, 0, 421, 0, 0, 545, 0, 0, 0, 0, - 0, 279, 0, 276, 188, 202, 0, 0, 317, 355, - 360, 0, 0, 0, 224, 0, 358, 330, 409, 209, - 247, 352, 335, 356, 0, 0, 357, 
285, 398, 347, - 408, 422, 423, 231, 311, 415, 391, 419, 431, 203, - 228, 324, 384, 412, 376, 304, 395, 275, 375, 254, - 191, 283, 195, 386, 406, 214, 368, 0, 0, 0, - 197, 404, 383, 301, 272, 273, 196, 0, 351, 235, - 252, 226, 320, 401, 402, 225, 433, 204, 418, 199, - 205, 417, 313, 397, 405, 302, 293, 198, 403, 300, - 292, 278, 245, 261, 345, 288, 346, 262, 309, 308, - 310, 0, 193, 0, 381, 413, 434, 211, 0, 0, - 393, 427, 430, 0, 348, 212, 253, 244, 344, 251, - 281, 426, 428, 429, 210, 342, 259, 312, 206, 264, - 377, 277, 286, 0, 0, 329, 359, 215, 411, 378, - 535, 546, 541, 542, 539, 540, 534, 538, 537, 536, - 549, 526, 527, 528, 529, 531, 0, 543, 544, 530, - 187, 200, 282, 0, 349, 250, 432, 416, 414, 0, - 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 189, 190, 201, 208, 217, 229, - 242, 248, 257, 260, 263, 266, 267, 269, 274, 291, - 295, 296, 297, 298, 314, 315, 316, 319, 322, 323, - 325, 327, 328, 331, 337, 338, 339, 340, 341, 343, - 350, 354, 361, 362, 363, 364, 365, 366, 367, 371, - 372, 373, 374, 382, 385, 399, 400, 410, 420, 424, - 258, 407, 425, 0, 290, 0, 194, 220, 207, 227, - 241, 243, 271, 299, 305, 333, 336, 255, 238, 218, - 353, 216, 369, 388, 389, 390, 392, 303, 234, 558, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 321, 0, 0, 0, 0, 490, 0, 0, - 0, 237, 0, 489, 0, 0, 0, 280, 0, 0, - 334, 0, 370, 223, 289, 287, 396, 246, 240, 236, - 222, 265, 294, 332, 387, 326, 533, 284, 0, 0, - 379, 306, 0, 0, 0, 0, 0, 524, 525, 0, - 0, 0, 0, 0, 0, 0, 0, 270, 221, 192, - 318, 380, 249, 67, 0, 0, 174, 175, 176, 511, - 510, 513, 514, 515, 516, 0, 0, 213, 512, 219, - 517, 518, 519, 0, 233, 268, 239, 232, 394, 0, - 0, 0, 487, 504, 0, 532, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 501, 502, 0, 0, 0, - 0, 548, 0, 503, 0, 0, 496, 497, 499, 498, - 500, 505, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 256, 0, 307, 547, 0, 0, 421, 0, 0, - 545, 0, 0, 0, 0, 0, 279, 0, 276, 188, - 202, 0, 0, 317, 355, 360, 0, 0, 0, 224, - 0, 358, 330, 409, 209, 247, 
352, 335, 356, 0, - 0, 357, 285, 398, 347, 408, 422, 423, 231, 311, - 415, 391, 419, 431, 203, 228, 324, 384, 412, 376, - 304, 395, 275, 375, 254, 191, 283, 195, 386, 406, - 214, 368, 0, 0, 0, 197, 404, 383, 301, 272, - 273, 196, 0, 351, 235, 252, 226, 320, 401, 402, - 225, 433, 204, 418, 199, 205, 417, 313, 397, 405, - 302, 293, 198, 403, 300, 292, 278, 245, 261, 345, - 288, 346, 262, 309, 308, 310, 0, 193, 0, 381, - 413, 434, 211, 0, 0, 393, 427, 430, 0, 348, - 212, 253, 244, 344, 251, 281, 426, 428, 429, 210, - 342, 259, 312, 206, 264, 377, 277, 286, 0, 0, - 329, 359, 215, 411, 378, 535, 546, 541, 542, 539, - 540, 534, 538, 537, 536, 549, 526, 527, 528, 529, - 531, 0, 543, 544, 530, 187, 200, 282, 0, 349, - 250, 432, 416, 414, 0, 0, 230, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 189, - 190, 201, 208, 217, 229, 242, 248, 257, 260, 263, - 266, 267, 269, 274, 291, 295, 296, 297, 298, 314, - 315, 316, 319, 322, 323, 325, 327, 328, 331, 337, - 338, 339, 340, 341, 343, 350, 354, 361, 362, 363, - 364, 365, 366, 367, 371, 372, 373, 374, 382, 385, - 399, 400, 410, 420, 424, 258, 407, 425, 0, 290, - 0, 194, 220, 207, 227, 241, 243, 271, 299, 305, - 333, 336, 255, 238, 218, 353, 216, 369, 388, 389, - 390, 392, 303, 234, 321, 0, 0, 0, 0, 490, - 0, 0, 0, 237, 0, 489, 0, 0, 0, 280, - 0, 0, 334, 0, 370, 223, 289, 287, 396, 246, - 240, 236, 222, 265, 294, 332, 387, 326, 533, 284, - 0, 0, 379, 306, 0, 0, 0, 0, 0, 524, - 525, 0, 0, 0, 0, 0, 0, 0, 0, 270, - 221, 192, 318, 380, 249, 67, 0, 0, 174, 175, - 176, 511, 510, 513, 514, 515, 516, 0, 0, 213, - 512, 219, 517, 518, 519, 0, 233, 268, 239, 232, - 394, 0, 0, 0, 487, 504, 0, 532, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 501, 502, 0, - 0, 0, 0, 548, 0, 503, 0, 0, 496, 497, - 499, 498, 500, 505, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 256, 0, 307, 547, 0, 0, 421, - 0, 0, 545, 0, 0, 0, 0, 0, 279, 0, - 276, 188, 202, 0, 0, 317, 355, 360, 0, 0, - 0, 224, 0, 358, 330, 409, 209, 247, 352, 335, - 356, 
0, 0, 357, 285, 398, 347, 408, 422, 423, - 231, 311, 415, 391, 419, 431, 203, 228, 324, 384, - 412, 376, 304, 395, 275, 375, 254, 191, 283, 195, - 386, 406, 214, 368, 0, 0, 0, 197, 404, 383, - 301, 272, 273, 196, 0, 351, 235, 252, 226, 320, - 401, 402, 225, 433, 204, 418, 199, 205, 417, 313, - 397, 405, 302, 293, 198, 403, 300, 292, 278, 245, - 261, 345, 288, 346, 262, 309, 308, 310, 0, 193, - 0, 381, 413, 434, 211, 0, 0, 393, 427, 430, - 0, 348, 212, 253, 244, 344, 251, 281, 426, 428, - 429, 210, 342, 259, 312, 206, 264, 377, 277, 286, - 0, 0, 329, 359, 215, 411, 378, 535, 546, 541, - 542, 539, 540, 534, 538, 537, 536, 549, 526, 527, - 528, 529, 531, 0, 543, 544, 530, 187, 200, 282, - 0, 349, 250, 432, 416, 414, 0, 0, 230, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 189, 190, 201, 208, 217, 229, 242, 248, 257, - 260, 263, 266, 267, 269, 274, 291, 295, 296, 297, - 298, 314, 315, 316, 319, 322, 323, 325, 327, 328, - 331, 337, 338, 339, 340, 341, 343, 350, 354, 361, - 362, 363, 364, 365, 366, 367, 371, 372, 373, 374, - 382, 385, 399, 400, 410, 420, 424, 258, 407, 425, - 0, 290, 0, 194, 220, 207, 227, 241, 243, 271, - 299, 305, 333, 336, 255, 238, 218, 353, 216, 369, - 388, 389, 390, 392, 303, 234, 321, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, - 0, 280, 0, 0, 334, 0, 370, 223, 289, 287, - 396, 246, 240, 236, 222, 265, 294, 332, 387, 326, - 533, 284, 0, 0, 379, 306, 0, 0, 0, 0, - 0, 524, 525, 0, 0, 0, 0, 0, 0, 0, - 0, 270, 221, 192, 318, 380, 249, 67, 0, 0, - 174, 175, 176, 511, 510, 513, 514, 515, 516, 0, - 0, 213, 512, 219, 517, 518, 519, 0, 233, 268, - 239, 232, 394, 0, 0, 0, 0, 504, 0, 532, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 501, - 502, 0, 0, 0, 0, 548, 0, 503, 0, 0, - 496, 497, 499, 498, 500, 505, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 256, 0, 307, 547, 0, - 0, 421, 0, 0, 545, 0, 0, 0, 0, 0, - 279, 0, 276, 188, 202, 0, 0, 317, 355, 360, - 0, 0, 0, 224, 0, 358, 330, 409, 209, 247, - 352, 335, 356, 2087, 0, 357, 285, 
398, 347, 408, - 422, 423, 231, 311, 415, 391, 419, 431, 203, 228, - 324, 384, 412, 376, 304, 395, 275, 375, 254, 191, - 283, 195, 386, 406, 214, 368, 0, 0, 0, 197, - 404, 383, 301, 272, 273, 196, 0, 351, 235, 252, - 226, 320, 401, 402, 225, 433, 204, 418, 199, 205, - 417, 313, 397, 405, 302, 293, 198, 403, 300, 292, - 278, 245, 261, 345, 288, 346, 262, 309, 308, 310, - 0, 193, 0, 381, 413, 434, 211, 0, 0, 393, - 427, 430, 0, 348, 212, 253, 244, 344, 251, 281, - 426, 428, 429, 210, 342, 259, 312, 206, 264, 377, - 277, 286, 0, 0, 329, 359, 215, 411, 378, 535, - 546, 541, 542, 539, 540, 534, 538, 537, 536, 549, - 526, 527, 528, 529, 531, 0, 543, 544, 530, 187, - 200, 282, 0, 349, 250, 432, 416, 414, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1928, 1828, 34, 0, 0, + 1218, 0, 1835, 0, 0, 1828, 0, 0, 0, 0, + 625, 0, 1840, 0, 0, 0, 0, 0, 0, 0, + 1100, 0, 0, 0, 0, 0, 0, 0, 0, 1486, + 0, 0, 0, 0, 0, 0, 1490, 0, 1493, 0, + 0, 0, 0, 0, 0, 0, 0, 1512, 0, 0, + 0, 149, 154, 151, 157, 158, 159, 160, 162, 163, + 164, 165, 493, 0, 0, 0, 0, 166, 167, 168, + 169, 1132, 0, 0, 0, 0, 0, 0, 0, 0, + 486, 0, 0, 0, 0, 625, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 487, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1145, 0, 0, 0, 0, + 0, 1224, 0, 0, 0, 0, 1579, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 625, 0, 0, 1218, 0, 0, 1931, 1224, + 0, 0, 0, 0, 2027, 0, 0, 0, 0, 0, + 0, 2033, 2034, 2035, 0, 0, 1158, 1161, 1162, 1163, + 1164, 1165, 1166, 0, 1167, 1168, 1169, 1170, 1171, 1146, + 1147, 1148, 1149, 1130, 1131, 1159, 0, 1133, 0, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1150, + 1151, 1152, 1153, 1154, 1155, 1156, 1157, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 771, 0, 0, 1218, 0, 0, 0, + 1104, 0, 0, 0, 1634, 1635, 1636, 1637, 1638, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1642, 1643, + 1104, 1645, 0, 0, 2001, 2002, 2003, 0, 0, 0, + 0, 1651, 1160, 0, 0, 0, 0, 0, 1654, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 1928, 0, 34, + 0, 1928, 0, 1658, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, + 36, 37, 72, 39, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 34, 0, 1218, 76, + 0, 0, 0, 0, 41, 67, 68, 0, 65, 69, + 0, 0, 0, 0, 0, 66, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1928, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 34, 2171, 54, 0, 1828, 2075, 0, 0, + 0, 0, 0, 0, 71, 0, 0, 0, 0, 1828, + 0, 0, 625, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2104, 2104, 2104, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2119, 0, 2121, 0, 0, 0, 0, 0, 1828, + 0, 1769, 0, 0, 0, 0, 44, 47, 50, 49, + 52, 0, 64, 0, 0, 70, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1828, 0, 0, 0, 0, 0, 53, 75, + 74, 0, 0, 62, 63, 51, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1822, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 625, 625, 0, 55, 56, 0, 57, 58, 59, + 60, 0, 0, 0, 0, 2185, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1218, 0, 2199, 0, 1852, 0, 1828, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1867, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 73, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1916, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1978, 0, 0, 1979, + 1980, 1981, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1991, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2004, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, 750, 737, + 0, 0, 686, 753, 657, 675, 762, 677, 680, 720, + 636, 699, 327, 672, 0, 661, 632, 668, 633, 659, + 688, 237, 692, 656, 739, 702, 752, 285, 0, 638, + 662, 341, 722, 379, 223, 294, 292, 407, 247, 240, + 236, 222, 269, 300, 339, 397, 333, 759, 289, 709, + 0, 388, 312, 0, 0, 0, 690, 742, 697, 733, + 685, 721, 646, 708, 754, 673, 717, 755, 275, 221, + 190, 324, 389, 251, 0, 0, 0, 182, 183, 184, + 0, 2178, 2179, 0, 0, 0, 0, 0, 212, 0, + 219, 714, 749, 670, 716, 233, 273, 239, 232, 404, + 719, 765, 631, 711, 0, 634, 637, 761, 745, 665, + 666, 0, 0, 0, 0, 0, 0, 0, 689, 698, + 730, 683, 0, 0, 0, 0, 0, 0, 0, 0, + 663, 0, 707, 0, 0, 0, 642, 635, 0, 0, + 0, 0, 687, 2131, 0, 0, 645, 0, 664, 731, + 0, 629, 259, 639, 313, 2140, 735, 744, 684, 435, + 748, 682, 681, 751, 726, 643, 741, 676, 284, 641, + 281, 186, 201, 0, 674, 323, 362, 368, 740, 660, + 669, 224, 667, 366, 337, 421, 208, 249, 359, 342, + 364, 706, 724, 365, 290, 409, 354, 419, 436, 437, + 231, 317, 427, 401, 433, 447, 202, 228, 331, 394, + 424, 385, 310, 405, 406, 280, 384, 257, 189, 288, + 444, 200, 374, 216, 193, 396, 417, 213, 377, 0, + 0, 0, 195, 415, 393, 307, 277, 278, 194, 0, + 358, 235, 255, 226, 326, 412, 413, 225, 449, 204, + 432, 197, 940, 431, 319, 408, 416, 308, 299, 196, + 414, 306, 298, 283, 245, 265, 352, 293, 353, 266, + 315, 314, 316, 0, 191, 0, 390, 425, 450, 210, + 655, 736, 403, 441, 446, 0, 355, 211, 256, 244, + 351, 254, 286, 440, 442, 443, 445, 209, 349, 262, + 330, 420, 248, 428, 318, 205, 268, 386, 282, 291, + 728, 764, 336, 367, 214, 423, 387, 650, 654, 648, + 649, 700, 701, 651, 756, 757, 758, 732, 644, 0, + 652, 653, 0, 738, 746, 747, 705, 185, 198, 287, + 760, 356, 252, 448, 430, 426, 630, 647, 230, 658, + 0, 0, 671, 678, 679, 691, 693, 694, 695, 696, + 704, 712, 713, 715, 723, 725, 727, 729, 734, 743, + 763, 187, 188, 199, 207, 217, 229, 242, 250, 260, + 264, 267, 270, 271, 274, 279, 296, 301, 302, 303, + 304, 320, 321, 322, 325, 328, 329, 332, 334, 335, + 338, 344, 345, 
346, 347, 348, 350, 357, 361, 369, + 370, 371, 372, 373, 375, 376, 380, 381, 382, 383, + 391, 395, 410, 411, 422, 434, 438, 261, 418, 439, + 0, 295, 703, 710, 297, 246, 263, 272, 718, 429, + 392, 203, 363, 253, 192, 220, 206, 227, 241, 243, + 276, 305, 311, 340, 343, 258, 238, 218, 360, 215, + 378, 398, 399, 400, 402, 309, 234, 750, 737, 0, + 0, 686, 753, 657, 675, 762, 677, 680, 720, 636, + 699, 327, 672, 0, 661, 632, 668, 633, 659, 688, + 237, 692, 656, 739, 702, 752, 285, 0, 638, 662, + 341, 722, 379, 223, 294, 292, 407, 247, 240, 236, + 222, 269, 300, 339, 397, 333, 759, 289, 709, 0, + 388, 312, 0, 0, 0, 690, 742, 697, 733, 685, + 721, 646, 708, 754, 673, 717, 755, 275, 221, 190, + 324, 389, 251, 0, 0, 0, 182, 183, 184, 0, + 0, 0, 0, 0, 0, 0, 0, 212, 0, 219, + 714, 749, 670, 716, 233, 273, 239, 232, 404, 719, + 765, 631, 711, 0, 634, 637, 761, 745, 665, 666, + 0, 0, 0, 0, 0, 0, 0, 689, 698, 730, + 683, 0, 0, 0, 0, 0, 0, 1920, 0, 663, + 0, 707, 0, 0, 0, 642, 635, 0, 0, 0, + 0, 687, 0, 0, 0, 645, 0, 664, 731, 0, + 629, 259, 639, 313, 0, 735, 744, 684, 435, 748, + 682, 681, 751, 726, 643, 741, 676, 284, 641, 281, + 186, 201, 0, 674, 323, 362, 368, 740, 660, 669, + 224, 667, 366, 337, 421, 208, 249, 359, 342, 364, + 706, 724, 365, 290, 409, 354, 419, 436, 437, 231, + 317, 427, 401, 433, 447, 202, 228, 331, 394, 424, + 385, 310, 405, 406, 280, 384, 257, 189, 288, 444, + 200, 374, 216, 193, 396, 417, 213, 377, 0, 0, + 0, 195, 415, 393, 307, 277, 278, 194, 0, 358, + 235, 255, 226, 326, 412, 413, 225, 449, 204, 432, + 197, 940, 431, 319, 408, 416, 308, 299, 196, 414, + 306, 298, 283, 245, 265, 352, 293, 353, 266, 315, + 314, 316, 0, 191, 0, 390, 425, 450, 210, 655, + 736, 403, 441, 446, 0, 355, 211, 256, 244, 351, + 254, 286, 440, 442, 443, 445, 209, 349, 262, 330, + 420, 248, 428, 318, 205, 268, 386, 282, 291, 728, + 764, 336, 367, 214, 423, 387, 650, 654, 648, 649, + 700, 701, 651, 756, 757, 758, 732, 644, 0, 652, + 653, 0, 738, 746, 747, 705, 185, 198, 287, 760, + 
356, 252, 448, 430, 426, 630, 647, 230, 658, 0, + 0, 671, 678, 679, 691, 693, 694, 695, 696, 704, + 712, 713, 715, 723, 725, 727, 729, 734, 743, 763, + 187, 188, 199, 207, 217, 229, 242, 250, 260, 264, + 267, 270, 271, 274, 279, 296, 301, 302, 303, 304, + 320, 321, 322, 325, 328, 329, 332, 334, 335, 338, + 344, 345, 346, 347, 348, 350, 357, 361, 369, 370, + 371, 372, 373, 375, 376, 380, 381, 382, 383, 391, + 395, 410, 411, 422, 434, 438, 261, 418, 439, 0, + 295, 703, 710, 297, 246, 263, 272, 718, 429, 392, + 203, 363, 253, 192, 220, 206, 227, 241, 243, 276, + 305, 311, 340, 343, 258, 238, 218, 360, 215, 378, + 398, 399, 400, 402, 309, 234, 750, 737, 0, 0, + 686, 753, 657, 675, 762, 677, 680, 720, 636, 699, + 327, 672, 0, 661, 632, 668, 633, 659, 688, 237, + 692, 656, 739, 702, 752, 285, 0, 638, 662, 341, + 722, 379, 223, 294, 292, 407, 247, 240, 236, 222, + 269, 300, 339, 397, 333, 759, 289, 709, 0, 388, + 312, 0, 0, 0, 690, 742, 697, 733, 685, 721, + 646, 708, 754, 673, 717, 755, 275, 221, 190, 324, + 389, 251, 0, 0, 0, 182, 183, 184, 0, 0, + 0, 0, 0, 0, 0, 0, 212, 0, 219, 714, + 749, 670, 716, 233, 273, 239, 232, 404, 719, 765, + 631, 711, 0, 634, 637, 761, 745, 665, 666, 0, + 0, 0, 0, 0, 0, 0, 689, 698, 730, 683, + 0, 0, 0, 0, 0, 0, 1773, 0, 663, 0, + 707, 0, 0, 0, 642, 635, 0, 0, 0, 0, + 687, 0, 0, 0, 645, 0, 664, 731, 0, 629, + 259, 639, 313, 0, 735, 744, 684, 435, 748, 682, + 681, 751, 726, 643, 741, 676, 284, 641, 281, 186, + 201, 0, 674, 323, 362, 368, 740, 660, 669, 224, + 667, 366, 337, 421, 208, 249, 359, 342, 364, 706, + 724, 365, 290, 409, 354, 419, 436, 437, 231, 317, + 427, 401, 433, 447, 202, 228, 331, 394, 424, 385, + 310, 405, 406, 280, 384, 257, 189, 288, 444, 200, + 374, 216, 193, 396, 417, 213, 377, 0, 0, 0, + 195, 415, 393, 307, 277, 278, 194, 0, 358, 235, + 255, 226, 326, 412, 413, 225, 449, 204, 432, 197, + 940, 431, 319, 408, 416, 308, 299, 196, 414, 306, + 298, 283, 245, 265, 352, 293, 353, 266, 315, 314, + 316, 0, 191, 0, 390, 425, 450, 
210, 655, 736, + 403, 441, 446, 0, 355, 211, 256, 244, 351, 254, + 286, 440, 442, 443, 445, 209, 349, 262, 330, 420, + 248, 428, 318, 205, 268, 386, 282, 291, 728, 764, + 336, 367, 214, 423, 387, 650, 654, 648, 649, 700, + 701, 651, 756, 757, 758, 732, 644, 0, 652, 653, + 0, 738, 746, 747, 705, 185, 198, 287, 760, 356, + 252, 448, 430, 426, 630, 647, 230, 658, 0, 0, + 671, 678, 679, 691, 693, 694, 695, 696, 704, 712, + 713, 715, 723, 725, 727, 729, 734, 743, 763, 187, + 188, 199, 207, 217, 229, 242, 250, 260, 264, 267, + 270, 271, 274, 279, 296, 301, 302, 303, 304, 320, + 321, 322, 325, 328, 329, 332, 334, 335, 338, 344, + 345, 346, 347, 348, 350, 357, 361, 369, 370, 371, + 372, 373, 375, 376, 380, 381, 382, 383, 391, 395, + 410, 411, 422, 434, 438, 261, 418, 439, 0, 295, + 703, 710, 297, 246, 263, 272, 718, 429, 392, 203, + 363, 253, 192, 220, 206, 227, 241, 243, 276, 305, + 311, 340, 343, 258, 238, 218, 360, 215, 378, 398, + 399, 400, 402, 309, 234, 750, 737, 0, 0, 686, + 753, 657, 675, 762, 677, 680, 720, 636, 699, 327, + 672, 0, 661, 632, 668, 633, 659, 688, 237, 692, + 656, 739, 702, 752, 285, 0, 638, 662, 341, 722, + 379, 223, 294, 292, 407, 247, 240, 236, 222, 269, + 300, 339, 397, 333, 759, 289, 709, 0, 388, 312, + 0, 0, 0, 690, 742, 697, 733, 685, 721, 646, + 708, 754, 673, 717, 755, 275, 221, 190, 324, 389, + 251, 0, 0, 0, 182, 183, 184, 0, 0, 0, + 0, 0, 0, 0, 0, 212, 0, 219, 714, 749, + 670, 716, 233, 273, 239, 232, 404, 719, 765, 631, + 711, 0, 634, 637, 761, 745, 665, 666, 0, 0, + 0, 0, 0, 0, 0, 689, 698, 730, 683, 0, + 0, 0, 0, 0, 0, 1488, 0, 663, 0, 707, + 0, 0, 0, 642, 635, 0, 0, 0, 0, 687, + 0, 0, 0, 645, 0, 664, 731, 0, 629, 259, + 639, 313, 0, 735, 744, 684, 435, 748, 682, 681, + 751, 726, 643, 741, 676, 284, 641, 281, 186, 201, + 0, 674, 323, 362, 368, 740, 660, 669, 224, 667, + 366, 337, 421, 208, 249, 359, 342, 364, 706, 724, + 365, 290, 409, 354, 419, 436, 437, 231, 317, 427, + 401, 433, 447, 202, 228, 331, 394, 424, 385, 310, + 405, 406, 
280, 384, 257, 189, 288, 444, 200, 374, + 216, 193, 396, 417, 213, 377, 0, 0, 0, 195, + 415, 393, 307, 277, 278, 194, 0, 358, 235, 255, + 226, 326, 412, 413, 225, 449, 204, 432, 197, 940, + 431, 319, 408, 416, 308, 299, 196, 414, 306, 298, + 283, 245, 265, 352, 293, 353, 266, 315, 314, 316, + 0, 191, 0, 390, 425, 450, 210, 655, 736, 403, + 441, 446, 0, 355, 211, 256, 244, 351, 254, 286, + 440, 442, 443, 445, 209, 349, 262, 330, 420, 248, + 428, 318, 205, 268, 386, 282, 291, 728, 764, 336, + 367, 214, 423, 387, 650, 654, 648, 649, 700, 701, + 651, 756, 757, 758, 732, 644, 0, 652, 653, 0, + 738, 746, 747, 705, 185, 198, 287, 760, 356, 252, + 448, 430, 426, 630, 647, 230, 658, 0, 0, 671, + 678, 679, 691, 693, 694, 695, 696, 704, 712, 713, + 715, 723, 725, 727, 729, 734, 743, 763, 187, 188, + 199, 207, 217, 229, 242, 250, 260, 264, 267, 270, + 271, 274, 279, 296, 301, 302, 303, 304, 320, 321, + 322, 325, 328, 329, 332, 334, 335, 338, 344, 345, + 346, 347, 348, 350, 357, 361, 369, 370, 371, 372, + 373, 375, 376, 380, 381, 382, 383, 391, 395, 410, + 411, 422, 434, 438, 261, 418, 439, 0, 295, 703, + 710, 297, 246, 263, 272, 718, 429, 392, 203, 363, + 253, 192, 220, 206, 227, 241, 243, 276, 305, 311, + 340, 343, 258, 238, 218, 360, 215, 378, 398, 399, + 400, 402, 309, 234, 750, 737, 0, 0, 686, 753, + 657, 675, 762, 677, 680, 720, 636, 699, 327, 672, + 0, 661, 632, 668, 633, 659, 688, 237, 692, 656, + 739, 702, 752, 285, 0, 638, 662, 341, 722, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 759, 289, 709, 0, 388, 312, 0, + 0, 0, 690, 742, 697, 733, 685, 721, 646, 708, + 754, 673, 717, 755, 275, 221, 190, 324, 389, 251, + 71, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 714, 749, 670, + 716, 233, 273, 239, 232, 404, 719, 765, 631, 711, + 0, 634, 637, 761, 745, 665, 666, 0, 0, 0, + 0, 0, 0, 0, 689, 698, 730, 683, 0, 0, + 0, 0, 0, 0, 0, 0, 663, 0, 707, 0, + 0, 0, 642, 635, 0, 0, 0, 0, 687, 0, + 0, 0, 645, 0, 664, 731, 0, 629, 259, 639, 
+ 313, 0, 735, 744, 684, 435, 748, 682, 681, 751, + 726, 643, 741, 676, 284, 641, 281, 186, 201, 0, + 674, 323, 362, 368, 740, 660, 669, 224, 667, 366, + 337, 421, 208, 249, 359, 342, 364, 706, 724, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 940, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 655, 736, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 728, 764, 336, 367, + 214, 423, 387, 650, 654, 648, 649, 700, 701, 651, + 756, 757, 758, 732, 644, 0, 652, 653, 0, 738, + 746, 747, 705, 185, 198, 287, 760, 356, 252, 448, + 430, 426, 630, 647, 230, 658, 0, 0, 671, 678, + 679, 691, 693, 694, 695, 696, 704, 712, 713, 715, + 723, 725, 727, 729, 734, 743, 763, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 703, 710, + 297, 246, 263, 272, 718, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 750, 737, 0, 0, 686, 753, 657, + 675, 762, 677, 680, 720, 636, 699, 327, 672, 0, + 661, 632, 668, 633, 659, 688, 237, 692, 656, 739, + 702, 752, 285, 0, 638, 662, 341, 722, 379, 223, + 294, 292, 407, 247, 240, 236, 222, 269, 300, 339, + 397, 333, 759, 289, 709, 0, 388, 312, 0, 0, + 0, 690, 742, 697, 733, 685, 721, 646, 708, 754, + 673, 717, 755, 275, 221, 190, 324, 389, 251, 0, + 0, 0, 182, 183, 
184, 0, 0, 0, 0, 0, + 0, 0, 0, 212, 0, 219, 714, 749, 670, 716, + 233, 273, 239, 232, 404, 719, 765, 631, 711, 0, + 634, 637, 761, 745, 665, 666, 0, 0, 0, 0, + 0, 0, 0, 689, 698, 730, 683, 0, 0, 0, + 0, 0, 0, 0, 0, 663, 0, 707, 0, 0, + 0, 642, 635, 0, 0, 0, 0, 687, 0, 0, + 0, 645, 0, 664, 731, 0, 629, 259, 639, 313, + 0, 735, 744, 684, 435, 748, 682, 681, 751, 726, + 643, 741, 676, 284, 641, 281, 186, 201, 0, 674, + 323, 362, 368, 740, 660, 669, 224, 667, 366, 337, + 421, 208, 249, 359, 342, 364, 706, 724, 365, 290, + 409, 354, 419, 436, 437, 231, 317, 427, 401, 433, + 447, 202, 228, 331, 394, 424, 385, 310, 405, 406, + 280, 384, 257, 189, 288, 444, 200, 374, 216, 193, + 396, 417, 213, 377, 0, 0, 0, 195, 415, 393, + 307, 277, 278, 194, 0, 358, 235, 255, 226, 326, + 412, 413, 225, 449, 204, 432, 197, 940, 431, 319, + 408, 416, 308, 299, 196, 414, 306, 298, 283, 245, + 265, 352, 293, 353, 266, 315, 314, 316, 0, 191, + 0, 390, 425, 450, 210, 655, 736, 403, 441, 446, + 0, 355, 211, 256, 244, 351, 254, 286, 440, 442, + 443, 445, 209, 349, 262, 330, 420, 248, 428, 318, + 205, 268, 386, 282, 291, 728, 764, 336, 367, 214, + 423, 387, 650, 654, 648, 649, 700, 701, 651, 756, + 757, 758, 732, 644, 0, 652, 653, 0, 738, 746, + 747, 705, 185, 198, 287, 760, 356, 252, 448, 430, + 426, 630, 647, 230, 658, 0, 0, 671, 678, 679, + 691, 693, 694, 695, 696, 704, 712, 713, 715, 723, + 725, 727, 729, 734, 743, 763, 187, 188, 199, 207, + 217, 229, 242, 250, 260, 264, 267, 270, 271, 274, + 279, 296, 301, 302, 303, 304, 320, 321, 322, 325, + 328, 329, 332, 334, 335, 338, 344, 345, 346, 347, + 348, 350, 357, 361, 369, 370, 371, 372, 373, 375, + 376, 380, 381, 382, 383, 391, 395, 410, 411, 422, + 434, 438, 261, 418, 439, 0, 295, 703, 710, 297, + 246, 263, 272, 718, 429, 392, 203, 363, 253, 192, + 220, 206, 227, 241, 243, 276, 305, 311, 340, 343, + 258, 238, 218, 360, 215, 378, 398, 399, 400, 402, + 309, 234, 750, 737, 0, 0, 686, 753, 657, 675, + 762, 677, 680, 720, 636, 699, 327, 672, 0, 661, 
+ 632, 668, 633, 659, 688, 237, 692, 656, 739, 702, + 752, 285, 0, 638, 662, 341, 722, 379, 223, 294, + 292, 407, 247, 240, 236, 222, 269, 300, 339, 397, + 333, 759, 289, 709, 0, 388, 312, 0, 0, 0, + 690, 742, 697, 733, 685, 721, 646, 708, 754, 673, + 717, 755, 275, 221, 190, 324, 389, 251, 0, 0, + 0, 182, 183, 184, 0, 0, 0, 0, 0, 0, + 0, 0, 212, 0, 219, 714, 749, 670, 716, 233, + 273, 239, 232, 404, 719, 765, 631, 711, 0, 634, + 637, 761, 745, 665, 666, 0, 0, 0, 0, 0, + 0, 0, 689, 698, 730, 683, 0, 0, 0, 0, + 0, 0, 0, 0, 663, 0, 707, 0, 0, 0, + 642, 635, 0, 0, 0, 0, 687, 0, 0, 0, + 645, 0, 664, 731, 0, 629, 259, 639, 313, 0, + 735, 744, 684, 435, 748, 682, 681, 751, 726, 643, + 741, 676, 284, 641, 281, 186, 201, 0, 674, 323, + 362, 368, 740, 660, 669, 224, 667, 366, 337, 421, + 208, 249, 359, 342, 364, 706, 724, 365, 290, 409, + 354, 419, 436, 437, 231, 317, 427, 401, 433, 447, + 202, 228, 331, 394, 424, 385, 310, 405, 406, 280, + 384, 257, 189, 288, 444, 200, 374, 216, 193, 396, + 417, 213, 377, 0, 0, 0, 195, 415, 393, 307, + 277, 278, 194, 0, 358, 235, 255, 226, 326, 412, + 413, 225, 449, 204, 432, 197, 640, 431, 319, 408, + 416, 308, 299, 196, 414, 306, 298, 283, 245, 265, + 352, 293, 353, 266, 315, 314, 316, 0, 191, 0, + 390, 425, 450, 210, 655, 736, 403, 441, 446, 0, + 355, 211, 256, 244, 351, 254, 286, 440, 442, 443, + 445, 209, 349, 262, 330, 420, 248, 428, 628, 766, + 622, 621, 282, 291, 728, 764, 336, 367, 214, 423, + 387, 650, 654, 648, 649, 700, 701, 651, 756, 757, + 758, 732, 644, 0, 652, 653, 0, 738, 746, 747, + 705, 185, 198, 287, 760, 356, 252, 448, 430, 426, + 630, 647, 230, 658, 0, 0, 671, 678, 679, 691, + 693, 694, 695, 696, 704, 712, 713, 715, 723, 725, + 727, 729, 734, 743, 763, 187, 188, 199, 207, 217, + 229, 242, 250, 260, 264, 267, 270, 271, 274, 279, + 296, 301, 302, 303, 304, 320, 321, 322, 325, 328, + 329, 332, 334, 335, 338, 344, 345, 346, 347, 348, + 350, 357, 361, 369, 370, 371, 372, 373, 375, 376, + 380, 381, 382, 383, 391, 395, 410, 
411, 422, 434, + 438, 261, 418, 439, 0, 295, 703, 710, 297, 246, + 263, 272, 718, 429, 392, 203, 363, 253, 192, 220, + 206, 227, 241, 243, 276, 305, 311, 340, 343, 258, + 238, 218, 360, 215, 378, 398, 399, 400, 402, 309, + 234, 750, 737, 0, 0, 686, 753, 657, 675, 762, + 677, 680, 720, 636, 699, 327, 672, 0, 661, 632, + 668, 633, 659, 688, 237, 692, 656, 739, 702, 752, + 285, 0, 638, 662, 341, 722, 379, 223, 294, 292, + 407, 247, 240, 236, 222, 269, 300, 339, 397, 333, + 759, 289, 709, 0, 388, 312, 0, 0, 0, 690, + 742, 697, 733, 685, 721, 646, 708, 754, 673, 717, + 755, 275, 221, 190, 324, 389, 251, 0, 0, 0, + 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, + 0, 212, 0, 219, 714, 749, 670, 716, 233, 273, + 239, 232, 404, 719, 765, 631, 711, 0, 634, 637, + 761, 745, 665, 666, 0, 0, 0, 0, 0, 0, + 0, 689, 698, 730, 683, 0, 0, 0, 0, 0, + 0, 0, 0, 663, 0, 707, 0, 0, 0, 642, + 635, 0, 0, 0, 0, 687, 0, 0, 0, 645, + 0, 664, 731, 0, 629, 259, 639, 313, 0, 735, + 744, 684, 435, 748, 682, 681, 751, 726, 643, 741, + 676, 284, 641, 281, 186, 201, 0, 674, 323, 362, + 368, 740, 660, 669, 224, 667, 366, 337, 421, 208, + 249, 359, 342, 364, 706, 724, 365, 290, 409, 354, + 419, 436, 437, 231, 317, 427, 401, 433, 447, 202, + 228, 331, 394, 424, 385, 310, 405, 406, 280, 384, + 257, 189, 288, 444, 200, 374, 216, 193, 396, 1108, + 213, 377, 0, 0, 0, 195, 415, 393, 307, 277, + 278, 194, 0, 358, 235, 255, 226, 326, 412, 413, + 225, 449, 204, 432, 197, 640, 431, 319, 408, 416, + 308, 299, 196, 414, 306, 298, 283, 245, 265, 352, + 293, 353, 266, 315, 314, 316, 0, 191, 0, 390, + 425, 450, 210, 655, 736, 403, 441, 446, 0, 355, + 211, 256, 244, 351, 254, 286, 440, 442, 443, 445, + 209, 349, 262, 330, 420, 248, 428, 628, 766, 622, + 621, 282, 291, 728, 764, 336, 367, 214, 423, 387, + 650, 654, 648, 649, 700, 701, 651, 756, 757, 758, + 732, 644, 0, 652, 653, 0, 738, 746, 747, 705, + 185, 198, 287, 760, 356, 252, 448, 430, 426, 630, + 647, 230, 658, 0, 0, 671, 678, 679, 691, 693, + 694, 695, 696, 704, 712, 
713, 715, 723, 725, 727, + 729, 734, 743, 763, 187, 188, 199, 207, 217, 229, + 242, 250, 260, 264, 267, 270, 271, 274, 279, 296, + 301, 302, 303, 304, 320, 321, 322, 325, 328, 329, + 332, 334, 335, 338, 344, 345, 346, 347, 348, 350, + 357, 361, 369, 370, 371, 372, 373, 375, 376, 380, + 381, 382, 383, 391, 395, 410, 411, 422, 434, 438, + 261, 418, 439, 0, 295, 703, 710, 297, 246, 263, + 272, 718, 429, 392, 203, 363, 253, 192, 220, 206, + 227, 241, 243, 276, 305, 311, 340, 343, 258, 238, + 218, 360, 215, 378, 398, 399, 400, 402, 309, 234, + 750, 737, 0, 0, 686, 753, 657, 675, 762, 677, + 680, 720, 636, 699, 327, 672, 0, 661, 632, 668, + 633, 659, 688, 237, 692, 656, 739, 702, 752, 285, + 0, 638, 662, 341, 722, 379, 223, 294, 292, 407, + 247, 240, 236, 222, 269, 300, 339, 397, 333, 759, + 289, 709, 0, 388, 312, 0, 0, 0, 690, 742, + 697, 733, 685, 721, 646, 708, 754, 673, 717, 755, + 275, 221, 190, 324, 389, 251, 0, 0, 0, 182, + 183, 184, 0, 0, 0, 0, 0, 0, 0, 0, + 212, 0, 219, 714, 749, 670, 716, 233, 273, 239, + 232, 404, 719, 765, 631, 711, 0, 634, 637, 761, + 745, 665, 666, 0, 0, 0, 0, 0, 0, 0, + 689, 698, 730, 683, 0, 0, 0, 0, 0, 0, + 0, 0, 663, 0, 707, 0, 0, 0, 642, 635, + 0, 0, 0, 0, 687, 0, 0, 0, 645, 0, + 664, 731, 0, 629, 259, 639, 313, 0, 735, 744, + 684, 435, 748, 682, 681, 751, 726, 643, 741, 676, + 284, 641, 281, 186, 201, 0, 674, 323, 362, 368, + 740, 660, 669, 224, 667, 366, 337, 421, 208, 249, + 359, 342, 364, 706, 724, 365, 290, 409, 354, 419, + 436, 437, 231, 317, 427, 401, 433, 447, 202, 228, + 331, 394, 424, 385, 310, 405, 406, 280, 384, 257, + 189, 288, 444, 200, 374, 216, 193, 396, 619, 213, + 377, 0, 0, 0, 195, 415, 393, 307, 277, 278, + 194, 0, 358, 235, 255, 226, 326, 412, 413, 225, + 449, 204, 432, 197, 640, 431, 319, 408, 416, 308, + 299, 196, 414, 306, 298, 283, 245, 265, 352, 293, + 353, 266, 315, 314, 316, 0, 191, 0, 390, 425, + 450, 210, 655, 736, 403, 441, 446, 0, 355, 211, + 256, 244, 351, 254, 286, 440, 442, 443, 445, 209, + 349, 262, 
330, 420, 248, 428, 628, 766, 622, 621, + 282, 291, 728, 764, 336, 367, 214, 423, 387, 650, + 654, 648, 649, 700, 701, 651, 756, 757, 758, 732, + 644, 0, 652, 653, 0, 738, 746, 747, 705, 185, + 198, 287, 760, 356, 252, 448, 430, 426, 630, 647, + 230, 658, 0, 0, 671, 678, 679, 691, 693, 694, + 695, 696, 704, 712, 713, 715, 723, 725, 727, 729, + 734, 743, 763, 187, 188, 199, 207, 217, 229, 242, + 250, 260, 264, 267, 270, 271, 274, 279, 296, 301, + 302, 303, 304, 320, 321, 322, 325, 328, 329, 332, + 334, 335, 338, 344, 345, 346, 347, 348, 350, 357, + 361, 369, 370, 371, 372, 373, 375, 376, 380, 381, + 382, 383, 391, 395, 410, 411, 422, 434, 438, 261, + 418, 439, 0, 295, 703, 710, 297, 246, 263, 272, + 718, 429, 392, 203, 363, 253, 192, 220, 206, 227, + 241, 243, 276, 305, 311, 340, 343, 258, 238, 218, + 360, 215, 378, 398, 399, 400, 402, 309, 234, 327, + 0, 0, 1415, 0, 519, 0, 0, 0, 237, 0, + 518, 0, 0, 0, 285, 0, 0, 1416, 341, 0, + 379, 223, 294, 292, 407, 247, 240, 236, 222, 269, + 300, 339, 397, 333, 562, 289, 0, 0, 388, 312, + 0, 0, 0, 0, 0, 553, 554, 0, 0, 0, + 0, 0, 0, 0, 0, 275, 221, 190, 324, 389, + 251, 71, 0, 0, 182, 183, 184, 540, 539, 542, + 543, 544, 545, 0, 0, 212, 541, 219, 546, 547, + 548, 0, 233, 273, 239, 232, 404, 0, 0, 0, + 516, 533, 0, 561, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 530, 531, 609, 0, 0, 0, 577, + 0, 532, 0, 0, 525, 526, 528, 527, 529, 534, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 259, + 0, 313, 0, 576, 0, 0, 435, 0, 0, 574, + 0, 0, 0, 0, 0, 284, 0, 281, 186, 201, + 0, 0, 323, 362, 368, 0, 0, 0, 224, 0, + 366, 337, 421, 208, 249, 359, 342, 364, 0, 0, + 365, 290, 409, 354, 419, 436, 437, 231, 317, 427, + 401, 433, 447, 202, 228, 331, 394, 424, 385, 310, + 405, 406, 280, 384, 257, 189, 288, 444, 200, 374, + 216, 193, 396, 417, 213, 377, 0, 0, 0, 195, + 415, 393, 307, 277, 278, 194, 0, 358, 235, 255, + 226, 326, 412, 413, 225, 449, 204, 432, 197, 0, + 431, 319, 408, 416, 308, 299, 196, 414, 306, 298, + 283, 245, 265, 352, 293, 
353, 266, 315, 314, 316, + 0, 191, 0, 390, 425, 450, 210, 0, 0, 403, + 441, 446, 0, 355, 211, 256, 244, 351, 254, 286, + 440, 442, 443, 445, 209, 349, 262, 330, 420, 248, + 428, 318, 205, 268, 386, 282, 291, 0, 0, 336, + 367, 214, 423, 387, 564, 575, 570, 571, 568, 569, + 563, 567, 566, 565, 578, 555, 556, 557, 558, 560, + 0, 572, 573, 559, 185, 198, 287, 0, 356, 252, + 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 187, 188, + 199, 207, 217, 229, 242, 250, 260, 264, 267, 270, + 271, 274, 279, 296, 301, 302, 303, 304, 320, 321, + 322, 325, 328, 329, 332, 334, 335, 338, 344, 345, + 346, 347, 348, 350, 357, 361, 369, 370, 371, 372, + 373, 375, 376, 380, 381, 382, 383, 391, 395, 410, + 411, 422, 434, 438, 261, 418, 439, 0, 295, 0, + 0, 297, 246, 263, 272, 0, 429, 392, 203, 363, + 253, 192, 220, 206, 227, 241, 243, 276, 305, 311, + 340, 343, 258, 238, 218, 360, 215, 378, 398, 399, + 400, 402, 309, 234, 327, 0, 0, 0, 0, 519, + 0, 0, 0, 237, 0, 518, 0, 0, 0, 285, + 0, 0, 0, 341, 0, 379, 223, 294, 292, 407, + 247, 240, 236, 222, 269, 300, 339, 397, 333, 562, + 289, 0, 0, 388, 312, 0, 0, 0, 0, 0, + 553, 554, 0, 0, 0, 0, 0, 0, 1527, 0, + 275, 221, 190, 324, 389, 251, 71, 0, 0, 182, + 183, 184, 540, 539, 542, 543, 544, 545, 0, 0, + 212, 541, 219, 546, 547, 548, 1528, 233, 273, 239, + 232, 404, 0, 0, 0, 516, 533, 0, 561, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 530, 531, + 0, 0, 0, 0, 577, 0, 532, 0, 0, 525, + 526, 528, 527, 529, 534, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 259, 0, 313, 0, 576, 0, + 0, 435, 0, 0, 574, 0, 0, 0, 0, 0, + 284, 0, 281, 186, 201, 0, 0, 323, 362, 368, + 0, 0, 0, 224, 0, 366, 337, 421, 208, 249, + 359, 342, 364, 0, 0, 365, 290, 409, 354, 419, + 436, 437, 231, 317, 427, 401, 433, 447, 202, 228, + 331, 394, 424, 385, 310, 405, 406, 280, 384, 257, + 189, 288, 444, 200, 374, 216, 193, 396, 417, 213, + 377, 0, 0, 0, 195, 415, 393, 307, 277, 278, + 194, 0, 358, 235, 255, 226, 326, 412, 413, 225, + 
449, 204, 432, 197, 0, 431, 319, 408, 416, 308, + 299, 196, 414, 306, 298, 283, 245, 265, 352, 293, + 353, 266, 315, 314, 316, 0, 191, 0, 390, 425, + 450, 210, 0, 0, 403, 441, 446, 0, 355, 211, + 256, 244, 351, 254, 286, 440, 442, 443, 445, 209, + 349, 262, 330, 420, 248, 428, 318, 205, 268, 386, + 282, 291, 0, 0, 336, 367, 214, 423, 387, 564, + 575, 570, 571, 568, 569, 563, 567, 566, 565, 578, + 555, 556, 557, 558, 560, 0, 572, 573, 559, 185, + 198, 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 189, 190, 201, 208, 217, 229, 242, - 248, 257, 260, 263, 266, 267, 269, 274, 291, 295, - 296, 297, 298, 314, 315, 316, 319, 322, 323, 325, - 327, 328, 331, 337, 338, 339, 340, 341, 343, 350, - 354, 361, 362, 363, 364, 365, 366, 367, 371, 372, - 373, 374, 382, 385, 399, 400, 410, 420, 424, 258, - 407, 425, 0, 290, 0, 194, 220, 207, 227, 241, - 243, 271, 299, 305, 333, 336, 255, 238, 218, 353, - 216, 369, 388, 389, 390, 392, 303, 234, 321, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 0, 0, 0, 280, 0, 0, 334, 0, 370, 223, - 289, 287, 396, 246, 240, 236, 222, 265, 294, 332, - 387, 326, 533, 284, 0, 0, 379, 306, 0, 0, - 0, 0, 0, 524, 525, 0, 0, 0, 0, 0, - 0, 0, 0, 270, 221, 192, 318, 380, 249, 67, - 0, 565, 174, 175, 176, 511, 510, 513, 514, 515, - 516, 0, 0, 213, 512, 219, 517, 518, 519, 0, - 233, 268, 239, 232, 394, 0, 0, 0, 0, 504, - 0, 532, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 501, 502, 0, 0, 0, 0, 548, 0, 503, - 0, 0, 496, 497, 499, 498, 500, 505, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 256, 0, 307, - 547, 0, 0, 421, 0, 0, 545, 0, 0, 0, - 0, 0, 279, 0, 276, 188, 202, 0, 0, 317, - 355, 360, 0, 0, 0, 224, 0, 358, 330, 409, - 209, 247, 352, 335, 356, 0, 0, 357, 285, 398, - 347, 408, 422, 423, 231, 311, 415, 391, 419, 431, - 203, 228, 324, 384, 412, 376, 304, 395, 275, 375, - 254, 191, 283, 195, 386, 406, 214, 368, 0, 0, - 0, 197, 404, 383, 301, 272, 273, 196, 0, 351, - 235, 252, 226, 320, 401, 402, 225, 
433, 204, 418, - 199, 205, 417, 313, 397, 405, 302, 293, 198, 403, - 300, 292, 278, 245, 261, 345, 288, 346, 262, 309, - 308, 310, 0, 193, 0, 381, 413, 434, 211, 0, - 0, 393, 427, 430, 0, 348, 212, 253, 244, 344, - 251, 281, 426, 428, 429, 210, 342, 259, 312, 206, - 264, 377, 277, 286, 0, 0, 329, 359, 215, 411, - 378, 535, 546, 541, 542, 539, 540, 534, 538, 537, - 536, 549, 526, 527, 528, 529, 531, 0, 543, 544, - 530, 187, 200, 282, 0, 349, 250, 432, 416, 414, - 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 189, 190, 201, 208, 217, - 229, 242, 248, 257, 260, 263, 266, 267, 269, 274, - 291, 295, 296, 297, 298, 314, 315, 316, 319, 322, - 323, 325, 327, 328, 331, 337, 338, 339, 340, 341, - 343, 350, 354, 361, 362, 363, 364, 365, 366, 367, - 371, 372, 373, 374, 382, 385, 399, 400, 410, 420, - 424, 258, 407, 425, 0, 290, 0, 194, 220, 207, - 227, 241, 243, 271, 299, 305, 333, 336, 255, 238, - 218, 353, 216, 369, 388, 389, 390, 392, 303, 234, - 321, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 0, 0, 0, 280, 0, 0, 334, 0, - 370, 223, 289, 287, 396, 246, 240, 236, 222, 265, - 294, 332, 387, 326, 533, 284, 0, 0, 379, 306, - 0, 0, 0, 0, 0, 524, 525, 0, 0, 0, - 0, 0, 0, 0, 0, 270, 221, 192, 318, 380, - 249, 67, 0, 0, 174, 175, 176, 511, 510, 513, - 514, 515, 516, 0, 0, 213, 512, 219, 517, 518, - 519, 0, 233, 268, 239, 232, 394, 0, 0, 0, - 0, 504, 0, 532, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 501, 502, 0, 0, 0, 0, 548, - 0, 503, 0, 0, 496, 497, 499, 498, 500, 505, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, - 0, 307, 547, 0, 0, 421, 0, 0, 545, 0, - 0, 0, 0, 0, 279, 0, 276, 188, 202, 0, - 0, 317, 355, 360, 0, 0, 0, 224, 0, 358, - 330, 409, 209, 247, 352, 335, 356, 0, 0, 357, - 285, 398, 347, 408, 422, 423, 231, 311, 415, 391, - 419, 431, 203, 228, 324, 384, 412, 376, 304, 395, - 275, 375, 254, 191, 283, 195, 386, 406, 214, 368, - 0, 0, 0, 197, 404, 383, 301, 272, 273, 196, - 0, 351, 235, 252, 226, 320, 401, 402, 225, 433, - 204, 418, 199, 
205, 417, 313, 397, 405, 302, 293, - 198, 403, 300, 292, 278, 245, 261, 345, 288, 346, - 262, 309, 308, 310, 0, 193, 0, 381, 413, 434, - 211, 0, 0, 393, 427, 430, 0, 348, 212, 253, - 244, 344, 251, 281, 426, 428, 429, 210, 342, 259, - 312, 206, 264, 377, 277, 286, 0, 0, 329, 359, - 215, 411, 378, 535, 546, 541, 542, 539, 540, 534, - 538, 537, 536, 549, 526, 527, 528, 529, 531, 0, - 543, 544, 530, 187, 200, 282, 0, 349, 250, 432, - 416, 414, 0, 0, 230, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 189, 190, 201, - 208, 217, 229, 242, 248, 257, 260, 263, 266, 267, - 269, 274, 291, 295, 296, 297, 298, 314, 315, 316, - 319, 322, 323, 325, 327, 328, 331, 337, 338, 339, - 340, 341, 343, 350, 354, 361, 362, 363, 364, 365, - 366, 367, 371, 372, 373, 374, 382, 385, 399, 400, - 410, 420, 424, 258, 407, 425, 0, 290, 0, 194, - 220, 207, 227, 241, 243, 271, 299, 305, 333, 336, - 255, 238, 218, 353, 216, 369, 388, 389, 390, 392, - 303, 234, 321, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 0, 0, 0, 280, 0, 0, - 334, 0, 370, 223, 289, 287, 396, 246, 240, 236, - 222, 265, 294, 332, 387, 326, 0, 284, 0, 0, - 379, 306, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 270, 221, 192, - 318, 380, 249, 0, 0, 0, 174, 175, 176, 0, - 0, 0, 0, 0, 0, 0, 0, 213, 0, 219, - 0, 0, 0, 0, 233, 268, 239, 232, 394, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 922, 921, 931, 932, 924, 925, 926, 927, 928, - 929, 930, 923, 0, 0, 933, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 256, 0, 307, 0, 0, 0, 421, 0, 0, - 0, 0, 0, 0, 0, 0, 279, 0, 276, 188, - 202, 0, 0, 317, 355, 360, 0, 0, 0, 224, - 0, 358, 330, 409, 209, 247, 352, 335, 356, 0, - 0, 357, 285, 398, 347, 408, 422, 423, 231, 311, - 415, 391, 419, 431, 203, 228, 324, 384, 412, 376, - 304, 395, 275, 375, 254, 191, 283, 195, 386, 406, - 214, 368, 0, 0, 0, 197, 404, 383, 301, 272, - 273, 196, 0, 351, 235, 252, 226, 320, 401, 402, - 225, 433, 204, 418, 199, 205, 417, 313, 397, 405, - 302, 293, 198, 403, 300, 
292, 278, 245, 261, 345, - 288, 346, 262, 309, 308, 310, 0, 193, 0, 381, - 413, 434, 211, 0, 0, 393, 427, 430, 0, 348, - 212, 253, 244, 344, 251, 281, 426, 428, 429, 210, - 342, 259, 312, 206, 264, 377, 277, 286, 0, 0, - 329, 359, 215, 411, 378, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 187, 200, 282, 0, 349, - 250, 432, 416, 414, 0, 0, 230, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 189, - 190, 201, 208, 217, 229, 242, 248, 257, 260, 263, - 266, 267, 269, 274, 291, 295, 296, 297, 298, 314, - 315, 316, 319, 322, 323, 325, 327, 328, 331, 337, - 338, 339, 340, 341, 343, 350, 354, 361, 362, 363, - 364, 365, 366, 367, 371, 372, 373, 374, 382, 385, - 399, 400, 410, 420, 424, 258, 407, 425, 0, 290, - 0, 194, 220, 207, 227, 241, 243, 271, 299, 305, - 333, 336, 255, 238, 218, 353, 216, 369, 388, 389, - 390, 392, 303, 234, 321, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 772, 0, 0, 0, 0, 280, - 0, 0, 334, 0, 370, 223, 289, 287, 396, 246, - 240, 236, 222, 265, 294, 332, 387, 326, 0, 284, - 0, 0, 379, 306, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 270, - 221, 192, 318, 380, 249, 0, 0, 0, 174, 175, - 176, 0, 0, 0, 0, 0, 0, 0, 0, 213, - 0, 219, 0, 0, 0, 0, 233, 268, 239, 232, - 394, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 256, 0, 307, 0, 0, 771, 421, - 0, 0, 0, 0, 0, 0, 768, 769, 279, 739, - 276, 188, 202, 762, 766, 317, 355, 360, 0, 0, - 0, 224, 0, 358, 330, 409, 209, 247, 352, 335, - 356, 0, 0, 357, 285, 398, 347, 408, 422, 423, - 231, 311, 415, 391, 419, 431, 203, 228, 324, 384, - 412, 376, 304, 395, 275, 375, 254, 191, 283, 195, - 386, 406, 214, 368, 0, 0, 0, 197, 404, 383, - 301, 272, 273, 196, 0, 351, 235, 252, 226, 320, - 401, 402, 225, 433, 204, 418, 199, 205, 417, 313, - 397, 405, 302, 293, 198, 403, 300, 292, 278, 245, - 261, 345, 288, 346, 262, 309, 308, 310, 0, 193, - 0, 381, 413, 434, 211, 0, 0, 393, 
427, 430, - 0, 348, 212, 253, 244, 344, 251, 281, 426, 428, - 429, 210, 342, 259, 312, 206, 264, 377, 277, 286, - 0, 0, 329, 359, 215, 411, 378, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 187, 200, 282, - 0, 349, 250, 432, 416, 414, 0, 0, 230, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 189, 190, 201, 208, 217, 229, 242, 248, 257, - 260, 263, 266, 267, 269, 274, 291, 295, 296, 297, - 298, 314, 315, 316, 319, 322, 323, 325, 327, 328, - 331, 337, 338, 339, 340, 341, 343, 350, 354, 361, - 362, 363, 364, 365, 366, 367, 371, 372, 373, 374, - 382, 385, 399, 400, 410, 420, 424, 258, 407, 425, - 0, 290, 0, 194, 220, 207, 227, 241, 243, 271, - 299, 305, 333, 336, 255, 238, 218, 353, 216, 369, - 388, 389, 390, 392, 303, 234, 321, 0, 0, 0, - 1023, 0, 0, 0, 0, 237, 0, 0, 0, 0, - 0, 280, 0, 0, 334, 0, 370, 223, 289, 287, - 396, 246, 240, 236, 222, 265, 294, 332, 387, 326, - 0, 284, 0, 0, 379, 306, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 270, 221, 192, 318, 380, 249, 0, 0, 0, - 174, 175, 176, 0, 1025, 0, 0, 0, 0, 0, - 0, 213, 0, 219, 0, 0, 0, 0, 233, 268, - 239, 232, 394, 911, 912, 910, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 913, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 256, 0, 307, 0, 0, - 0, 421, 0, 0, 0, 0, 0, 0, 0, 0, - 279, 0, 276, 188, 202, 0, 0, 317, 355, 360, - 0, 0, 0, 224, 0, 358, 330, 409, 209, 247, - 352, 335, 356, 0, 0, 357, 285, 398, 347, 408, - 422, 423, 231, 311, 415, 391, 419, 431, 203, 228, - 324, 384, 412, 376, 304, 395, 275, 375, 254, 191, - 283, 195, 386, 406, 214, 368, 0, 0, 0, 197, - 404, 383, 301, 272, 273, 196, 0, 351, 235, 252, - 226, 320, 401, 402, 225, 433, 204, 418, 199, 205, - 417, 313, 397, 405, 302, 293, 198, 403, 300, 292, - 278, 245, 261, 345, 288, 346, 262, 309, 308, 310, - 0, 193, 0, 381, 413, 434, 211, 0, 0, 393, - 427, 430, 0, 348, 212, 253, 244, 344, 251, 281, - 426, 428, 429, 210, 342, 259, 312, 206, 264, 377, 
- 277, 286, 0, 0, 329, 359, 215, 411, 378, 0, + 0, 0, 0, 187, 188, 199, 207, 217, 229, 242, + 250, 260, 264, 267, 270, 271, 274, 279, 296, 301, + 302, 303, 304, 320, 321, 322, 325, 328, 329, 332, + 334, 335, 338, 344, 345, 346, 347, 348, 350, 357, + 361, 369, 370, 371, 372, 373, 375, 376, 380, 381, + 382, 383, 391, 395, 410, 411, 422, 434, 438, 261, + 418, 439, 0, 295, 0, 0, 297, 246, 263, 272, + 0, 429, 392, 203, 363, 253, 192, 220, 206, 227, + 241, 243, 276, 305, 311, 340, 343, 258, 238, 218, + 360, 215, 378, 398, 399, 400, 402, 309, 234, 327, + 0, 0, 0, 0, 519, 0, 0, 0, 237, 0, + 518, 0, 0, 0, 285, 0, 0, 0, 341, 0, + 379, 223, 294, 292, 407, 247, 240, 236, 222, 269, + 300, 339, 397, 333, 562, 289, 0, 0, 388, 312, + 0, 0, 0, 0, 0, 553, 554, 0, 0, 0, + 0, 0, 0, 0, 0, 275, 221, 190, 324, 389, + 251, 71, 0, 596, 182, 183, 184, 540, 539, 542, + 543, 544, 545, 0, 0, 212, 541, 219, 546, 547, + 548, 0, 233, 273, 239, 232, 404, 0, 0, 0, + 516, 533, 0, 561, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 530, 531, 0, 0, 0, 0, 577, + 0, 532, 0, 0, 525, 526, 528, 527, 529, 534, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 259, + 0, 313, 0, 576, 0, 0, 435, 0, 0, 574, + 0, 0, 0, 0, 0, 284, 0, 281, 186, 201, + 0, 0, 323, 362, 368, 0, 0, 0, 224, 0, + 366, 337, 421, 208, 249, 359, 342, 364, 0, 0, + 365, 290, 409, 354, 419, 436, 437, 231, 317, 427, + 401, 433, 447, 202, 228, 331, 394, 424, 385, 310, + 405, 406, 280, 384, 257, 189, 288, 444, 200, 374, + 216, 193, 396, 417, 213, 377, 0, 0, 0, 195, + 415, 393, 307, 277, 278, 194, 0, 358, 235, 255, + 226, 326, 412, 413, 225, 449, 204, 432, 197, 0, + 431, 319, 408, 416, 308, 299, 196, 414, 306, 298, + 283, 245, 265, 352, 293, 353, 266, 315, 314, 316, + 0, 191, 0, 390, 425, 450, 210, 0, 0, 403, + 441, 446, 0, 355, 211, 256, 244, 351, 254, 286, + 440, 442, 443, 445, 209, 349, 262, 330, 420, 248, + 428, 318, 205, 268, 386, 282, 291, 0, 0, 336, + 367, 214, 423, 387, 564, 575, 570, 571, 568, 569, + 563, 567, 566, 565, 578, 555, 556, 557, 558, 
560, + 0, 572, 573, 559, 185, 198, 287, 0, 356, 252, + 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 187, 188, + 199, 207, 217, 229, 242, 250, 260, 264, 267, 270, + 271, 274, 279, 296, 301, 302, 303, 304, 320, 321, + 322, 325, 328, 329, 332, 334, 335, 338, 344, 345, + 346, 347, 348, 350, 357, 361, 369, 370, 371, 372, + 373, 375, 376, 380, 381, 382, 383, 391, 395, 410, + 411, 422, 434, 438, 261, 418, 439, 0, 295, 0, + 0, 297, 246, 263, 272, 0, 429, 392, 203, 363, + 253, 192, 220, 206, 227, 241, 243, 276, 305, 311, + 340, 343, 258, 238, 218, 360, 215, 378, 398, 399, + 400, 402, 309, 234, 327, 0, 0, 0, 0, 519, + 0, 0, 0, 237, 0, 518, 0, 0, 0, 285, + 0, 0, 0, 341, 0, 379, 223, 294, 292, 407, + 247, 240, 236, 222, 269, 300, 339, 397, 333, 562, + 289, 0, 0, 388, 312, 0, 0, 0, 0, 0, + 553, 554, 0, 0, 0, 0, 0, 0, 0, 0, + 275, 221, 190, 324, 389, 251, 71, 0, 0, 182, + 183, 184, 540, 539, 542, 543, 544, 545, 0, 0, + 212, 541, 219, 546, 547, 548, 0, 233, 273, 239, + 232, 404, 0, 0, 0, 516, 533, 0, 561, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 530, 531, + 609, 0, 0, 0, 577, 0, 532, 0, 0, 525, + 526, 528, 527, 529, 534, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 259, 0, 313, 0, 576, 0, + 0, 435, 0, 0, 574, 0, 0, 0, 0, 0, + 284, 0, 281, 186, 201, 0, 0, 323, 362, 368, + 0, 0, 0, 224, 0, 366, 337, 421, 208, 249, + 359, 342, 364, 0, 0, 365, 290, 409, 354, 419, + 436, 437, 231, 317, 427, 401, 433, 447, 202, 228, + 331, 394, 424, 385, 310, 405, 406, 280, 384, 257, + 189, 288, 444, 200, 374, 216, 193, 396, 417, 213, + 377, 0, 0, 0, 195, 415, 393, 307, 277, 278, + 194, 0, 358, 235, 255, 226, 326, 412, 413, 225, + 449, 204, 432, 197, 0, 431, 319, 408, 416, 308, + 299, 196, 414, 306, 298, 283, 245, 265, 352, 293, + 353, 266, 315, 314, 316, 0, 191, 0, 390, 425, + 450, 210, 0, 0, 403, 441, 446, 0, 355, 211, + 256, 244, 351, 254, 286, 440, 442, 443, 445, 209, + 349, 262, 330, 420, 248, 428, 318, 205, 268, 386, + 282, 291, 0, 0, 336, 
367, 214, 423, 387, 564, + 575, 570, 571, 568, 569, 563, 567, 566, 565, 578, + 555, 556, 557, 558, 560, 0, 572, 573, 559, 185, + 198, 287, 0, 356, 252, 448, 430, 426, 0, 0, + 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, - 200, 282, 0, 349, 250, 432, 416, 414, 0, 0, + 0, 0, 0, 187, 188, 199, 207, 217, 229, 242, + 250, 260, 264, 267, 270, 271, 274, 279, 296, 301, + 302, 303, 304, 320, 321, 322, 325, 328, 329, 332, + 334, 335, 338, 344, 345, 346, 347, 348, 350, 357, + 361, 369, 370, 371, 372, 373, 375, 376, 380, 381, + 382, 383, 391, 395, 410, 411, 422, 434, 438, 261, + 418, 439, 0, 295, 0, 0, 297, 246, 263, 272, + 0, 429, 392, 203, 363, 253, 192, 220, 206, 227, + 241, 243, 276, 305, 311, 340, 343, 258, 238, 218, + 360, 215, 378, 398, 399, 400, 402, 309, 234, 327, + 0, 0, 0, 0, 519, 0, 0, 0, 237, 0, + 518, 0, 0, 0, 285, 0, 0, 0, 341, 0, + 379, 223, 294, 292, 407, 247, 240, 236, 222, 269, + 300, 339, 397, 333, 562, 289, 0, 0, 388, 312, + 0, 0, 0, 0, 0, 553, 554, 0, 0, 0, + 0, 0, 0, 0, 0, 275, 221, 190, 324, 389, + 251, 71, 0, 0, 182, 183, 184, 540, 1433, 542, + 543, 544, 545, 0, 0, 212, 541, 219, 546, 547, + 548, 0, 233, 273, 239, 232, 404, 0, 0, 0, + 516, 533, 0, 561, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 530, 531, 609, 0, 0, 0, 577, + 0, 532, 0, 0, 525, 526, 528, 527, 529, 534, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 259, + 0, 313, 0, 576, 0, 0, 435, 0, 0, 574, + 0, 0, 0, 0, 0, 284, 0, 281, 186, 201, + 0, 0, 323, 362, 368, 0, 0, 0, 224, 0, + 366, 337, 421, 208, 249, 359, 342, 364, 0, 0, + 365, 290, 409, 354, 419, 436, 437, 231, 317, 427, + 401, 433, 447, 202, 228, 331, 394, 424, 385, 310, + 405, 406, 280, 384, 257, 189, 288, 444, 200, 374, + 216, 193, 396, 417, 213, 377, 0, 0, 0, 195, + 415, 393, 307, 277, 278, 194, 0, 358, 235, 255, + 226, 326, 412, 413, 225, 449, 204, 432, 197, 0, + 431, 319, 408, 416, 308, 299, 196, 414, 306, 298, + 283, 245, 265, 352, 293, 353, 266, 315, 314, 316, + 0, 191, 0, 390, 
425, 450, 210, 0, 0, 403, + 441, 446, 0, 355, 211, 256, 244, 351, 254, 286, + 440, 442, 443, 445, 209, 349, 262, 330, 420, 248, + 428, 318, 205, 268, 386, 282, 291, 0, 0, 336, + 367, 214, 423, 387, 564, 575, 570, 571, 568, 569, + 563, 567, 566, 565, 578, 555, 556, 557, 558, 560, + 0, 572, 573, 559, 185, 198, 287, 0, 356, 252, + 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 187, 188, + 199, 207, 217, 229, 242, 250, 260, 264, 267, 270, + 271, 274, 279, 296, 301, 302, 303, 304, 320, 321, + 322, 325, 328, 329, 332, 334, 335, 338, 344, 345, + 346, 347, 348, 350, 357, 361, 369, 370, 371, 372, + 373, 375, 376, 380, 381, 382, 383, 391, 395, 410, + 411, 422, 434, 438, 261, 418, 439, 0, 295, 0, + 0, 297, 246, 263, 272, 0, 429, 392, 203, 363, + 253, 192, 220, 206, 227, 241, 243, 276, 305, 311, + 340, 343, 258, 238, 218, 360, 215, 378, 398, 399, + 400, 402, 309, 234, 327, 0, 0, 0, 0, 519, + 0, 0, 0, 237, 0, 518, 0, 0, 0, 285, + 0, 0, 0, 341, 0, 379, 223, 294, 292, 407, + 247, 240, 236, 222, 269, 300, 339, 397, 333, 562, + 289, 0, 0, 388, 312, 0, 0, 0, 0, 0, + 553, 554, 0, 0, 0, 0, 0, 0, 0, 0, + 275, 221, 190, 324, 389, 251, 71, 0, 0, 182, + 183, 184, 540, 1430, 542, 543, 544, 545, 0, 0, + 212, 541, 219, 546, 547, 548, 0, 233, 273, 239, + 232, 404, 0, 0, 0, 516, 533, 0, 561, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 530, 531, + 609, 0, 0, 0, 577, 0, 532, 0, 0, 525, + 526, 528, 527, 529, 534, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 259, 0, 313, 0, 576, 0, + 0, 435, 0, 0, 574, 0, 0, 0, 0, 0, + 284, 0, 281, 186, 201, 0, 0, 323, 362, 368, + 0, 0, 0, 224, 0, 366, 337, 421, 208, 249, + 359, 342, 364, 0, 0, 365, 290, 409, 354, 419, + 436, 437, 231, 317, 427, 401, 433, 447, 202, 228, + 331, 394, 424, 385, 310, 405, 406, 280, 384, 257, + 189, 288, 444, 200, 374, 216, 193, 396, 417, 213, + 377, 0, 0, 0, 195, 415, 393, 307, 277, 278, + 194, 0, 358, 235, 255, 226, 326, 412, 413, 225, + 449, 204, 432, 197, 0, 431, 319, 408, 416, 308, 
+ 299, 196, 414, 306, 298, 283, 245, 265, 352, 293, + 353, 266, 315, 314, 316, 0, 191, 0, 390, 425, + 450, 210, 0, 0, 403, 441, 446, 0, 355, 211, + 256, 244, 351, 254, 286, 440, 442, 443, 445, 209, + 349, 262, 330, 420, 248, 428, 318, 205, 268, 386, + 282, 291, 0, 0, 336, 367, 214, 423, 387, 564, + 575, 570, 571, 568, 569, 563, 567, 566, 565, 578, + 555, 556, 557, 558, 560, 0, 572, 573, 559, 185, + 198, 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 189, 190, 201, 208, 217, 229, 242, - 248, 257, 260, 263, 266, 267, 269, 274, 291, 295, - 296, 297, 298, 314, 315, 316, 319, 322, 323, 325, - 327, 328, 331, 337, 338, 339, 340, 341, 343, 350, - 354, 361, 362, 363, 364, 365, 366, 367, 371, 372, - 373, 374, 382, 385, 399, 400, 410, 420, 424, 258, - 407, 425, 0, 290, 0, 194, 220, 207, 227, 241, - 243, 271, 299, 305, 333, 336, 255, 238, 218, 353, - 216, 369, 388, 389, 390, 392, 303, 234, 33, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 0, 0, 0, 0, 0, 0, 0, 0, - 237, 0, 0, 0, 0, 0, 280, 0, 0, 334, - 0, 370, 223, 289, 287, 396, 246, 240, 236, 222, - 265, 294, 332, 387, 326, 0, 284, 0, 0, 379, - 306, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 270, 221, 192, 318, - 380, 249, 67, 0, 565, 174, 175, 176, 0, 0, - 0, 0, 0, 0, 0, 0, 213, 0, 219, 0, - 0, 0, 0, 233, 268, 239, 232, 394, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 256, 0, 307, 0, 0, 0, 421, 0, 0, 0, - 0, 0, 0, 0, 0, 279, 0, 276, 188, 202, - 0, 0, 317, 355, 360, 0, 0, 0, 224, 0, - 358, 330, 409, 209, 247, 352, 335, 356, 0, 0, - 357, 285, 398, 347, 408, 422, 423, 231, 311, 415, - 391, 419, 431, 203, 228, 324, 384, 412, 376, 304, - 395, 275, 375, 254, 191, 283, 195, 386, 406, 214, - 368, 0, 0, 0, 197, 404, 383, 301, 272, 273, - 196, 0, 351, 235, 252, 226, 320, 401, 402, 225, - 433, 204, 418, 199, 205, 417, 313, 397, 405, 302, - 293, 
198, 403, 300, 292, 278, 245, 261, 345, 288, - 346, 262, 309, 308, 310, 0, 193, 0, 381, 413, - 434, 211, 0, 0, 393, 427, 430, 0, 348, 212, - 253, 244, 344, 251, 281, 426, 428, 429, 210, 342, - 259, 312, 206, 264, 377, 277, 286, 0, 0, 329, - 359, 215, 411, 378, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 187, 200, 282, 0, 349, 250, - 432, 416, 414, 0, 0, 230, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 189, 190, - 201, 208, 217, 229, 242, 248, 257, 260, 263, 266, - 267, 269, 274, 291, 295, 296, 297, 298, 314, 315, - 316, 319, 322, 323, 325, 327, 328, 331, 337, 338, - 339, 340, 341, 343, 350, 354, 361, 362, 363, 364, - 365, 366, 367, 371, 372, 373, 374, 382, 385, 399, - 400, 410, 420, 424, 258, 407, 425, 0, 290, 0, - 194, 220, 207, 227, 241, 243, 271, 299, 305, 333, - 336, 255, 238, 218, 353, 216, 369, 388, 389, 390, - 392, 303, 234, 321, 0, 0, 0, 1373, 0, 0, - 0, 0, 237, 0, 0, 0, 0, 0, 280, 0, - 0, 334, 0, 370, 223, 289, 287, 396, 246, 240, - 236, 222, 265, 294, 332, 387, 326, 0, 284, 0, - 0, 379, 306, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 270, 221, - 192, 318, 380, 249, 0, 0, 0, 174, 175, 176, - 0, 1375, 0, 0, 0, 0, 0, 0, 213, 0, - 219, 0, 0, 0, 0, 233, 268, 239, 232, 394, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 256, 0, 307, 0, 0, 0, 421, 0, - 0, 0, 0, 0, 0, 0, 0, 279, 0, 276, - 188, 202, 0, 0, 317, 355, 360, 0, 0, 0, - 224, 0, 358, 330, 409, 209, 247, 352, 335, 356, - 0, 1371, 357, 285, 398, 347, 408, 422, 423, 231, - 311, 415, 391, 419, 431, 203, 228, 324, 384, 412, - 376, 304, 395, 275, 375, 254, 191, 283, 195, 386, - 406, 214, 368, 0, 0, 0, 197, 404, 383, 301, - 272, 273, 196, 0, 351, 235, 252, 226, 320, 401, - 402, 225, 433, 204, 418, 199, 205, 417, 313, 397, - 405, 302, 293, 198, 403, 300, 292, 278, 245, 261, - 345, 288, 346, 262, 309, 308, 310, 0, 193, 0, - 381, 413, 434, 211, 
0, 0, 393, 427, 430, 0, - 348, 212, 253, 244, 344, 251, 281, 426, 428, 429, - 210, 342, 259, 312, 206, 264, 377, 277, 286, 0, - 0, 329, 359, 215, 411, 378, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 187, 200, 282, 0, - 349, 250, 432, 416, 414, 0, 0, 230, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 189, 190, 201, 208, 217, 229, 242, 248, 257, 260, - 263, 266, 267, 269, 274, 291, 295, 296, 297, 298, - 314, 315, 316, 319, 322, 323, 325, 327, 328, 331, - 337, 338, 339, 340, 341, 343, 350, 354, 361, 362, - 363, 364, 365, 366, 367, 371, 372, 373, 374, 382, - 385, 399, 400, 410, 420, 424, 258, 407, 425, 0, - 290, 0, 194, 220, 207, 227, 241, 243, 271, 299, - 305, 333, 336, 255, 238, 218, 353, 216, 369, 388, - 389, 390, 392, 303, 234, 321, 0, 0, 0, 0, + 0, 0, 0, 187, 188, 199, 207, 217, 229, 242, + 250, 260, 264, 267, 270, 271, 274, 279, 296, 301, + 302, 303, 304, 320, 321, 322, 325, 328, 329, 332, + 334, 335, 338, 344, 345, 346, 347, 348, 350, 357, + 361, 369, 370, 371, 372, 373, 375, 376, 380, 381, + 382, 383, 391, 395, 410, 411, 422, 434, 438, 261, + 418, 439, 0, 295, 0, 0, 297, 246, 263, 272, + 0, 429, 392, 203, 363, 253, 192, 220, 206, 227, + 241, 243, 276, 305, 311, 340, 343, 258, 238, 218, + 360, 215, 378, 398, 399, 400, 402, 309, 234, 589, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 327, 0, 0, 0, 0, 519, 0, 0, + 0, 237, 0, 518, 0, 0, 0, 285, 0, 0, + 0, 341, 0, 379, 223, 294, 292, 407, 247, 240, + 236, 222, 269, 300, 339, 397, 333, 562, 289, 0, + 0, 388, 312, 0, 0, 0, 0, 0, 553, 554, + 0, 0, 0, 0, 0, 0, 0, 0, 275, 221, + 190, 324, 389, 251, 71, 0, 0, 182, 183, 184, + 540, 539, 542, 543, 544, 545, 0, 0, 212, 541, + 219, 546, 547, 548, 0, 233, 273, 239, 232, 404, + 0, 0, 0, 516, 533, 0, 561, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 530, 531, 0, 0, + 0, 0, 577, 0, 532, 0, 0, 525, 526, 528, + 527, 529, 534, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 259, 0, 313, 0, 576, 0, 0, 435, + 0, 0, 574, 0, 0, 0, 0, 0, 284, 0, + 
281, 186, 201, 0, 0, 323, 362, 368, 0, 0, + 0, 224, 0, 366, 337, 421, 208, 249, 359, 342, + 364, 0, 0, 365, 290, 409, 354, 419, 436, 437, + 231, 317, 427, 401, 433, 447, 202, 228, 331, 394, + 424, 385, 310, 405, 406, 280, 384, 257, 189, 288, + 444, 200, 374, 216, 193, 396, 417, 213, 377, 0, + 0, 0, 195, 415, 393, 307, 277, 278, 194, 0, + 358, 235, 255, 226, 326, 412, 413, 225, 449, 204, + 432, 197, 0, 431, 319, 408, 416, 308, 299, 196, + 414, 306, 298, 283, 245, 265, 352, 293, 353, 266, + 315, 314, 316, 0, 191, 0, 390, 425, 450, 210, + 0, 0, 403, 441, 446, 0, 355, 211, 256, 244, + 351, 254, 286, 440, 442, 443, 445, 209, 349, 262, + 330, 420, 248, 428, 318, 205, 268, 386, 282, 291, + 0, 0, 336, 367, 214, 423, 387, 564, 575, 570, + 571, 568, 569, 563, 567, 566, 565, 578, 555, 556, + 557, 558, 560, 0, 572, 573, 559, 185, 198, 287, + 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 188, 199, 207, 217, 229, 242, 250, 260, + 264, 267, 270, 271, 274, 279, 296, 301, 302, 303, + 304, 320, 321, 322, 325, 328, 329, 332, 334, 335, + 338, 344, 345, 346, 347, 348, 350, 357, 361, 369, + 370, 371, 372, 373, 375, 376, 380, 381, 382, 383, + 391, 395, 410, 411, 422, 434, 438, 261, 418, 439, + 0, 295, 0, 0, 297, 246, 263, 272, 0, 429, + 392, 203, 363, 253, 192, 220, 206, 227, 241, 243, + 276, 305, 311, 340, 343, 258, 238, 218, 360, 215, + 378, 398, 399, 400, 402, 309, 234, 327, 0, 0, + 0, 0, 519, 0, 0, 0, 237, 0, 518, 0, + 0, 0, 285, 0, 0, 0, 341, 0, 379, 223, + 294, 292, 407, 247, 240, 236, 222, 269, 300, 339, + 397, 333, 562, 289, 0, 0, 388, 312, 0, 0, + 0, 0, 0, 553, 554, 0, 0, 0, 0, 0, + 0, 0, 0, 275, 221, 190, 324, 389, 251, 71, + 0, 0, 182, 183, 184, 540, 539, 542, 543, 544, + 545, 0, 0, 212, 541, 219, 546, 547, 548, 0, + 233, 273, 239, 232, 404, 0, 0, 0, 516, 533, + 0, 561, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 530, 531, 0, 0, 0, 0, 577, 0, 532, + 0, 0, 525, 526, 528, 527, 529, 534, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 259, 0, 313, + 0, 576, 0, 0, 435, 0, 0, 574, 0, 0, + 0, 0, 0, 284, 0, 281, 186, 201, 0, 0, + 323, 362, 368, 0, 0, 0, 224, 0, 366, 337, + 421, 208, 249, 359, 342, 364, 0, 0, 365, 290, + 409, 354, 419, 436, 437, 231, 317, 427, 401, 433, + 447, 202, 228, 331, 394, 424, 385, 310, 405, 406, + 280, 384, 257, 189, 288, 444, 200, 374, 216, 193, + 396, 417, 213, 377, 0, 0, 0, 195, 415, 393, + 307, 277, 278, 194, 0, 358, 235, 255, 226, 326, + 412, 413, 225, 449, 204, 432, 197, 0, 431, 319, + 408, 416, 308, 299, 196, 414, 306, 298, 283, 245, + 265, 352, 293, 353, 266, 315, 314, 316, 0, 191, + 0, 390, 425, 450, 210, 0, 0, 403, 441, 446, + 0, 355, 211, 256, 244, 351, 254, 286, 440, 442, + 443, 445, 209, 349, 262, 330, 420, 248, 428, 318, + 205, 268, 386, 282, 291, 0, 0, 336, 367, 214, + 423, 387, 564, 575, 570, 571, 568, 569, 563, 567, + 566, 565, 578, 555, 556, 557, 558, 560, 0, 572, + 573, 559, 185, 198, 287, 0, 356, 252, 448, 430, + 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 187, 188, 199, 207, + 217, 229, 242, 250, 260, 264, 267, 270, 271, 274, + 279, 296, 301, 302, 303, 304, 320, 321, 322, 325, + 328, 329, 332, 334, 335, 338, 344, 345, 346, 347, + 348, 350, 357, 361, 369, 370, 371, 372, 373, 375, + 376, 380, 381, 382, 383, 391, 395, 410, 411, 422, + 434, 438, 261, 418, 439, 0, 295, 0, 0, 297, + 246, 263, 272, 0, 429, 392, 203, 363, 253, 192, + 220, 206, 227, 241, 243, 276, 305, 311, 340, 343, + 258, 238, 218, 360, 215, 378, 398, 399, 400, 402, + 309, 234, 327, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 0, 0, 0, 285, 0, 0, + 0, 341, 0, 379, 223, 294, 292, 407, 247, 240, + 236, 222, 269, 300, 339, 397, 333, 562, 289, 0, + 0, 388, 312, 0, 0, 0, 0, 0, 553, 554, + 0, 0, 0, 0, 0, 0, 0, 0, 275, 221, + 190, 324, 389, 251, 71, 0, 0, 182, 183, 184, + 540, 539, 542, 543, 544, 545, 0, 0, 212, 541, + 219, 546, 547, 548, 0, 233, 273, 239, 232, 404, + 0, 0, 0, 0, 533, 0, 561, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 530, 531, 0, 0, + 0, 0, 577, 0, 532, 0, 0, 525, 526, 528, + 527, 529, 534, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 259, 0, 313, 0, 576, 0, 0, 435, + 0, 0, 574, 0, 0, 0, 0, 0, 284, 0, + 281, 186, 201, 0, 0, 323, 362, 368, 0, 0, + 0, 224, 0, 366, 337, 421, 208, 249, 359, 342, + 364, 2202, 0, 365, 290, 409, 354, 419, 436, 437, + 231, 317, 427, 401, 433, 447, 202, 228, 331, 394, + 424, 385, 310, 405, 406, 280, 384, 257, 189, 288, + 444, 200, 374, 216, 193, 396, 417, 213, 377, 0, + 0, 0, 195, 415, 393, 307, 277, 278, 194, 0, + 358, 235, 255, 226, 326, 412, 413, 225, 449, 204, + 432, 197, 0, 431, 319, 408, 416, 308, 299, 196, + 414, 306, 298, 283, 245, 265, 352, 293, 353, 266, + 315, 314, 316, 0, 191, 0, 390, 425, 450, 210, + 0, 0, 403, 441, 446, 0, 355, 211, 256, 244, + 351, 254, 286, 440, 442, 443, 445, 209, 349, 262, + 330, 420, 248, 428, 318, 205, 268, 386, 282, 291, + 0, 0, 336, 367, 214, 423, 387, 564, 575, 570, + 571, 568, 569, 563, 567, 566, 565, 578, 555, 556, + 557, 558, 560, 0, 572, 573, 559, 185, 198, 287, + 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 188, 199, 207, 217, 229, 242, 250, 260, + 264, 267, 270, 271, 274, 279, 296, 301, 302, 303, + 304, 320, 321, 322, 325, 328, 329, 332, 334, 335, + 338, 344, 345, 346, 347, 348, 350, 357, 361, 369, + 370, 371, 372, 373, 375, 376, 380, 381, 382, 383, + 391, 395, 410, 411, 422, 434, 438, 261, 418, 439, + 0, 295, 0, 0, 297, 246, 263, 272, 0, 429, + 392, 203, 363, 253, 192, 220, 206, 227, 241, 243, + 276, 305, 311, 340, 343, 258, 238, 218, 360, 215, + 378, 398, 399, 400, 402, 309, 234, 327, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 0, + 0, 0, 285, 0, 0, 0, 341, 0, 379, 223, + 294, 292, 407, 247, 240, 236, 222, 269, 300, 339, + 397, 333, 562, 289, 0, 0, 388, 312, 0, 0, + 0, 0, 0, 553, 554, 0, 0, 0, 0, 0, + 0, 0, 0, 275, 221, 190, 324, 389, 251, 71, + 0, 596, 182, 183, 184, 540, 539, 542, 543, 544, + 545, 0, 0, 212, 541, 219, 546, 547, 548, 0, + 233, 273, 
239, 232, 404, 0, 0, 0, 0, 533, + 0, 561, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 530, 531, 0, 0, 0, 0, 577, 0, 532, + 0, 0, 525, 526, 528, 527, 529, 534, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 259, 0, 313, + 0, 576, 0, 0, 435, 0, 0, 574, 0, 0, + 0, 0, 0, 284, 0, 281, 186, 201, 0, 0, + 323, 362, 368, 0, 0, 0, 224, 0, 366, 337, + 421, 208, 249, 359, 342, 364, 0, 0, 365, 290, + 409, 354, 419, 436, 437, 231, 317, 427, 401, 433, + 447, 202, 228, 331, 394, 424, 385, 310, 405, 406, + 280, 384, 257, 189, 288, 444, 200, 374, 216, 193, + 396, 417, 213, 377, 0, 0, 0, 195, 415, 393, + 307, 277, 278, 194, 0, 358, 235, 255, 226, 326, + 412, 413, 225, 449, 204, 432, 197, 0, 431, 319, + 408, 416, 308, 299, 196, 414, 306, 298, 283, 245, + 265, 352, 293, 353, 266, 315, 314, 316, 0, 191, + 0, 390, 425, 450, 210, 0, 0, 403, 441, 446, + 0, 355, 211, 256, 244, 351, 254, 286, 440, 442, + 443, 445, 209, 349, 262, 330, 420, 248, 428, 318, + 205, 268, 386, 282, 291, 0, 0, 336, 367, 214, + 423, 387, 564, 575, 570, 571, 568, 569, 563, 567, + 566, 565, 578, 555, 556, 557, 558, 560, 0, 572, + 573, 559, 185, 198, 287, 0, 356, 252, 448, 430, + 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 187, 188, 199, 207, + 217, 229, 242, 250, 260, 264, 267, 270, 271, 274, + 279, 296, 301, 302, 303, 304, 320, 321, 322, 325, + 328, 329, 332, 334, 335, 338, 344, 345, 346, 347, + 348, 350, 357, 361, 369, 370, 371, 372, 373, 375, + 376, 380, 381, 382, 383, 391, 395, 410, 411, 422, + 434, 438, 261, 418, 439, 0, 295, 0, 0, 297, + 246, 263, 272, 0, 429, 392, 203, 363, 253, 192, + 220, 206, 227, 241, 243, 276, 305, 311, 340, 343, + 258, 238, 218, 360, 215, 378, 398, 399, 400, 402, + 309, 234, 327, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 0, 0, 0, 285, 0, 0, + 0, 341, 0, 379, 223, 294, 292, 407, 247, 240, + 236, 222, 269, 300, 339, 397, 333, 562, 289, 0, + 0, 388, 312, 0, 0, 0, 0, 0, 553, 554, + 0, 0, 0, 0, 0, 0, 0, 0, 275, 221, + 190, 324, 389, 251, 71, 0, 0, 182, 183, 
184, + 540, 539, 542, 543, 544, 545, 0, 0, 212, 541, + 219, 546, 547, 548, 0, 233, 273, 239, 232, 404, + 0, 0, 0, 0, 533, 0, 561, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 530, 531, 0, 0, + 0, 0, 577, 0, 532, 0, 0, 525, 526, 528, + 527, 529, 534, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 259, 0, 313, 0, 576, 0, 0, 435, + 0, 0, 574, 0, 0, 0, 0, 0, 284, 0, + 281, 186, 201, 0, 0, 323, 362, 368, 0, 0, + 0, 224, 0, 366, 337, 421, 208, 249, 359, 342, + 364, 0, 0, 365, 290, 409, 354, 419, 436, 437, + 231, 317, 427, 401, 433, 447, 202, 228, 331, 394, + 424, 385, 310, 405, 406, 280, 384, 257, 189, 288, + 444, 200, 374, 216, 193, 396, 417, 213, 377, 0, + 0, 0, 195, 415, 393, 307, 277, 278, 194, 0, + 358, 235, 255, 226, 326, 412, 413, 225, 449, 204, + 432, 197, 0, 431, 319, 408, 416, 308, 299, 196, + 414, 306, 298, 283, 245, 265, 352, 293, 353, 266, + 315, 314, 316, 0, 191, 0, 390, 425, 450, 210, + 0, 0, 403, 441, 446, 0, 355, 211, 256, 244, + 351, 254, 286, 440, 442, 443, 445, 209, 349, 262, + 330, 420, 248, 428, 318, 205, 268, 386, 282, 291, + 0, 0, 336, 367, 214, 423, 387, 564, 575, 570, + 571, 568, 569, 563, 567, 566, 565, 578, 555, 556, + 557, 558, 560, 0, 572, 573, 559, 185, 198, 287, + 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 188, 199, 207, 217, 229, 242, 250, 260, + 264, 267, 270, 271, 274, 279, 296, 301, 302, 303, + 304, 320, 321, 322, 325, 328, 329, 332, 334, 335, + 338, 344, 345, 346, 347, 348, 350, 357, 361, 369, + 370, 371, 372, 373, 375, 376, 380, 381, 382, 383, + 391, 395, 410, 411, 422, 434, 438, 261, 418, 439, + 0, 295, 0, 0, 297, 246, 263, 272, 0, 429, + 392, 203, 363, 253, 192, 220, 206, 227, 241, 243, + 276, 305, 311, 340, 343, 258, 238, 218, 360, 215, + 378, 398, 399, 400, 402, 309, 234, 327, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 0, + 0, 0, 285, 0, 0, 0, 341, 0, 379, 223, + 294, 292, 407, 247, 240, 236, 222, 269, 300, 339, + 397, 333, 0, 289, 0, 0, 388, 312, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 275, 221, 190, 324, 389, 251, 0, + 0, 0, 182, 183, 184, 0, 0, 0, 0, 0, + 0, 0, 0, 212, 0, 219, 0, 0, 0, 0, + 233, 273, 239, 232, 404, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 983, 982, 992, + 993, 985, 986, 987, 988, 989, 990, 991, 984, 0, + 0, 994, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 259, 0, 313, + 0, 0, 0, 0, 435, 0, 0, 0, 0, 0, + 0, 0, 0, 284, 0, 281, 186, 201, 0, 0, + 323, 362, 368, 0, 0, 0, 224, 0, 366, 337, + 421, 208, 249, 359, 342, 364, 0, 0, 365, 290, + 409, 354, 419, 436, 437, 231, 317, 427, 401, 433, + 447, 202, 228, 331, 394, 424, 385, 310, 405, 406, + 280, 384, 257, 189, 288, 444, 200, 374, 216, 193, + 396, 417, 213, 377, 0, 0, 0, 195, 415, 393, + 307, 277, 278, 194, 0, 358, 235, 255, 226, 326, + 412, 413, 225, 449, 204, 432, 197, 0, 431, 319, + 408, 416, 308, 299, 196, 414, 306, 298, 283, 245, + 265, 352, 293, 353, 266, 315, 314, 316, 0, 191, + 0, 390, 425, 450, 210, 0, 0, 403, 441, 446, + 0, 355, 211, 256, 244, 351, 254, 286, 440, 442, + 443, 445, 209, 349, 262, 330, 420, 248, 428, 318, + 205, 268, 386, 282, 291, 0, 0, 336, 367, 214, + 423, 387, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 185, 198, 287, 0, 356, 252, 448, 430, + 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 187, 188, 199, 207, + 217, 229, 242, 250, 260, 264, 267, 270, 271, 274, + 279, 296, 301, 302, 303, 304, 320, 321, 322, 325, + 328, 329, 332, 334, 335, 338, 344, 345, 346, 347, + 348, 350, 357, 361, 369, 370, 371, 372, 373, 375, + 376, 380, 381, 382, 383, 391, 395, 410, 411, 422, + 434, 438, 261, 418, 439, 0, 295, 0, 0, 297, + 246, 263, 272, 0, 429, 392, 203, 363, 253, 192, + 220, 206, 227, 241, 243, 276, 305, 311, 340, 343, + 258, 238, 218, 360, 215, 378, 398, 399, 400, 402, + 309, 234, 327, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 809, 0, 0, 0, 0, 285, 0, 0, + 0, 341, 0, 379, 223, 294, 292, 407, 247, 240, + 236, 222, 269, 300, 339, 397, 333, 0, 289, 0, + 0, 
388, 312, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 275, 221, + 190, 324, 389, 251, 0, 0, 0, 182, 183, 184, + 0, 0, 0, 0, 0, 0, 0, 0, 212, 0, + 219, 0, 0, 0, 0, 233, 273, 239, 232, 404, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 259, 0, 313, 0, 0, 0, 808, 435, + 0, 0, 0, 0, 0, 0, 805, 806, 284, 774, + 281, 186, 201, 799, 803, 323, 362, 368, 0, 0, + 0, 224, 0, 366, 337, 421, 208, 249, 359, 342, + 364, 0, 0, 365, 290, 409, 354, 419, 436, 437, + 231, 317, 427, 401, 433, 447, 202, 228, 331, 394, + 424, 385, 310, 405, 406, 280, 384, 257, 189, 288, + 444, 200, 374, 216, 193, 396, 417, 213, 377, 0, + 0, 0, 195, 415, 393, 307, 277, 278, 194, 0, + 358, 235, 255, 226, 326, 412, 413, 225, 449, 204, + 432, 197, 0, 431, 319, 408, 416, 308, 299, 196, + 414, 306, 298, 283, 245, 265, 352, 293, 353, 266, + 315, 314, 316, 0, 191, 0, 390, 425, 450, 210, + 0, 0, 403, 441, 446, 0, 355, 211, 256, 244, + 351, 254, 286, 440, 442, 443, 445, 209, 349, 262, + 330, 420, 248, 428, 318, 205, 268, 386, 282, 291, + 0, 0, 336, 367, 214, 423, 387, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 185, 198, 287, + 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 188, 199, 207, 217, 229, 242, 250, 260, + 264, 267, 270, 271, 274, 279, 296, 301, 302, 303, + 304, 320, 321, 322, 325, 328, 329, 332, 334, 335, + 338, 344, 345, 346, 347, 348, 350, 357, 361, 369, + 370, 371, 372, 373, 375, 376, 380, 381, 382, 383, + 391, 395, 410, 411, 422, 434, 438, 261, 418, 439, + 0, 295, 0, 0, 297, 246, 263, 272, 0, 429, + 392, 203, 363, 253, 192, 220, 206, 227, 241, 243, + 276, 305, 311, 340, 343, 258, 238, 218, 360, 215, + 378, 398, 399, 400, 402, 309, 234, 327, 0, 0, + 0, 1086, 0, 0, 0, 0, 237, 0, 0, 0, + 0, 0, 285, 0, 0, 0, 341, 0, 379, 223, + 294, 292, 407, 247, 240, 236, 222, 269, 300, 339, + 397, 333, 0, 
289, 0, 0, 388, 312, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 275, 221, 190, 324, 389, 251, 0, + 0, 0, 182, 183, 184, 0, 1088, 0, 0, 0, + 0, 0, 0, 212, 0, 219, 0, 0, 0, 0, + 233, 273, 239, 232, 404, 972, 973, 971, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 974, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 259, 0, 313, + 0, 0, 0, 0, 435, 0, 0, 0, 0, 0, + 0, 0, 0, 284, 0, 281, 186, 201, 0, 0, + 323, 362, 368, 0, 0, 0, 224, 0, 366, 337, + 421, 208, 249, 359, 342, 364, 0, 0, 365, 290, + 409, 354, 419, 436, 437, 231, 317, 427, 401, 433, + 447, 202, 228, 331, 394, 424, 385, 310, 405, 406, + 280, 384, 257, 189, 288, 444, 200, 374, 216, 193, + 396, 417, 213, 377, 0, 0, 0, 195, 415, 393, + 307, 277, 278, 194, 0, 358, 235, 255, 226, 326, + 412, 413, 225, 449, 204, 432, 197, 0, 431, 319, + 408, 416, 308, 299, 196, 414, 306, 298, 283, 245, + 265, 352, 293, 353, 266, 315, 314, 316, 0, 191, + 0, 390, 425, 450, 210, 0, 0, 403, 441, 446, + 0, 355, 211, 256, 244, 351, 254, 286, 440, 442, + 443, 445, 209, 349, 262, 330, 420, 248, 428, 318, + 205, 268, 386, 282, 291, 0, 0, 336, 367, 214, + 423, 387, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 185, 198, 287, 0, 356, 252, 448, 430, + 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 187, 188, 199, 207, + 217, 229, 242, 250, 260, 264, 267, 270, 271, 274, + 279, 296, 301, 302, 303, 304, 320, 321, 322, 325, + 328, 329, 332, 334, 335, 338, 344, 345, 346, 347, + 348, 350, 357, 361, 369, 370, 371, 372, 373, 375, + 376, 380, 381, 382, 383, 391, 395, 410, 411, 422, + 434, 438, 261, 418, 439, 0, 295, 0, 0, 297, + 246, 263, 272, 0, 429, 392, 203, 363, 253, 192, + 220, 206, 227, 241, 243, 276, 305, 311, 340, 343, + 258, 238, 218, 360, 215, 378, 398, 399, 400, 402, + 309, 234, 35, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 327, 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, 0, - 280, 0, 0, 334, 0, 370, 223, 289, 287, 396, - 246, 
240, 236, 222, 265, 294, 332, 387, 326, 0, - 284, 0, 0, 379, 306, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 270, 221, 192, 318, 380, 249, 0, 0, 0, 174, - 175, 176, 0, 0, 0, 0, 0, 0, 0, 0, - 213, 0, 219, 0, 0, 0, 0, 233, 268, 239, - 232, 394, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 733, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 256, 0, 307, 0, 0, 0, - 421, 0, 0, 0, 0, 0, 0, 0, 0, 279, - 739, 276, 188, 202, 737, 0, 317, 355, 360, 0, - 0, 0, 224, 0, 358, 330, 409, 209, 247, 352, - 335, 356, 0, 0, 357, 285, 398, 347, 408, 422, - 423, 231, 311, 415, 391, 419, 431, 203, 228, 324, - 384, 412, 376, 304, 395, 275, 375, 254, 191, 283, - 195, 386, 406, 214, 368, 0, 0, 0, 197, 404, - 383, 301, 272, 273, 196, 0, 351, 235, 252, 226, - 320, 401, 402, 225, 433, 204, 418, 199, 205, 417, - 313, 397, 405, 302, 293, 198, 403, 300, 292, 278, - 245, 261, 345, 288, 346, 262, 309, 308, 310, 0, - 193, 0, 381, 413, 434, 211, 0, 0, 393, 427, - 430, 0, 348, 212, 253, 244, 344, 251, 281, 426, - 428, 429, 210, 342, 259, 312, 206, 264, 377, 277, - 286, 0, 0, 329, 359, 215, 411, 378, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 187, 200, - 282, 0, 349, 250, 432, 416, 414, 0, 0, 230, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 189, 190, 201, 208, 217, 229, 242, 248, - 257, 260, 263, 266, 267, 269, 274, 291, 295, 296, - 297, 298, 314, 315, 316, 319, 322, 323, 325, 327, - 328, 331, 337, 338, 339, 340, 341, 343, 350, 354, - 361, 362, 363, 364, 365, 366, 367, 371, 372, 373, - 374, 382, 385, 399, 400, 410, 420, 424, 258, 407, - 425, 0, 290, 0, 194, 220, 207, 227, 241, 243, - 271, 299, 305, 333, 336, 255, 238, 218, 353, 216, - 369, 388, 389, 390, 392, 303, 234, 321, 0, 0, - 0, 1373, 0, 0, 0, 0, 237, 0, 0, 0, - 0, 0, 280, 0, 0, 334, 0, 370, 223, 289, - 287, 396, 246, 240, 236, 222, 265, 294, 332, 387, - 326, 0, 284, 0, 0, 379, 306, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
- 0, 0, 270, 221, 192, 318, 380, 249, 0, 0, - 0, 174, 175, 176, 0, 1375, 0, 0, 0, 0, - 0, 0, 213, 0, 219, 0, 0, 0, 0, 233, - 268, 239, 232, 394, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 256, 0, 307, 0, - 0, 0, 421, 0, 0, 0, 0, 0, 0, 0, - 0, 279, 0, 276, 188, 202, 0, 0, 317, 355, - 360, 0, 0, 0, 224, 0, 358, 330, 409, 209, - 247, 352, 335, 356, 0, 0, 357, 285, 398, 347, - 408, 422, 423, 231, 311, 415, 391, 419, 431, 203, - 228, 324, 384, 412, 376, 304, 395, 275, 375, 254, - 191, 283, 195, 386, 406, 214, 368, 0, 0, 0, - 197, 404, 383, 301, 272, 273, 196, 0, 351, 235, - 252, 226, 320, 401, 402, 225, 433, 204, 418, 199, - 205, 417, 313, 397, 405, 302, 293, 198, 403, 300, - 292, 278, 245, 261, 345, 288, 346, 262, 309, 308, - 310, 0, 193, 0, 381, 413, 434, 211, 0, 0, - 393, 427, 430, 0, 348, 212, 253, 244, 344, 251, - 281, 426, 428, 429, 210, 342, 259, 312, 206, 264, - 377, 277, 286, 0, 0, 329, 359, 215, 411, 378, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 187, 200, 282, 0, 349, 250, 432, 416, 414, 0, + 285, 0, 0, 0, 341, 0, 379, 223, 294, 292, + 407, 247, 240, 236, 222, 269, 300, 339, 397, 333, + 0, 289, 0, 0, 388, 312, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 275, 221, 190, 324, 389, 251, 71, 0, 596, + 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, + 0, 212, 0, 219, 0, 0, 0, 0, 233, 273, + 239, 232, 404, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 259, 0, 313, 0, 0, + 0, 0, 435, 0, 0, 0, 0, 0, 0, 0, + 0, 284, 0, 281, 186, 201, 0, 0, 323, 362, + 368, 0, 0, 0, 224, 0, 366, 337, 421, 208, + 249, 359, 342, 364, 0, 0, 365, 290, 409, 354, + 419, 436, 437, 231, 317, 427, 401, 433, 447, 202, + 228, 331, 394, 424, 385, 310, 405, 406, 280, 384, + 257, 189, 288, 444, 200, 374, 216, 193, 396, 417, + 213, 377, 0, 0, 0, 
195, 415, 393, 307, 277, + 278, 194, 0, 358, 235, 255, 226, 326, 412, 413, + 225, 449, 204, 432, 197, 0, 431, 319, 408, 416, + 308, 299, 196, 414, 306, 298, 283, 245, 265, 352, + 293, 353, 266, 315, 314, 316, 0, 191, 0, 390, + 425, 450, 210, 0, 0, 403, 441, 446, 0, 355, + 211, 256, 244, 351, 254, 286, 440, 442, 443, 445, + 209, 349, 262, 330, 420, 248, 428, 318, 205, 268, + 386, 282, 291, 0, 0, 336, 367, 214, 423, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 185, 198, 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 189, 190, 201, 208, 217, 229, - 242, 248, 257, 260, 263, 266, 267, 269, 274, 291, - 295, 296, 297, 298, 314, 315, 316, 319, 322, 323, - 325, 327, 328, 331, 337, 338, 339, 340, 341, 343, - 350, 354, 361, 362, 363, 364, 365, 366, 367, 371, - 372, 373, 374, 382, 385, 399, 400, 410, 420, 424, - 258, 407, 425, 0, 290, 0, 194, 220, 207, 227, - 241, 243, 271, 299, 305, 333, 336, 255, 238, 218, - 353, 216, 369, 388, 389, 390, 392, 303, 234, 321, - 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, - 0, 0, 0, 0, 280, 0, 0, 334, 0, 370, - 223, 289, 287, 396, 246, 240, 236, 222, 265, 294, - 332, 387, 326, 0, 284, 0, 0, 379, 306, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 270, 221, 192, 318, 380, 249, - 0, 0, 565, 174, 175, 176, 0, 0, 0, 0, - 0, 0, 0, 0, 213, 0, 219, 0, 0, 0, - 0, 233, 268, 239, 232, 394, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 256, 0, - 307, 0, 0, 0, 421, 0, 0, 0, 0, 1986, - 0, 0, 0, 279, 0, 276, 188, 202, 0, 0, - 317, 355, 360, 0, 0, 0, 224, 0, 358, 330, - 409, 209, 247, 352, 335, 356, 0, 0, 357, 285, - 398, 347, 408, 422, 423, 231, 311, 415, 391, 419, - 431, 203, 228, 324, 384, 412, 376, 304, 395, 275, - 375, 254, 191, 283, 195, 386, 406, 214, 368, 0, - 0, 0, 197, 404, 383, 301, 272, 273, 196, 0, - 351, 235, 252, 226, 320, 401, 402, 225, 433, 204, 
- 418, 199, 205, 417, 313, 397, 405, 302, 293, 198, - 403, 300, 292, 278, 245, 261, 345, 288, 346, 262, - 309, 308, 310, 0, 193, 0, 381, 413, 434, 211, - 0, 0, 393, 427, 430, 0, 348, 212, 253, 244, - 344, 251, 281, 426, 428, 429, 210, 342, 259, 312, - 206, 264, 377, 277, 286, 0, 0, 329, 359, 215, - 411, 378, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 187, 200, 282, 0, 349, 250, 432, 416, - 414, 0, 0, 230, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 189, 190, 201, 208, - 217, 229, 242, 248, 257, 260, 263, 266, 267, 269, - 274, 291, 295, 296, 297, 298, 314, 315, 316, 319, - 322, 323, 325, 327, 328, 331, 337, 338, 339, 340, - 341, 343, 350, 354, 361, 362, 363, 364, 365, 366, - 367, 371, 372, 373, 374, 382, 385, 399, 400, 410, - 420, 424, 258, 407, 425, 0, 290, 0, 194, 220, - 207, 227, 241, 243, 271, 299, 305, 333, 336, 255, - 238, 218, 353, 216, 369, 388, 389, 390, 392, 303, - 234, 33, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 321, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 0, 0, 0, 280, - 0, 0, 334, 0, 370, 223, 289, 287, 396, 246, - 240, 236, 222, 265, 294, 332, 387, 326, 0, 284, - 0, 0, 379, 306, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 270, - 221, 192, 318, 380, 249, 67, 0, 0, 174, 175, - 176, 0, 0, 0, 0, 0, 0, 0, 0, 213, - 0, 219, 0, 0, 0, 0, 233, 268, 239, 232, - 394, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 256, 0, 307, 0, 0, 0, 421, - 0, 0, 0, 0, 0, 0, 0, 0, 279, 0, - 276, 188, 202, 0, 0, 317, 355, 360, 0, 0, - 0, 224, 0, 358, 330, 409, 209, 247, 352, 335, - 356, 0, 0, 357, 285, 398, 347, 408, 422, 423, - 231, 311, 415, 391, 419, 431, 203, 228, 324, 384, - 412, 376, 304, 395, 275, 375, 254, 191, 283, 195, - 386, 406, 214, 368, 0, 0, 0, 197, 404, 383, - 301, 272, 273, 196, 0, 351, 235, 252, 226, 320, - 401, 402, 225, 433, 204, 418, 199, 205, 417, 313, - 397, 405, 302, 293, 198, 403, 300, 292, 
278, 245, - 261, 345, 288, 346, 262, 309, 308, 310, 0, 193, - 0, 381, 413, 434, 211, 0, 0, 393, 427, 430, - 0, 348, 212, 253, 244, 344, 251, 281, 426, 428, - 429, 210, 342, 259, 312, 206, 264, 377, 277, 286, - 0, 0, 329, 359, 215, 411, 378, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 187, 200, 282, - 0, 349, 250, 432, 416, 414, 0, 0, 230, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 189, 190, 201, 208, 217, 229, 242, 248, 257, - 260, 263, 266, 267, 269, 274, 291, 295, 296, 297, - 298, 314, 315, 316, 319, 322, 323, 325, 327, 328, - 331, 337, 338, 339, 340, 341, 343, 350, 354, 361, - 362, 363, 364, 365, 366, 367, 371, 372, 373, 374, - 382, 385, 399, 400, 410, 420, 424, 258, 407, 425, - 0, 290, 0, 194, 220, 207, 227, 241, 243, 271, - 299, 305, 333, 336, 255, 238, 218, 353, 216, 369, - 388, 389, 390, 392, 303, 234, 321, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, - 0, 280, 0, 0, 334, 0, 370, 223, 289, 287, - 396, 246, 240, 236, 222, 265, 294, 332, 387, 326, - 0, 284, 0, 0, 379, 306, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 270, 221, 192, 318, 380, 249, 0, 0, 0, - 174, 175, 176, 0, 0, 1391, 0, 0, 1392, 0, - 0, 213, 0, 219, 0, 0, 0, 0, 233, 268, - 239, 232, 394, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 256, 0, 307, 0, 0, - 0, 421, 0, 0, 0, 0, 0, 0, 0, 0, - 279, 0, 276, 188, 202, 0, 0, 317, 355, 360, - 0, 0, 0, 224, 0, 358, 330, 409, 209, 247, - 352, 335, 356, 0, 0, 357, 285, 398, 347, 408, - 422, 423, 231, 311, 415, 391, 419, 431, 203, 228, - 324, 384, 412, 376, 304, 395, 275, 375, 254, 191, - 283, 195, 386, 406, 214, 368, 0, 0, 0, 197, - 404, 383, 301, 272, 273, 196, 0, 351, 235, 252, - 226, 320, 401, 402, 225, 433, 204, 418, 199, 205, - 417, 313, 397, 405, 302, 293, 198, 403, 300, 292, - 278, 245, 261, 345, 288, 346, 262, 309, 308, 310, - 0, 193, 0, 381, 413, 434, 211, 0, 0, 393, - 427, 430, 
0, 348, 212, 253, 244, 344, 251, 281, - 426, 428, 429, 210, 342, 259, 312, 206, 264, 377, - 277, 286, 0, 0, 329, 359, 215, 411, 378, 0, + 0, 0, 0, 0, 187, 188, 199, 207, 217, 229, + 242, 250, 260, 264, 267, 270, 271, 274, 279, 296, + 301, 302, 303, 304, 320, 321, 322, 325, 328, 329, + 332, 334, 335, 338, 344, 345, 346, 347, 348, 350, + 357, 361, 369, 370, 371, 372, 373, 375, 376, 380, + 381, 382, 383, 391, 395, 410, 411, 422, 434, 438, + 261, 418, 439, 0, 295, 0, 0, 297, 246, 263, + 272, 0, 429, 392, 203, 363, 253, 192, 220, 206, + 227, 241, 243, 276, 305, 311, 340, 343, 258, 238, + 218, 360, 215, 378, 398, 399, 400, 402, 309, 234, + 327, 0, 0, 0, 1460, 0, 0, 0, 0, 237, + 0, 0, 0, 0, 0, 285, 0, 0, 0, 341, + 0, 379, 223, 294, 292, 407, 247, 240, 236, 222, + 269, 300, 339, 397, 333, 0, 289, 0, 0, 388, + 312, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 275, 221, 190, 324, + 389, 251, 0, 0, 0, 182, 183, 184, 0, 1462, + 0, 0, 0, 0, 0, 0, 212, 0, 219, 0, + 0, 0, 0, 233, 273, 239, 232, 404, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 259, 0, 313, 0, 0, 0, 0, 435, 0, 0, + 0, 0, 0, 0, 0, 0, 284, 0, 281, 186, + 201, 0, 0, 323, 362, 368, 0, 0, 0, 224, + 0, 366, 337, 421, 208, 249, 359, 342, 364, 0, + 1458, 365, 290, 409, 354, 419, 436, 437, 231, 317, + 427, 401, 433, 447, 202, 228, 331, 394, 424, 385, + 310, 405, 406, 280, 384, 257, 189, 288, 444, 200, + 374, 216, 193, 396, 417, 213, 377, 0, 0, 0, + 195, 415, 393, 307, 277, 278, 194, 0, 358, 235, + 255, 226, 326, 412, 413, 225, 449, 204, 432, 197, + 0, 431, 319, 408, 416, 308, 299, 196, 414, 306, + 298, 283, 245, 265, 352, 293, 353, 266, 315, 314, + 316, 0, 191, 0, 390, 425, 450, 210, 0, 0, + 403, 441, 446, 0, 355, 211, 256, 244, 351, 254, + 286, 440, 442, 443, 445, 209, 349, 262, 330, 420, + 248, 428, 318, 205, 268, 386, 282, 291, 0, 0, + 336, 367, 214, 423, 387, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 185, 198, 287, 0, 356, + 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, - 200, 282, 0, 349, 250, 432, 416, 414, 0, 0, - 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 188, 199, 207, 217, 229, 242, 250, 260, 264, 267, + 270, 271, 274, 279, 296, 301, 302, 303, 304, 320, + 321, 322, 325, 328, 329, 332, 334, 335, 338, 344, + 345, 346, 347, 348, 350, 357, 361, 369, 370, 371, + 372, 373, 375, 376, 380, 381, 382, 383, 391, 395, + 410, 411, 422, 434, 438, 261, 418, 439, 0, 295, + 0, 0, 297, 246, 263, 272, 0, 429, 392, 203, + 363, 253, 192, 220, 206, 227, 241, 243, 276, 305, + 311, 340, 343, 258, 238, 218, 360, 215, 378, 398, + 399, 400, 402, 309, 234, 327, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 0, 0, 0, + 285, 0, 0, 0, 341, 0, 379, 223, 294, 292, + 407, 247, 240, 236, 222, 269, 300, 339, 397, 333, + 0, 289, 0, 0, 388, 312, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 275, 221, 190, 324, 389, 251, 0, 0, 0, + 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, + 0, 212, 0, 219, 0, 0, 0, 0, 233, 273, + 239, 232, 404, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 768, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 259, 0, 313, 0, 0, + 0, 0, 435, 0, 0, 0, 0, 0, 0, 0, + 0, 284, 774, 281, 186, 201, 772, 0, 323, 362, + 368, 0, 0, 0, 224, 0, 366, 337, 421, 208, + 249, 359, 342, 364, 0, 0, 365, 290, 409, 354, + 419, 436, 437, 231, 317, 427, 401, 433, 447, 202, + 228, 331, 394, 424, 385, 310, 405, 406, 280, 384, + 257, 189, 288, 444, 200, 374, 216, 193, 396, 417, + 213, 377, 0, 0, 0, 195, 415, 393, 307, 277, + 278, 194, 0, 358, 235, 255, 226, 326, 412, 413, + 225, 449, 204, 432, 197, 0, 431, 319, 408, 416, + 308, 299, 196, 414, 306, 298, 283, 245, 265, 352, + 293, 353, 266, 315, 314, 316, 0, 191, 0, 390, + 425, 450, 210, 0, 0, 403, 441, 446, 0, 355, + 211, 256, 244, 351, 254, 286, 440, 442, 443, 445, + 209, 349, 262, 330, 420, 248, 428, 318, 205, 268, + 386, 
282, 291, 0, 0, 336, 367, 214, 423, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 185, 198, 287, 0, 356, 252, 448, 430, 426, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 189, 190, 201, 208, 217, 229, 242, - 248, 257, 260, 263, 266, 267, 269, 274, 291, 295, - 296, 297, 298, 314, 315, 316, 319, 322, 323, 325, - 327, 328, 331, 337, 338, 339, 340, 341, 343, 350, - 354, 361, 362, 363, 364, 365, 366, 367, 371, 372, - 373, 374, 382, 385, 399, 400, 410, 420, 424, 258, - 407, 425, 0, 290, 0, 194, 220, 207, 227, 241, - 243, 271, 299, 305, 333, 336, 255, 238, 218, 353, - 216, 369, 388, 389, 390, 392, 303, 234, 321, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 1056, - 0, 0, 0, 280, 0, 0, 334, 0, 370, 223, - 289, 287, 396, 246, 240, 236, 222, 265, 294, 332, - 387, 326, 0, 284, 0, 0, 379, 306, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 270, 221, 192, 318, 380, 249, 0, - 0, 0, 174, 175, 176, 0, 1055, 0, 0, 0, - 0, 0, 0, 213, 0, 219, 0, 0, 0, 0, - 233, 268, 239, 232, 394, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 256, 0, 307, - 0, 0, 0, 421, 0, 0, 0, 0, 0, 0, - 0, 0, 279, 0, 276, 188, 202, 0, 0, 317, - 355, 360, 0, 0, 0, 224, 0, 358, 330, 409, - 209, 247, 352, 335, 356, 0, 0, 357, 285, 398, - 347, 408, 422, 423, 231, 311, 415, 391, 419, 431, - 203, 228, 324, 384, 412, 376, 304, 395, 275, 375, - 254, 191, 283, 195, 386, 406, 214, 368, 0, 0, - 0, 197, 404, 383, 301, 272, 273, 196, 0, 351, - 235, 252, 226, 320, 401, 402, 225, 433, 204, 418, - 199, 205, 417, 313, 397, 405, 302, 293, 198, 403, - 300, 292, 278, 245, 261, 345, 288, 346, 262, 309, - 308, 310, 0, 193, 0, 381, 413, 434, 211, 0, - 0, 393, 427, 430, 0, 348, 212, 253, 244, 344, - 251, 281, 426, 428, 429, 210, 342, 259, 312, 206, - 264, 377, 277, 286, 0, 0, 329, 359, 215, 411, - 378, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 187, 
200, 282, 0, 349, 250, 432, 416, 414, - 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 189, 190, 201, 208, 217, - 229, 242, 248, 257, 260, 263, 266, 267, 269, 274, - 291, 295, 296, 297, 298, 314, 315, 316, 319, 322, - 323, 325, 327, 328, 331, 337, 338, 339, 340, 341, - 343, 350, 354, 361, 362, 363, 364, 365, 366, 367, - 371, 372, 373, 374, 382, 385, 399, 400, 410, 420, - 424, 258, 407, 425, 0, 290, 0, 194, 220, 207, - 227, 241, 243, 271, 299, 305, 333, 336, 255, 238, - 218, 353, 216, 369, 388, 389, 390, 392, 303, 234, - 321, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 0, 0, 0, 280, 0, 0, 334, 0, - 370, 223, 289, 287, 396, 246, 240, 236, 222, 265, - 294, 332, 387, 326, 0, 284, 0, 0, 379, 306, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 270, 221, 192, 318, 380, - 249, 0, 0, 0, 174, 175, 176, 0, 0, 0, - 0, 0, 0, 0, 0, 213, 0, 219, 0, 0, - 0, 0, 233, 268, 239, 232, 394, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, - 0, 307, 0, 0, 0, 421, 0, 0, 0, 0, - 2072, 0, 0, 0, 279, 0, 276, 188, 202, 0, - 0, 317, 355, 360, 0, 0, 0, 224, 0, 358, - 330, 409, 209, 247, 352, 335, 356, 0, 0, 357, - 285, 398, 347, 408, 422, 423, 231, 311, 415, 391, - 419, 431, 203, 228, 324, 384, 412, 376, 304, 395, - 275, 375, 254, 191, 283, 195, 386, 406, 214, 368, - 0, 0, 0, 197, 404, 383, 301, 272, 273, 196, - 0, 351, 235, 252, 226, 320, 401, 402, 225, 433, - 204, 418, 199, 205, 417, 313, 397, 405, 302, 293, - 198, 403, 300, 292, 278, 245, 261, 345, 288, 346, - 262, 309, 308, 310, 0, 193, 0, 381, 413, 434, - 211, 0, 0, 393, 427, 430, 0, 348, 212, 253, - 244, 344, 251, 281, 426, 428, 429, 210, 342, 259, - 312, 206, 264, 377, 277, 286, 0, 0, 329, 359, - 215, 411, 378, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 187, 200, 282, 0, 349, 250, 432, - 416, 414, 0, 0, 230, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 
0, 0, 0, 189, 190, 201, - 208, 217, 229, 242, 248, 257, 260, 263, 266, 267, - 269, 274, 291, 295, 296, 297, 298, 314, 315, 316, - 319, 322, 323, 325, 327, 328, 331, 337, 338, 339, - 340, 341, 343, 350, 354, 361, 362, 363, 364, 365, - 366, 367, 371, 372, 373, 374, 382, 385, 399, 400, - 410, 420, 424, 258, 407, 425, 0, 290, 0, 194, - 220, 207, 227, 241, 243, 271, 299, 305, 333, 336, - 255, 238, 218, 353, 216, 369, 388, 389, 390, 392, - 303, 234, 321, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 0, 0, 0, 280, 0, 0, - 334, 0, 370, 223, 289, 287, 396, 246, 240, 236, - 222, 265, 294, 332, 387, 326, 0, 284, 0, 0, - 379, 306, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 270, 221, 192, - 318, 380, 249, 0, 0, 0, 174, 175, 176, 0, - 0, 0, 0, 0, 0, 0, 0, 213, 0, 219, - 0, 0, 0, 0, 233, 268, 239, 232, 394, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 256, 0, 307, 0, 0, 0, 421, 0, 0, - 0, 0, 1986, 0, 0, 0, 279, 0, 276, 188, - 202, 0, 0, 317, 355, 360, 0, 0, 0, 224, - 0, 358, 330, 409, 209, 247, 352, 335, 356, 0, - 0, 357, 285, 398, 347, 408, 422, 423, 231, 311, - 415, 391, 419, 431, 203, 228, 324, 384, 412, 376, - 304, 395, 275, 375, 254, 191, 283, 195, 386, 406, - 214, 368, 0, 0, 0, 197, 404, 383, 301, 272, - 273, 196, 0, 351, 235, 252, 226, 320, 401, 402, - 225, 433, 204, 418, 199, 205, 417, 313, 397, 405, - 302, 293, 198, 403, 300, 292, 278, 245, 261, 345, - 288, 346, 262, 309, 308, 310, 0, 193, 0, 381, - 413, 434, 211, 0, 0, 393, 427, 430, 0, 348, - 212, 253, 244, 344, 251, 281, 426, 428, 429, 210, - 342, 259, 312, 206, 264, 377, 277, 286, 0, 0, - 329, 359, 215, 411, 378, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 187, 200, 282, 0, 349, - 250, 432, 416, 414, 0, 0, 230, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 189, - 190, 201, 208, 217, 229, 242, 248, 257, 260, 263, - 266, 267, 269, 274, 291, 295, 296, 297, 298, 
314, - 315, 316, 319, 322, 323, 325, 327, 328, 331, 337, - 338, 339, 340, 341, 343, 350, 354, 361, 362, 363, - 364, 365, 366, 367, 371, 372, 373, 374, 382, 385, - 399, 400, 410, 420, 424, 258, 407, 425, 0, 290, - 0, 194, 220, 207, 227, 241, 243, 271, 299, 305, - 333, 336, 255, 238, 218, 353, 216, 369, 388, 389, - 390, 392, 303, 234, 321, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 0, 0, 0, 280, - 0, 0, 334, 0, 370, 223, 289, 287, 396, 246, - 240, 236, 222, 265, 294, 332, 387, 326, 0, 284, - 0, 0, 379, 306, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 270, - 221, 192, 318, 380, 249, 67, 0, 0, 174, 175, - 176, 0, 0, 0, 0, 0, 0, 0, 0, 213, - 0, 219, 0, 0, 0, 0, 233, 268, 239, 232, - 394, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 256, 0, 307, 0, 0, 0, 421, - 0, 0, 0, 0, 0, 0, 0, 0, 279, 0, - 276, 188, 202, 0, 0, 317, 355, 360, 0, 0, - 0, 224, 0, 358, 330, 409, 209, 247, 352, 335, - 356, 0, 0, 357, 285, 398, 347, 408, 422, 423, - 231, 311, 415, 391, 419, 431, 203, 228, 324, 384, - 412, 376, 304, 395, 275, 375, 254, 191, 283, 195, - 386, 406, 214, 368, 0, 0, 0, 197, 404, 383, - 301, 272, 273, 196, 0, 351, 235, 252, 226, 320, - 401, 402, 225, 433, 204, 418, 199, 205, 417, 313, - 397, 405, 302, 293, 198, 403, 300, 292, 278, 245, - 261, 345, 288, 346, 262, 309, 308, 310, 0, 193, - 0, 381, 413, 434, 211, 0, 0, 393, 427, 430, - 0, 348, 212, 253, 244, 344, 251, 281, 426, 428, - 429, 210, 342, 259, 312, 206, 264, 377, 277, 286, - 0, 0, 329, 359, 215, 411, 378, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 187, 200, 282, - 0, 349, 250, 432, 416, 414, 0, 0, 230, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 189, 190, 201, 208, 217, 229, 242, 248, 257, - 260, 263, 266, 267, 269, 274, 291, 295, 296, 297, - 298, 314, 315, 316, 319, 322, 323, 325, 327, 328, - 331, 337, 338, 339, 340, 341, 343, 350, 354, 361, - 362, 363, 
364, 365, 366, 367, 371, 372, 373, 374, - 382, 385, 399, 400, 410, 420, 424, 258, 407, 425, - 0, 290, 0, 194, 220, 207, 227, 241, 243, 271, - 299, 305, 333, 336, 255, 238, 218, 353, 216, 369, - 388, 389, 390, 392, 303, 234, 321, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, - 0, 280, 0, 0, 334, 0, 370, 223, 289, 287, - 396, 246, 240, 236, 222, 265, 294, 332, 387, 326, - 0, 284, 0, 0, 379, 306, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 270, 221, 192, 318, 380, 249, 0, 0, 0, - 174, 175, 176, 0, 1375, 0, 0, 0, 0, 0, - 0, 213, 0, 219, 0, 0, 0, 0, 233, 268, - 239, 232, 394, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 256, 0, 307, 0, 0, - 0, 421, 0, 0, 0, 0, 0, 0, 0, 0, - 279, 0, 276, 188, 202, 0, 0, 317, 355, 360, - 0, 0, 0, 224, 0, 358, 330, 409, 209, 247, - 352, 335, 356, 0, 0, 357, 285, 398, 347, 408, - 422, 423, 231, 311, 415, 391, 419, 431, 203, 228, - 324, 384, 412, 376, 304, 395, 275, 375, 254, 191, - 283, 195, 386, 406, 214, 368, 0, 0, 0, 197, - 404, 383, 301, 272, 273, 196, 0, 351, 235, 252, - 226, 320, 401, 402, 225, 433, 204, 418, 199, 205, - 417, 313, 397, 405, 302, 293, 198, 403, 300, 292, - 278, 245, 261, 345, 288, 346, 262, 309, 308, 310, - 0, 193, 0, 381, 413, 434, 211, 0, 0, 393, - 427, 430, 0, 348, 212, 253, 244, 344, 251, 281, - 426, 428, 429, 210, 342, 259, 312, 206, 264, 377, - 277, 286, 0, 0, 329, 359, 215, 411, 378, 0, + 0, 0, 0, 0, 187, 188, 199, 207, 217, 229, + 242, 250, 260, 264, 267, 270, 271, 274, 279, 296, + 301, 302, 303, 304, 320, 321, 322, 325, 328, 329, + 332, 334, 335, 338, 344, 345, 346, 347, 348, 350, + 357, 361, 369, 370, 371, 372, 373, 375, 376, 380, + 381, 382, 383, 391, 395, 410, 411, 422, 434, 438, + 261, 418, 439, 0, 295, 0, 0, 297, 246, 263, + 272, 0, 429, 392, 203, 363, 253, 192, 220, 206, + 227, 241, 243, 276, 305, 311, 340, 343, 258, 238, + 218, 360, 215, 378, 398, 399, 400, 402, 309, 234, + 327, 0, 
0, 0, 1460, 0, 0, 0, 0, 237, + 0, 0, 0, 0, 0, 285, 0, 0, 0, 341, + 0, 379, 223, 294, 292, 407, 247, 240, 236, 222, + 269, 300, 339, 397, 333, 0, 289, 0, 0, 388, + 312, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 275, 221, 190, 324, + 389, 251, 0, 0, 0, 182, 183, 184, 0, 1462, + 0, 0, 0, 0, 0, 0, 212, 0, 219, 0, + 0, 0, 0, 233, 273, 239, 232, 404, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 259, 0, 313, 0, 0, 0, 0, 435, 0, 0, + 0, 0, 0, 0, 0, 0, 284, 0, 281, 186, + 201, 0, 0, 323, 362, 368, 0, 0, 0, 224, + 0, 366, 337, 421, 208, 249, 359, 342, 364, 0, + 0, 365, 290, 409, 354, 419, 436, 437, 231, 317, + 427, 401, 433, 447, 202, 228, 331, 394, 424, 385, + 310, 405, 406, 280, 384, 257, 189, 288, 444, 200, + 374, 216, 193, 396, 417, 213, 377, 0, 0, 0, + 195, 415, 393, 307, 277, 278, 194, 0, 358, 235, + 255, 226, 326, 412, 413, 225, 449, 204, 432, 197, + 0, 431, 319, 408, 416, 308, 299, 196, 414, 306, + 298, 283, 245, 265, 352, 293, 353, 266, 315, 314, + 316, 0, 191, 0, 390, 425, 450, 210, 0, 0, + 403, 441, 446, 0, 355, 211, 256, 244, 351, 254, + 286, 440, 442, 443, 445, 209, 349, 262, 330, 420, + 248, 428, 318, 205, 268, 386, 282, 291, 0, 0, + 336, 367, 214, 423, 387, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 185, 198, 287, 0, 356, + 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, - 200, 282, 0, 349, 250, 432, 416, 414, 0, 0, - 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 188, 199, 207, 217, 229, 242, 250, 260, 264, 267, + 270, 271, 274, 279, 296, 301, 302, 303, 304, 320, + 321, 322, 325, 328, 329, 332, 334, 335, 338, 344, + 345, 346, 347, 348, 350, 357, 361, 369, 370, 371, + 372, 373, 375, 376, 380, 381, 382, 383, 391, 395, + 410, 411, 422, 434, 438, 261, 418, 439, 0, 295, + 0, 0, 297, 246, 263, 272, 0, 429, 392, 203, + 363, 253, 192, 220, 206, 227, 241, 243, 276, 305, + 311, 340, 
343, 258, 238, 218, 360, 215, 378, 398, + 399, 400, 402, 309, 234, 327, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 0, 0, 0, + 285, 0, 0, 0, 341, 0, 379, 223, 294, 292, + 407, 247, 240, 236, 222, 269, 300, 339, 397, 333, + 0, 289, 0, 0, 388, 312, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 275, 221, 190, 324, 389, 251, 0, 0, 596, + 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, + 0, 212, 0, 219, 0, 0, 0, 0, 233, 273, + 239, 232, 404, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 259, 0, 313, 0, 0, + 0, 0, 435, 0, 0, 0, 0, 2105, 0, 0, + 0, 284, 0, 281, 186, 201, 0, 0, 323, 362, + 368, 0, 0, 0, 224, 0, 366, 337, 421, 208, + 249, 359, 342, 364, 0, 0, 365, 290, 409, 354, + 419, 436, 437, 231, 317, 427, 401, 433, 447, 202, + 228, 331, 394, 424, 385, 310, 405, 406, 280, 384, + 257, 189, 288, 444, 200, 374, 216, 193, 396, 417, + 213, 377, 0, 0, 0, 195, 415, 393, 307, 277, + 278, 194, 0, 358, 235, 255, 226, 326, 412, 413, + 225, 449, 204, 432, 197, 0, 431, 319, 408, 416, + 308, 299, 196, 414, 306, 298, 283, 245, 265, 352, + 293, 353, 266, 315, 314, 316, 0, 191, 0, 390, + 425, 450, 210, 0, 0, 403, 441, 446, 0, 355, + 211, 256, 244, 351, 254, 286, 440, 442, 443, 445, + 209, 349, 262, 330, 420, 248, 428, 318, 205, 268, + 386, 282, 291, 0, 0, 336, 367, 214, 423, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 185, 198, 287, 0, 356, 252, 448, 430, 426, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 189, 190, 201, 208, 217, 229, 242, - 248, 257, 260, 263, 266, 267, 269, 274, 291, 295, - 296, 297, 298, 314, 315, 316, 319, 322, 323, 325, - 327, 328, 331, 337, 338, 339, 340, 341, 343, 350, - 354, 361, 362, 363, 364, 365, 366, 367, 371, 372, - 373, 374, 382, 385, 399, 400, 410, 420, 424, 258, - 407, 425, 0, 290, 0, 194, 220, 207, 227, 241, - 243, 271, 299, 305, 333, 336, 255, 238, 218, 353, - 216, 369, 388, 389, 390, 392, 
303, 234, 321, 0, + 0, 0, 0, 0, 187, 188, 199, 207, 217, 229, + 242, 250, 260, 264, 267, 270, 271, 274, 279, 296, + 301, 302, 303, 304, 320, 321, 322, 325, 328, 329, + 332, 334, 335, 338, 344, 345, 346, 347, 348, 350, + 357, 361, 369, 370, 371, 372, 373, 375, 376, 380, + 381, 382, 383, 391, 395, 410, 411, 422, 434, 438, + 261, 418, 439, 0, 295, 0, 0, 297, 246, 263, + 272, 0, 429, 392, 203, 363, 253, 192, 220, 206, + 227, 241, 243, 276, 305, 311, 340, 343, 258, 238, + 218, 360, 215, 378, 398, 399, 400, 402, 309, 234, + 35, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 327, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 71, 0, 0, 182, 183, + 184, 0, 0, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, + 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 0, 0, 0, 182, 183, 184, 0, 0, 1480, 0, + 0, 1481, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 1119, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 1118, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, 
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 0, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 2186, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 0, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 2105, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, + 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 0, 0, 0, 280, 0, 0, 334, 0, 370, 223, - 289, 287, 396, 246, 240, 236, 222, 265, 294, 332, - 387, 326, 0, 284, 0, 0, 379, 306, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 270, 221, 192, 318, 380, 249, 0, - 0, 0, 174, 175, 176, 0, 1025, 0, 0, 0, - 0, 0, 0, 213, 0, 219, 0, 0, 0, 0, - 233, 268, 239, 232, 394, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 256, 0, 307, - 0, 0, 0, 421, 0, 0, 0, 0, 0, 0, - 0, 0, 279, 0, 276, 188, 202, 0, 0, 317, - 355, 360, 0, 0, 0, 224, 0, 358, 330, 409, - 209, 247, 352, 335, 356, 0, 0, 357, 285, 398, - 347, 408, 422, 423, 231, 311, 415, 391, 419, 431, - 203, 228, 324, 384, 412, 376, 304, 395, 275, 375, - 254, 191, 283, 195, 386, 406, 214, 368, 0, 0, - 0, 197, 404, 383, 301, 272, 273, 196, 0, 351, - 235, 252, 226, 320, 401, 402, 225, 433, 204, 418, - 199, 205, 417, 313, 397, 405, 302, 293, 198, 403, - 300, 292, 278, 245, 261, 345, 288, 346, 262, 309, - 308, 310, 0, 193, 0, 381, 413, 434, 211, 0, - 0, 393, 427, 430, 0, 348, 212, 253, 244, 344, - 251, 281, 426, 428, 429, 210, 342, 259, 312, 206, - 264, 377, 277, 286, 0, 0, 329, 359, 215, 411, - 378, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 187, 200, 282, 0, 349, 250, 432, 416, 414, - 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 
0, 0, 0, 189, 190, 201, 208, 217, - 229, 242, 248, 257, 260, 263, 266, 267, 269, 274, - 291, 295, 296, 297, 298, 314, 315, 316, 319, 322, - 323, 325, 327, 328, 331, 337, 338, 339, 340, 341, - 343, 350, 354, 361, 362, 363, 364, 365, 366, 367, - 371, 372, 373, 374, 382, 385, 399, 400, 410, 420, - 424, 258, 407, 425, 0, 290, 0, 194, 220, 207, - 227, 241, 243, 271, 299, 305, 333, 336, 255, 238, - 218, 353, 216, 369, 388, 389, 390, 392, 303, 234, - 321, 0, 1175, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 0, 0, 0, 280, 0, 0, 334, 0, - 370, 223, 289, 287, 396, 246, 240, 236, 222, 265, - 294, 332, 387, 326, 0, 284, 0, 0, 379, 306, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 270, 221, 192, 318, 380, - 249, 0, 0, 0, 174, 175, 176, 0, 0, 0, - 0, 0, 0, 0, 0, 213, 0, 219, 0, 0, - 0, 0, 233, 268, 239, 232, 394, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, - 0, 307, 0, 0, 0, 421, 0, 0, 0, 0, - 0, 0, 0, 0, 279, 0, 276, 188, 202, 0, - 0, 317, 355, 360, 0, 0, 0, 224, 0, 358, - 330, 409, 209, 247, 352, 335, 356, 0, 0, 357, - 285, 398, 347, 408, 422, 423, 231, 311, 415, 391, - 419, 431, 203, 228, 324, 384, 412, 376, 304, 395, - 275, 375, 254, 191, 283, 195, 386, 406, 214, 368, - 0, 0, 0, 197, 404, 383, 301, 272, 273, 196, - 0, 351, 235, 252, 226, 320, 401, 402, 225, 433, - 204, 418, 199, 205, 417, 313, 397, 405, 302, 293, - 198, 403, 300, 292, 278, 245, 261, 345, 288, 346, - 262, 309, 308, 310, 0, 193, 0, 381, 413, 434, - 211, 0, 0, 393, 427, 430, 0, 348, 212, 253, - 244, 344, 251, 281, 426, 428, 429, 210, 342, 259, - 312, 206, 264, 377, 277, 286, 0, 0, 329, 359, - 215, 411, 378, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 187, 200, 282, 0, 349, 250, 432, - 416, 414, 0, 0, 230, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 189, 190, 201, - 208, 217, 229, 242, 248, 257, 260, 263, 266, 267, - 269, 274, 291, 295, 296, 297, 298, 
314, 315, 316, - 319, 322, 323, 325, 327, 328, 331, 337, 338, 339, - 340, 341, 343, 350, 354, 361, 362, 363, 364, 365, - 366, 367, 371, 372, 373, 374, 382, 385, 399, 400, - 410, 420, 424, 258, 407, 425, 0, 290, 0, 194, - 220, 207, 227, 241, 243, 271, 299, 305, 333, 336, - 255, 238, 218, 353, 216, 369, 388, 389, 390, 392, - 303, 234, 321, 0, 1173, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 0, 0, 0, 280, 0, 0, - 334, 0, 370, 223, 289, 287, 396, 246, 240, 236, - 222, 265, 294, 332, 387, 326, 0, 284, 0, 0, - 379, 306, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 270, 221, 192, - 318, 380, 249, 0, 0, 0, 174, 175, 176, 0, - 0, 0, 0, 0, 0, 0, 0, 213, 0, 219, - 0, 0, 0, 0, 233, 268, 239, 232, 394, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 256, 0, 307, 0, 0, 0, 421, 0, 0, - 0, 0, 0, 0, 0, 0, 279, 0, 276, 188, - 202, 0, 0, 317, 355, 360, 0, 0, 0, 224, - 0, 358, 330, 409, 209, 247, 352, 335, 356, 0, - 0, 357, 285, 398, 347, 408, 422, 423, 231, 311, - 415, 391, 419, 431, 203, 228, 324, 384, 412, 376, - 304, 395, 275, 375, 254, 191, 283, 195, 386, 406, - 214, 368, 0, 0, 0, 197, 404, 383, 301, 272, - 273, 196, 0, 351, 235, 252, 226, 320, 401, 402, - 225, 433, 204, 418, 199, 205, 417, 313, 397, 405, - 302, 293, 198, 403, 300, 292, 278, 245, 261, 345, - 288, 346, 262, 309, 308, 310, 0, 193, 0, 381, - 413, 434, 211, 0, 0, 393, 427, 430, 0, 348, - 212, 253, 244, 344, 251, 281, 426, 428, 429, 210, - 342, 259, 312, 206, 264, 377, 277, 286, 0, 0, - 329, 359, 215, 411, 378, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 187, 200, 282, 0, 349, - 250, 432, 416, 414, 0, 0, 230, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 189, - 190, 201, 208, 217, 229, 242, 248, 257, 260, 263, - 266, 267, 269, 274, 291, 295, 296, 297, 298, 314, - 315, 316, 319, 322, 323, 325, 327, 328, 331, 337, - 338, 339, 340, 341, 343, 350, 354, 361, 362, 363, - 
364, 365, 366, 367, 371, 372, 373, 374, 382, 385, - 399, 400, 410, 420, 424, 258, 407, 425, 0, 290, - 0, 194, 220, 207, 227, 241, 243, 271, 299, 305, - 333, 336, 255, 238, 218, 353, 216, 369, 388, 389, - 390, 392, 303, 234, 321, 0, 1171, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 0, 0, 0, 280, - 0, 0, 334, 0, 370, 223, 289, 287, 396, 246, - 240, 236, 222, 265, 294, 332, 387, 326, 0, 284, - 0, 0, 379, 306, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 270, - 221, 192, 318, 380, 249, 0, 0, 0, 174, 175, - 176, 0, 0, 0, 0, 0, 0, 0, 0, 213, - 0, 219, 0, 0, 0, 0, 233, 268, 239, 232, - 394, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 256, 0, 307, 0, 0, 0, 421, - 0, 0, 0, 0, 0, 0, 0, 0, 279, 0, - 276, 188, 202, 0, 0, 317, 355, 360, 0, 0, - 0, 224, 0, 358, 330, 409, 209, 247, 352, 335, - 356, 0, 0, 357, 285, 398, 347, 408, 422, 423, - 231, 311, 415, 391, 419, 431, 203, 228, 324, 384, - 412, 376, 304, 395, 275, 375, 254, 191, 283, 195, - 386, 406, 214, 368, 0, 0, 0, 197, 404, 383, - 301, 272, 273, 196, 0, 351, 235, 252, 226, 320, - 401, 402, 225, 433, 204, 418, 199, 205, 417, 313, - 397, 405, 302, 293, 198, 403, 300, 292, 278, 245, - 261, 345, 288, 346, 262, 309, 308, 310, 0, 193, - 0, 381, 413, 434, 211, 0, 0, 393, 427, 430, - 0, 348, 212, 253, 244, 344, 251, 281, 426, 428, - 429, 210, 342, 259, 312, 206, 264, 377, 277, 286, - 0, 0, 329, 359, 215, 411, 378, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 187, 200, 282, - 0, 349, 250, 432, 416, 414, 0, 0, 230, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 189, 190, 201, 208, 217, 229, 242, 248, 257, - 260, 263, 266, 267, 269, 274, 291, 295, 296, 297, - 298, 314, 315, 316, 319, 322, 323, 325, 327, 328, - 331, 337, 338, 339, 340, 341, 343, 350, 354, 361, - 362, 363, 364, 365, 366, 367, 371, 372, 373, 374, - 382, 385, 399, 400, 410, 420, 424, 258, 407, 425, - 0, 290, 0, 194, 
220, 207, 227, 241, 243, 271, - 299, 305, 333, 336, 255, 238, 218, 353, 216, 369, - 388, 389, 390, 392, 303, 234, 321, 0, 1169, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, - 0, 280, 0, 0, 334, 0, 370, 223, 289, 287, - 396, 246, 240, 236, 222, 265, 294, 332, 387, 326, - 0, 284, 0, 0, 379, 306, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 270, 221, 192, 318, 380, 249, 0, 0, 0, - 174, 175, 176, 0, 0, 0, 0, 0, 0, 0, - 0, 213, 0, 219, 0, 0, 0, 0, 233, 268, - 239, 232, 394, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 256, 0, 307, 0, 0, - 0, 421, 0, 0, 0, 0, 0, 0, 0, 0, - 279, 0, 276, 188, 202, 0, 0, 317, 355, 360, - 0, 0, 0, 224, 0, 358, 330, 409, 209, 247, - 352, 335, 356, 0, 0, 357, 285, 398, 347, 408, - 422, 423, 231, 311, 415, 391, 419, 431, 203, 228, - 324, 384, 412, 376, 304, 395, 275, 375, 254, 191, - 283, 195, 386, 406, 214, 368, 0, 0, 0, 197, - 404, 383, 301, 272, 273, 196, 0, 351, 235, 252, - 226, 320, 401, 402, 225, 433, 204, 418, 199, 205, - 417, 313, 397, 405, 302, 293, 198, 403, 300, 292, - 278, 245, 261, 345, 288, 346, 262, 309, 308, 310, - 0, 193, 0, 381, 413, 434, 211, 0, 0, 393, - 427, 430, 0, 348, 212, 253, 244, 344, 251, 281, - 426, 428, 429, 210, 342, 259, 312, 206, 264, 377, - 277, 286, 0, 0, 329, 359, 215, 411, 378, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 71, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 
249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 1462, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 
249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 0, 0, 0, 182, 183, 184, 0, 1088, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 
249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 0, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 249, 
359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 1365, 356, 252, 448, 430, 426, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 1243, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 0, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 
249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 1241, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 0, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 
249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 1239, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 0, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 
249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 1237, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 0, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 
249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 1235, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 0, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 
249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 1231, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 0, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 
249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 1229, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 0, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 
249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 327, 0, 1227, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 0, 0, 0, 285, 0, + 0, 0, 341, 0, 379, 223, 294, 292, 407, 247, + 240, 236, 222, 269, 300, 339, 397, 333, 0, 289, + 0, 0, 388, 312, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 275, + 221, 190, 324, 389, 251, 0, 0, 0, 182, 183, + 184, 0, 0, 0, 0, 0, 0, 0, 0, 212, + 0, 219, 0, 0, 0, 0, 233, 273, 239, 232, + 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 259, 0, 313, 0, 0, 0, 0, + 435, 0, 0, 0, 0, 0, 0, 0, 0, 284, + 0, 281, 186, 201, 0, 0, 323, 362, 368, 0, + 0, 0, 224, 0, 366, 337, 421, 208, 
249, 359, + 342, 364, 0, 0, 365, 290, 409, 354, 419, 436, + 437, 231, 317, 427, 401, 433, 447, 202, 228, 331, + 394, 424, 385, 310, 405, 406, 280, 384, 257, 189, + 288, 444, 200, 374, 216, 193, 396, 417, 213, 377, + 0, 0, 0, 195, 415, 393, 307, 277, 278, 194, + 0, 358, 235, 255, 226, 326, 412, 413, 225, 449, + 204, 432, 197, 0, 431, 319, 408, 416, 308, 299, + 196, 414, 306, 298, 283, 245, 265, 352, 293, 353, + 266, 315, 314, 316, 0, 191, 0, 390, 425, 450, + 210, 0, 0, 403, 441, 446, 0, 355, 211, 256, + 244, 351, 254, 286, 440, 442, 443, 445, 209, 349, + 262, 330, 420, 248, 428, 318, 205, 268, 386, 282, + 291, 0, 0, 336, 367, 214, 423, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 198, + 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 188, 199, 207, 217, 229, 242, 250, + 260, 264, 267, 270, 271, 274, 279, 296, 301, 302, + 303, 304, 320, 321, 322, 325, 328, 329, 332, 334, + 335, 338, 344, 345, 346, 347, 348, 350, 357, 361, + 369, 370, 371, 372, 373, 375, 376, 380, 381, 382, + 383, 391, 395, 410, 411, 422, 434, 438, 261, 418, + 439, 0, 295, 0, 0, 297, 246, 263, 272, 0, + 429, 392, 203, 363, 253, 192, 220, 206, 227, 241, + 243, 276, 305, 311, 340, 343, 258, 238, 218, 360, + 215, 378, 398, 399, 400, 402, 309, 234, 327, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 0, 0, 0, 285, 0, 0, 0, 341, 0, 379, + 223, 294, 292, 407, 247, 240, 236, 222, 269, 300, + 339, 397, 333, 0, 289, 0, 0, 388, 312, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 275, 221, 190, 324, 389, 251, + 1202, 0, 0, 182, 183, 184, 0, 0, 0, 0, + 0, 0, 0, 0, 212, 0, 219, 0, 0, 0, + 0, 233, 273, 239, 232, 404, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 259, 0, + 313, 0, 0, 0, 0, 435, 0, 0, 0, 0, + 0, 0, 0, 0, 284, 0, 281, 186, 201, 0, + 0, 323, 362, 368, 0, 0, 0, 224, 0, 366, + 337, 421, 208, 
249, 359, 342, 364, 0, 0, 365, + 290, 409, 354, 419, 436, 437, 231, 317, 427, 401, + 433, 447, 202, 228, 331, 394, 424, 385, 310, 405, + 406, 280, 384, 257, 189, 288, 444, 200, 374, 216, + 193, 396, 417, 213, 377, 0, 0, 0, 195, 415, + 393, 307, 277, 278, 194, 0, 358, 235, 255, 226, + 326, 412, 413, 225, 449, 204, 432, 197, 0, 431, + 319, 408, 416, 308, 299, 196, 414, 306, 298, 283, + 245, 265, 352, 293, 353, 266, 315, 314, 316, 0, + 191, 0, 390, 425, 450, 210, 0, 0, 403, 441, + 446, 0, 355, 211, 256, 244, 351, 254, 286, 440, + 442, 443, 445, 209, 349, 262, 330, 420, 248, 428, + 318, 205, 268, 386, 282, 291, 0, 0, 336, 367, + 214, 423, 387, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 198, 287, 0, 356, 252, 448, + 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 188, 199, + 207, 217, 229, 242, 250, 260, 264, 267, 270, 271, + 274, 279, 296, 301, 302, 303, 304, 320, 321, 322, + 325, 328, 329, 332, 334, 335, 338, 344, 345, 346, + 347, 348, 350, 357, 361, 369, 370, 371, 372, 373, + 375, 376, 380, 381, 382, 383, 391, 395, 410, 411, + 422, 434, 438, 261, 418, 439, 0, 295, 0, 0, + 297, 246, 263, 272, 0, 429, 392, 203, 363, 253, + 192, 220, 206, 227, 241, 243, 276, 305, 311, 340, + 343, 258, 238, 218, 360, 215, 378, 398, 399, 400, + 402, 309, 234, 1101, 0, 0, 0, 0, 0, 0, + 327, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 0, 0, 0, 285, 0, 0, 0, 341, + 0, 379, 223, 294, 292, 407, 247, 240, 236, 222, + 269, 300, 339, 397, 333, 0, 289, 0, 0, 388, + 312, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 275, 221, 190, 324, + 389, 251, 0, 0, 0, 182, 183, 184, 0, 0, + 0, 0, 0, 0, 0, 0, 212, 0, 219, 0, + 0, 0, 0, 233, 273, 239, 232, 404, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 259, 0, 313, 0, 0, 0, 0, 435, 0, 0, + 0, 0, 0, 0, 0, 0, 284, 0, 281, 186, + 201, 0, 0, 323, 362, 368, 0, 0, 0, 224, + 0, 
366, 337, 421, 208, 249, 359, 342, 364, 0, + 0, 365, 290, 409, 354, 419, 436, 437, 231, 317, + 427, 401, 433, 447, 202, 228, 331, 394, 424, 385, + 310, 405, 406, 280, 384, 257, 189, 288, 444, 200, + 374, 216, 193, 396, 417, 213, 377, 0, 0, 0, + 195, 415, 393, 307, 277, 278, 194, 0, 358, 235, + 255, 226, 326, 412, 413, 225, 449, 204, 432, 197, + 0, 431, 319, 408, 416, 308, 299, 196, 414, 306, + 298, 283, 245, 265, 352, 293, 353, 266, 315, 314, + 316, 0, 191, 0, 390, 425, 450, 210, 0, 0, + 403, 441, 446, 0, 355, 211, 256, 244, 351, 254, + 286, 440, 442, 443, 445, 209, 349, 262, 330, 420, + 248, 428, 318, 205, 268, 386, 282, 291, 0, 0, + 336, 367, 214, 423, 387, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 185, 198, 287, 0, 356, + 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, - 200, 282, 0, 349, 250, 432, 416, 414, 0, 0, - 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 188, 199, 207, 217, 229, 242, 250, 260, 264, 267, + 270, 271, 274, 279, 296, 301, 302, 303, 304, 320, + 321, 322, 325, 328, 329, 332, 334, 335, 338, 344, + 345, 346, 347, 348, 350, 357, 361, 369, 370, 371, + 372, 373, 375, 376, 380, 381, 382, 383, 391, 395, + 410, 411, 422, 434, 438, 261, 418, 439, 0, 295, + 0, 0, 297, 246, 263, 272, 0, 429, 392, 203, + 363, 253, 192, 220, 206, 227, 241, 243, 276, 305, + 311, 340, 343, 258, 238, 218, 360, 215, 378, 398, + 399, 400, 402, 309, 234, 327, 0, 0, 0, 0, + 0, 0, 0, 1092, 237, 0, 0, 0, 0, 0, + 285, 0, 0, 0, 341, 0, 379, 223, 294, 292, + 407, 247, 240, 236, 222, 269, 300, 339, 397, 333, + 0, 289, 0, 0, 388, 312, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 275, 221, 190, 324, 389, 251, 0, 0, 0, + 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, + 0, 212, 0, 219, 0, 0, 0, 0, 233, 273, + 239, 232, 404, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 259, 0, 313, 0, 0, + 0, 0, 435, 0, 0, 0, 0, 0, 
0, 0, + 0, 284, 0, 281, 186, 201, 0, 0, 323, 362, + 368, 0, 0, 0, 224, 0, 366, 337, 421, 208, + 249, 359, 342, 364, 0, 0, 365, 290, 409, 354, + 419, 436, 437, 231, 317, 427, 401, 433, 447, 202, + 228, 331, 394, 424, 385, 310, 405, 406, 280, 384, + 257, 189, 288, 444, 200, 374, 216, 193, 396, 417, + 213, 377, 0, 0, 0, 195, 415, 393, 307, 277, + 278, 194, 0, 358, 235, 255, 226, 326, 412, 413, + 225, 449, 204, 432, 197, 0, 431, 319, 408, 416, + 308, 299, 196, 414, 306, 298, 283, 245, 265, 352, + 293, 353, 266, 315, 314, 316, 0, 191, 0, 390, + 425, 450, 210, 0, 0, 403, 441, 446, 0, 355, + 211, 256, 244, 351, 254, 286, 440, 442, 443, 445, + 209, 349, 262, 330, 420, 248, 428, 318, 205, 268, + 386, 282, 291, 0, 0, 336, 367, 214, 423, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 185, 198, 287, 0, 356, 252, 448, 430, 426, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 189, 190, 201, 208, 217, 229, 242, - 248, 257, 260, 263, 266, 267, 269, 274, 291, 295, - 296, 297, 298, 314, 315, 316, 319, 322, 323, 325, - 327, 328, 331, 337, 338, 339, 340, 341, 343, 350, - 354, 361, 362, 363, 364, 365, 366, 367, 371, 372, - 373, 374, 382, 385, 399, 400, 410, 420, 424, 258, - 407, 425, 0, 290, 0, 194, 220, 207, 227, 241, - 243, 271, 299, 305, 333, 336, 255, 238, 218, 353, - 216, 369, 388, 389, 390, 392, 303, 234, 321, 0, - 1167, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 0, 0, 0, 280, 0, 0, 334, 0, 370, 223, - 289, 287, 396, 246, 240, 236, 222, 265, 294, 332, - 387, 326, 0, 284, 0, 0, 379, 306, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 270, 221, 192, 318, 380, 249, 0, - 0, 0, 174, 175, 176, 0, 0, 0, 0, 0, - 0, 0, 0, 213, 0, 219, 0, 0, 0, 0, - 233, 268, 239, 232, 394, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 256, 0, 307, - 0, 0, 0, 421, 0, 0, 0, 0, 0, 0, - 0, 0, 279, 0, 276, 188, 202, 0, 0, 317, - 355, 360, 0, 0, 0, 
224, 0, 358, 330, 409, - 209, 247, 352, 335, 356, 0, 0, 357, 285, 398, - 347, 408, 422, 423, 231, 311, 415, 391, 419, 431, - 203, 228, 324, 384, 412, 376, 304, 395, 275, 375, - 254, 191, 283, 195, 386, 406, 214, 368, 0, 0, - 0, 197, 404, 383, 301, 272, 273, 196, 0, 351, - 235, 252, 226, 320, 401, 402, 225, 433, 204, 418, - 199, 205, 417, 313, 397, 405, 302, 293, 198, 403, - 300, 292, 278, 245, 261, 345, 288, 346, 262, 309, - 308, 310, 0, 193, 0, 381, 413, 434, 211, 0, - 0, 393, 427, 430, 0, 348, 212, 253, 244, 344, - 251, 281, 426, 428, 429, 210, 342, 259, 312, 206, - 264, 377, 277, 286, 0, 0, 329, 359, 215, 411, - 378, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 187, 200, 282, 0, 349, 250, 432, 416, 414, - 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 189, 190, 201, 208, 217, - 229, 242, 248, 257, 260, 263, 266, 267, 269, 274, - 291, 295, 296, 297, 298, 314, 315, 316, 319, 322, - 323, 325, 327, 328, 331, 337, 338, 339, 340, 341, - 343, 350, 354, 361, 362, 363, 364, 365, 366, 367, - 371, 372, 373, 374, 382, 385, 399, 400, 410, 420, - 424, 258, 407, 425, 0, 290, 0, 194, 220, 207, - 227, 241, 243, 271, 299, 305, 333, 336, 255, 238, - 218, 353, 216, 369, 388, 389, 390, 392, 303, 234, - 321, 0, 1163, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 0, 0, 0, 280, 0, 0, 334, 0, - 370, 223, 289, 287, 396, 246, 240, 236, 222, 265, - 294, 332, 387, 326, 0, 284, 0, 0, 379, 306, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 270, 221, 192, 318, 380, - 249, 0, 0, 0, 174, 175, 176, 0, 0, 0, - 0, 0, 0, 0, 0, 213, 0, 219, 0, 0, - 0, 0, 233, 268, 239, 232, 394, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, - 0, 307, 0, 0, 0, 421, 0, 0, 0, 0, - 0, 0, 0, 0, 279, 0, 276, 188, 202, 0, - 0, 317, 355, 360, 0, 0, 0, 224, 0, 358, - 330, 409, 209, 247, 352, 335, 356, 0, 0, 357, - 285, 398, 347, 408, 422, 423, 231, 311, 415, 391, 
- 419, 431, 203, 228, 324, 384, 412, 376, 304, 395, - 275, 375, 254, 191, 283, 195, 386, 406, 214, 368, - 0, 0, 0, 197, 404, 383, 301, 272, 273, 196, - 0, 351, 235, 252, 226, 320, 401, 402, 225, 433, - 204, 418, 199, 205, 417, 313, 397, 405, 302, 293, - 198, 403, 300, 292, 278, 245, 261, 345, 288, 346, - 262, 309, 308, 310, 0, 193, 0, 381, 413, 434, - 211, 0, 0, 393, 427, 430, 0, 348, 212, 253, - 244, 344, 251, 281, 426, 428, 429, 210, 342, 259, - 312, 206, 264, 377, 277, 286, 0, 0, 329, 359, - 215, 411, 378, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 187, 200, 282, 0, 349, 250, 432, - 416, 414, 0, 0, 230, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 189, 190, 201, - 208, 217, 229, 242, 248, 257, 260, 263, 266, 267, - 269, 274, 291, 295, 296, 297, 298, 314, 315, 316, - 319, 322, 323, 325, 327, 328, 331, 337, 338, 339, - 340, 341, 343, 350, 354, 361, 362, 363, 364, 365, - 366, 367, 371, 372, 373, 374, 382, 385, 399, 400, - 410, 420, 424, 258, 407, 425, 0, 290, 0, 194, - 220, 207, 227, 241, 243, 271, 299, 305, 333, 336, - 255, 238, 218, 353, 216, 369, 388, 389, 390, 392, - 303, 234, 321, 0, 1161, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 0, 0, 0, 280, 0, 0, - 334, 0, 370, 223, 289, 287, 396, 246, 240, 236, - 222, 265, 294, 332, 387, 326, 0, 284, 0, 0, - 379, 306, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 270, 221, 192, - 318, 380, 249, 0, 0, 0, 174, 175, 176, 0, - 0, 0, 0, 0, 0, 0, 0, 213, 0, 219, - 0, 0, 0, 0, 233, 268, 239, 232, 394, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 256, 0, 307, 0, 0, 0, 421, 0, 0, - 0, 0, 0, 0, 0, 0, 279, 0, 276, 188, - 202, 0, 0, 317, 355, 360, 0, 0, 0, 224, - 0, 358, 330, 409, 209, 247, 352, 335, 356, 0, - 0, 357, 285, 398, 347, 408, 422, 423, 231, 311, - 415, 391, 419, 431, 203, 228, 324, 384, 412, 376, - 304, 395, 275, 375, 254, 191, 283, 195, 386, 406, - 214, 368, 0, 0, 
0, 197, 404, 383, 301, 272, - 273, 196, 0, 351, 235, 252, 226, 320, 401, 402, - 225, 433, 204, 418, 199, 205, 417, 313, 397, 405, - 302, 293, 198, 403, 300, 292, 278, 245, 261, 345, - 288, 346, 262, 309, 308, 310, 0, 193, 0, 381, - 413, 434, 211, 0, 0, 393, 427, 430, 0, 348, - 212, 253, 244, 344, 251, 281, 426, 428, 429, 210, - 342, 259, 312, 206, 264, 377, 277, 286, 0, 0, - 329, 359, 215, 411, 378, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 187, 200, 282, 0, 349, - 250, 432, 416, 414, 0, 0, 230, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 189, - 190, 201, 208, 217, 229, 242, 248, 257, 260, 263, - 266, 267, 269, 274, 291, 295, 296, 297, 298, 314, - 315, 316, 319, 322, 323, 325, 327, 328, 331, 337, - 338, 339, 340, 341, 343, 350, 354, 361, 362, 363, - 364, 365, 366, 367, 371, 372, 373, 374, 382, 385, - 399, 400, 410, 420, 424, 258, 407, 425, 0, 290, - 0, 194, 220, 207, 227, 241, 243, 271, 299, 305, - 333, 336, 255, 238, 218, 353, 216, 369, 388, 389, - 390, 392, 303, 234, 321, 0, 1159, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 0, 0, 0, 280, - 0, 0, 334, 0, 370, 223, 289, 287, 396, 246, - 240, 236, 222, 265, 294, 332, 387, 326, 0, 284, - 0, 0, 379, 306, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 270, - 221, 192, 318, 380, 249, 0, 0, 0, 174, 175, - 176, 0, 0, 0, 0, 0, 0, 0, 0, 213, - 0, 219, 0, 0, 0, 0, 233, 268, 239, 232, - 394, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 256, 0, 307, 0, 0, 0, 421, - 0, 0, 0, 0, 0, 0, 0, 0, 279, 0, - 276, 188, 202, 0, 0, 317, 355, 360, 0, 0, - 0, 224, 0, 358, 330, 409, 209, 247, 352, 335, - 356, 0, 0, 357, 285, 398, 347, 408, 422, 423, - 231, 311, 415, 391, 419, 431, 203, 228, 324, 384, - 412, 376, 304, 395, 275, 375, 254, 191, 283, 195, - 386, 406, 214, 368, 0, 0, 0, 197, 404, 383, - 301, 272, 273, 196, 0, 351, 235, 252, 226, 320, - 401, 402, 225, 433, 204, 418, 199, 205, 
417, 313, - 397, 405, 302, 293, 198, 403, 300, 292, 278, 245, - 261, 345, 288, 346, 262, 309, 308, 310, 0, 193, - 0, 381, 413, 434, 211, 0, 0, 393, 427, 430, - 0, 348, 212, 253, 244, 344, 251, 281, 426, 428, - 429, 210, 342, 259, 312, 206, 264, 377, 277, 286, - 0, 0, 329, 359, 215, 411, 378, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 187, 200, 282, - 0, 349, 250, 432, 416, 414, 0, 0, 230, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 189, 190, 201, 208, 217, 229, 242, 248, 257, - 260, 263, 266, 267, 269, 274, 291, 295, 296, 297, - 298, 314, 315, 316, 319, 322, 323, 325, 327, 328, - 331, 337, 338, 339, 340, 341, 343, 350, 354, 361, - 362, 363, 364, 365, 366, 367, 371, 372, 373, 374, - 382, 385, 399, 400, 410, 420, 424, 258, 407, 425, - 0, 290, 0, 194, 220, 207, 227, 241, 243, 271, - 299, 305, 333, 336, 255, 238, 218, 353, 216, 369, - 388, 389, 390, 392, 303, 234, 321, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, - 0, 280, 0, 0, 334, 0, 370, 223, 289, 287, - 396, 246, 240, 236, 222, 265, 294, 332, 387, 326, - 0, 284, 0, 0, 379, 306, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 270, 221, 192, 318, 380, 249, 1134, 0, 0, - 174, 175, 176, 0, 0, 0, 0, 0, 0, 0, - 0, 213, 0, 219, 0, 0, 0, 0, 233, 268, - 239, 232, 394, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 256, 0, 307, 0, 0, - 0, 421, 0, 0, 0, 0, 0, 0, 0, 0, - 279, 0, 276, 188, 202, 0, 0, 317, 355, 360, - 0, 0, 0, 224, 0, 358, 330, 409, 209, 247, - 352, 335, 356, 0, 0, 357, 285, 398, 347, 408, - 422, 423, 231, 311, 415, 391, 419, 431, 203, 228, - 324, 384, 412, 376, 304, 395, 275, 375, 254, 191, - 283, 195, 386, 406, 214, 368, 0, 0, 0, 197, - 404, 383, 301, 272, 273, 196, 0, 351, 235, 252, - 226, 320, 401, 402, 225, 433, 204, 418, 199, 205, - 417, 313, 397, 405, 302, 293, 198, 403, 300, 292, - 278, 245, 261, 345, 288, 346, 262, 309, 308, 310, - 0, 
193, 0, 381, 413, 434, 211, 0, 0, 393, - 427, 430, 0, 348, 212, 253, 244, 344, 251, 281, - 426, 428, 429, 210, 342, 259, 312, 206, 264, 377, - 277, 286, 0, 0, 329, 359, 215, 411, 378, 0, + 0, 0, 0, 0, 187, 188, 199, 207, 217, 229, + 242, 250, 260, 264, 267, 270, 271, 274, 279, 296, + 301, 302, 303, 304, 320, 321, 322, 325, 328, 329, + 332, 334, 335, 338, 344, 345, 346, 347, 348, 350, + 357, 361, 369, 370, 371, 372, 373, 375, 376, 380, + 381, 382, 383, 391, 395, 410, 411, 422, 434, 438, + 261, 418, 439, 0, 295, 0, 0, 297, 246, 263, + 272, 0, 429, 392, 203, 363, 253, 192, 220, 206, + 227, 241, 243, 276, 305, 311, 340, 343, 258, 238, + 218, 360, 215, 378, 398, 399, 400, 402, 309, 234, + 327, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 0, 0, 0, 285, 0, 0, 0, 341, + 0, 379, 223, 294, 292, 407, 247, 240, 236, 222, + 269, 300, 339, 397, 333, 0, 289, 0, 0, 388, + 312, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 275, 221, 190, 324, + 389, 251, 0, 0, 0, 182, 183, 184, 0, 948, + 0, 0, 0, 0, 0, 0, 212, 0, 219, 0, + 0, 0, 0, 233, 273, 239, 232, 404, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 259, 0, 313, 0, 0, 0, 0, 435, 0, 0, + 0, 0, 0, 0, 0, 0, 284, 0, 281, 186, + 201, 0, 0, 323, 362, 368, 0, 0, 0, 224, + 0, 366, 337, 421, 208, 249, 359, 342, 364, 0, + 0, 365, 290, 409, 354, 419, 436, 437, 231, 317, + 427, 401, 433, 447, 202, 228, 331, 394, 424, 385, + 310, 405, 406, 280, 384, 257, 189, 288, 444, 200, + 374, 216, 193, 396, 417, 213, 377, 0, 0, 0, + 195, 415, 393, 307, 277, 278, 194, 0, 358, 235, + 255, 226, 326, 412, 413, 225, 449, 204, 432, 197, + 0, 431, 319, 408, 416, 308, 299, 196, 414, 306, + 298, 283, 245, 265, 352, 293, 353, 266, 315, 314, + 316, 0, 191, 0, 390, 425, 450, 210, 0, 0, + 403, 441, 446, 0, 355, 211, 256, 244, 351, 254, + 286, 440, 442, 443, 445, 209, 349, 262, 330, 420, + 248, 428, 318, 205, 268, 386, 282, 291, 0, 0, + 336, 367, 214, 
423, 387, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 185, 198, 287, 0, 356, + 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, - 200, 282, 0, 349, 250, 432, 416, 414, 0, 0, - 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 189, 190, 201, 208, 217, 229, 242, - 248, 257, 260, 263, 266, 267, 269, 274, 291, 295, - 296, 297, 298, 314, 315, 316, 319, 322, 323, 325, - 327, 328, 331, 337, 338, 339, 340, 341, 343, 350, - 354, 361, 362, 363, 364, 365, 366, 367, 371, 372, - 373, 374, 382, 385, 399, 400, 410, 420, 424, 258, - 407, 425, 0, 290, 0, 194, 220, 207, 227, 241, - 243, 271, 299, 305, 333, 336, 255, 238, 218, 353, - 216, 369, 388, 389, 390, 392, 303, 234, 1038, 0, - 0, 0, 0, 0, 0, 321, 0, 0, 0, 0, + 188, 199, 207, 217, 229, 242, 250, 260, 264, 267, + 270, 271, 274, 279, 296, 301, 302, 303, 304, 320, + 321, 322, 325, 328, 329, 332, 334, 335, 338, 344, + 345, 346, 347, 348, 350, 357, 361, 369, 370, 371, + 372, 373, 375, 376, 380, 381, 382, 383, 391, 395, + 410, 411, 422, 434, 438, 261, 418, 439, 0, 295, + 0, 0, 297, 246, 263, 272, 0, 429, 392, 203, + 363, 253, 192, 220, 206, 227, 241, 243, 276, 305, + 311, 340, 343, 258, 238, 218, 360, 215, 378, 398, + 399, 400, 402, 309, 234, 327, 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, 0, - 280, 0, 0, 334, 0, 370, 223, 289, 287, 396, - 246, 240, 236, 222, 265, 294, 332, 387, 326, 0, - 284, 0, 0, 379, 306, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 270, 221, 192, 318, 380, 249, 0, 0, 0, 174, - 175, 176, 0, 0, 0, 0, 0, 0, 0, 0, - 213, 0, 219, 0, 0, 0, 0, 233, 268, 239, - 232, 394, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 256, 0, 307, 0, 0, 0, - 421, 0, 0, 0, 0, 0, 0, 0, 0, 279, - 0, 276, 188, 202, 0, 0, 317, 355, 360, 0, - 0, 0, 224, 0, 358, 330, 409, 209, 247, 352, - 335, 356, 0, 0, 357, 285, 398, 347, 408, 
422, - 423, 231, 311, 415, 391, 419, 431, 203, 228, 324, - 384, 412, 376, 304, 395, 275, 375, 254, 191, 283, - 195, 386, 406, 214, 368, 0, 0, 0, 197, 404, - 383, 301, 272, 273, 196, 0, 351, 235, 252, 226, - 320, 401, 402, 225, 433, 204, 418, 199, 205, 417, - 313, 397, 405, 302, 293, 198, 403, 300, 292, 278, - 245, 261, 345, 288, 346, 262, 309, 308, 310, 0, - 193, 0, 381, 413, 434, 211, 0, 0, 393, 427, - 430, 0, 348, 212, 253, 244, 344, 251, 281, 426, - 428, 429, 210, 342, 259, 312, 206, 264, 377, 277, - 286, 0, 0, 329, 359, 215, 411, 378, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 187, 200, - 282, 0, 349, 250, 432, 416, 414, 0, 0, 230, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 189, 190, 201, 208, 217, 229, 242, 248, - 257, 260, 263, 266, 267, 269, 274, 291, 295, 296, - 297, 298, 314, 315, 316, 319, 322, 323, 325, 327, - 328, 331, 337, 338, 339, 340, 341, 343, 350, 354, - 361, 362, 363, 364, 365, 366, 367, 371, 372, 373, - 374, 382, 385, 399, 400, 410, 420, 424, 258, 407, - 425, 0, 290, 0, 194, 220, 207, 227, 241, 243, - 271, 299, 305, 333, 336, 255, 238, 218, 353, 216, - 369, 388, 389, 390, 392, 303, 234, 321, 0, 0, - 0, 0, 0, 0, 0, 1029, 237, 0, 0, 0, - 0, 0, 280, 0, 0, 334, 0, 370, 223, 289, - 287, 396, 246, 240, 236, 222, 265, 294, 332, 387, - 326, 0, 284, 0, 0, 379, 306, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 270, 221, 192, 318, 380, 249, 0, 0, - 0, 174, 175, 176, 0, 0, 0, 0, 0, 0, - 0, 0, 213, 0, 219, 0, 0, 0, 0, 233, - 268, 239, 232, 394, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 256, 0, 307, 0, - 0, 0, 421, 0, 0, 0, 0, 0, 0, 0, - 0, 279, 0, 276, 188, 202, 0, 0, 317, 355, - 360, 0, 0, 0, 224, 0, 358, 330, 409, 209, - 247, 352, 335, 356, 0, 0, 357, 285, 398, 347, - 408, 422, 423, 231, 311, 415, 391, 419, 431, 203, - 228, 324, 384, 412, 376, 304, 395, 275, 375, 254, - 191, 283, 
195, 386, 406, 214, 368, 0, 0, 0, - 197, 404, 383, 301, 272, 273, 196, 0, 351, 235, - 252, 226, 320, 401, 402, 225, 433, 204, 418, 199, - 205, 417, 313, 397, 405, 302, 293, 198, 403, 300, - 292, 278, 245, 261, 345, 288, 346, 262, 309, 308, - 310, 0, 193, 0, 381, 413, 434, 211, 0, 0, - 393, 427, 430, 0, 348, 212, 253, 244, 344, 251, - 281, 426, 428, 429, 210, 342, 259, 312, 206, 264, - 377, 277, 286, 0, 0, 329, 359, 215, 411, 378, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 187, 200, 282, 0, 349, 250, 432, 416, 414, 0, + 285, 0, 0, 0, 341, 0, 379, 223, 294, 292, + 407, 247, 240, 236, 222, 269, 300, 339, 397, 333, + 0, 289, 0, 0, 388, 312, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 275, 221, 190, 324, 389, 251, 0, 0, 0, + 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, + 0, 212, 0, 219, 0, 0, 0, 0, 233, 273, + 239, 232, 404, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 507, 0, 259, 0, 313, 0, 0, + 0, 0, 435, 0, 0, 0, 0, 0, 0, 0, + 0, 284, 0, 281, 186, 201, 0, 0, 323, 362, + 368, 0, 0, 0, 224, 0, 366, 337, 421, 208, + 249, 359, 342, 364, 0, 0, 365, 290, 409, 354, + 419, 436, 437, 231, 317, 427, 401, 433, 447, 202, + 228, 331, 394, 424, 385, 310, 405, 406, 280, 384, + 257, 189, 288, 444, 200, 374, 216, 193, 396, 417, + 213, 377, 0, 0, 0, 195, 415, 393, 307, 277, + 278, 194, 0, 358, 235, 255, 226, 326, 412, 413, + 225, 449, 204, 432, 197, 0, 431, 319, 408, 416, + 308, 299, 196, 414, 306, 298, 283, 245, 265, 352, + 293, 353, 266, 315, 314, 316, 0, 191, 0, 390, + 425, 450, 210, 0, 0, 403, 441, 446, 0, 355, + 211, 256, 244, 351, 254, 286, 440, 442, 443, 445, + 209, 349, 262, 330, 420, 248, 428, 318, 205, 268, + 386, 282, 291, 0, 0, 336, 367, 214, 423, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 185, 198, 287, 0, 356, 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 
189, 190, 201, 208, 217, 229, - 242, 248, 257, 260, 263, 266, 267, 269, 274, 291, - 295, 296, 297, 298, 314, 315, 316, 319, 322, 323, - 325, 327, 328, 331, 337, 338, 339, 340, 341, 343, - 350, 354, 361, 362, 363, 364, 365, 366, 367, 371, - 372, 373, 374, 382, 385, 399, 400, 410, 420, 424, - 258, 407, 425, 0, 290, 0, 194, 220, 207, 227, - 241, 243, 271, 299, 305, 333, 336, 255, 238, 218, - 353, 216, 369, 388, 389, 390, 392, 303, 234, 321, - 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, - 0, 0, 0, 0, 280, 0, 0, 334, 0, 370, - 223, 289, 287, 396, 246, 240, 236, 222, 265, 294, - 332, 387, 326, 0, 284, 0, 0, 379, 306, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 270, 221, 192, 318, 380, 249, - 0, 0, 0, 174, 175, 176, 0, 902, 0, 0, - 0, 0, 0, 0, 213, 0, 219, 0, 0, 0, - 0, 233, 268, 239, 232, 394, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 256, 0, - 307, 0, 0, 0, 421, 0, 0, 0, 0, 0, - 0, 0, 0, 279, 0, 276, 188, 202, 0, 0, - 317, 355, 360, 0, 0, 0, 224, 0, 358, 330, - 409, 209, 247, 352, 335, 356, 0, 0, 357, 285, - 398, 347, 408, 422, 423, 231, 311, 415, 391, 419, - 431, 203, 228, 324, 384, 412, 376, 304, 395, 275, - 375, 254, 191, 283, 195, 386, 406, 214, 368, 0, - 0, 0, 197, 404, 383, 301, 272, 273, 196, 0, - 351, 235, 252, 226, 320, 401, 402, 225, 433, 204, - 418, 199, 205, 417, 313, 397, 405, 302, 293, 198, - 403, 300, 292, 278, 245, 261, 345, 288, 346, 262, - 309, 308, 310, 0, 193, 0, 381, 413, 434, 211, - 0, 0, 393, 427, 430, 0, 348, 212, 253, 244, - 344, 251, 281, 426, 428, 429, 210, 342, 259, 312, - 206, 264, 377, 277, 286, 0, 0, 329, 359, 215, - 411, 378, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 187, 200, 282, 0, 349, 250, 432, 416, - 414, 0, 0, 230, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 189, 190, 201, 208, - 217, 229, 242, 248, 257, 260, 263, 266, 267, 269, - 274, 291, 295, 296, 297, 298, 314, 315, 
316, 319, - 322, 323, 325, 327, 328, 331, 337, 338, 339, 340, - 341, 343, 350, 354, 361, 362, 363, 364, 365, 366, - 367, 371, 372, 373, 374, 382, 385, 399, 400, 410, - 420, 424, 258, 407, 425, 0, 290, 0, 194, 220, - 207, 227, 241, 243, 271, 299, 305, 333, 336, 255, - 238, 218, 353, 216, 369, 388, 389, 390, 392, 303, - 234, 321, 0, 0, 0, 0, 0, 0, 0, 0, - 237, 0, 0, 0, 0, 0, 280, 0, 0, 334, - 0, 370, 223, 289, 287, 396, 246, 240, 236, 222, - 265, 294, 332, 387, 326, 0, 284, 0, 0, 379, - 306, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 270, 221, 192, 318, - 380, 249, 0, 0, 0, 174, 175, 176, 0, 0, - 0, 0, 0, 0, 0, 0, 213, 0, 219, 0, - 0, 0, 0, 233, 268, 239, 232, 394, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 480, 0, - 256, 0, 307, 0, 0, 0, 421, 0, 0, 0, - 0, 0, 0, 0, 0, 279, 0, 276, 188, 202, - 0, 0, 317, 355, 360, 0, 0, 0, 224, 0, - 358, 330, 409, 209, 247, 352, 335, 356, 0, 0, - 357, 285, 398, 347, 408, 422, 423, 231, 311, 415, - 391, 419, 431, 203, 228, 324, 384, 412, 376, 304, - 395, 275, 375, 254, 191, 283, 195, 386, 406, 214, - 368, 0, 0, 0, 197, 404, 383, 301, 272, 273, - 196, 0, 351, 235, 252, 226, 320, 401, 402, 225, - 433, 204, 418, 199, 205, 417, 313, 397, 405, 302, - 293, 198, 403, 300, 292, 278, 245, 261, 345, 288, - 346, 262, 309, 308, 310, 0, 193, 0, 381, 413, - 434, 211, 0, 0, 393, 427, 430, 0, 348, 212, - 253, 244, 344, 251, 281, 426, 428, 429, 210, 342, - 259, 312, 206, 264, 377, 277, 286, 0, 0, 329, - 359, 215, 411, 378, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 187, 200, 282, 0, 349, 250, - 432, 416, 414, 0, 0, 230, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 189, 190, - 201, 208, 217, 229, 242, 248, 257, 260, 263, 266, - 267, 269, 274, 291, 295, 296, 297, 298, 314, 315, - 316, 319, 322, 323, 325, 327, 328, 331, 337, 338, - 339, 340, 341, 343, 350, 354, 361, 362, 363, 364, - 365, 
366, 367, 371, 372, 373, 374, 382, 385, 399, - 400, 410, 420, 424, 479, 407, 425, 0, 290, 0, - 194, 220, 207, 227, 241, 243, 271, 299, 305, 333, - 336, 255, 238, 218, 353, 216, 369, 388, 389, 390, - 392, 303, 234, 321, 0, 0, 0, 0, 0, 0, - 0, 0, 237, 0, 0, 0, 0, 0, 280, 0, - 0, 334, 0, 370, 223, 289, 287, 396, 246, 240, - 236, 222, 265, 294, 332, 387, 326, 0, 284, 0, - 0, 379, 306, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 270, 221, - 192, 318, 380, 249, 0, 0, 0, 174, 175, 176, - 0, 0, 0, 0, 0, 0, 0, 0, 213, 0, - 219, 0, 0, 0, 0, 233, 268, 239, 232, 394, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 256, 0, 307, 0, 182, 0, 421, 0, - 0, 0, 0, 0, 0, 0, 0, 279, 0, 276, - 188, 202, 0, 0, 317, 355, 360, 0, 0, 0, - 224, 0, 358, 330, 409, 209, 247, 352, 335, 356, - 0, 0, 357, 285, 398, 347, 408, 422, 423, 231, - 311, 415, 391, 419, 431, 203, 228, 324, 384, 412, - 376, 304, 395, 275, 375, 254, 191, 283, 195, 386, - 406, 214, 368, 0, 0, 0, 197, 404, 383, 301, - 272, 273, 196, 0, 351, 235, 252, 226, 320, 401, - 402, 225, 433, 204, 418, 199, 205, 417, 313, 397, - 405, 302, 293, 198, 403, 300, 292, 278, 245, 261, - 345, 288, 346, 262, 309, 308, 310, 0, 193, 0, - 381, 413, 434, 211, 0, 0, 393, 427, 430, 0, - 348, 212, 253, 244, 344, 251, 281, 426, 428, 429, - 210, 342, 259, 312, 206, 264, 377, 277, 286, 0, - 0, 329, 359, 215, 411, 378, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 187, 200, 282, 0, - 349, 250, 432, 416, 414, 0, 0, 230, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 189, 190, 201, 208, 217, 229, 242, 248, 257, 260, - 263, 266, 267, 269, 274, 291, 295, 296, 297, 298, - 314, 315, 316, 319, 322, 323, 325, 327, 328, 331, - 337, 338, 339, 340, 341, 343, 350, 354, 361, 362, - 363, 364, 365, 366, 367, 371, 372, 373, 374, 382, - 385, 399, 400, 410, 420, 424, 258, 407, 425, 0, - 290, 0, 194, 220, 207, 
227, 241, 243, 271, 299, - 305, 333, 336, 255, 238, 218, 353, 216, 369, 388, - 389, 390, 392, 303, 234, 321, 0, 0, 0, 0, + 0, 0, 0, 0, 187, 188, 199, 207, 217, 229, + 242, 250, 260, 264, 267, 270, 271, 274, 279, 296, + 301, 302, 303, 304, 320, 321, 322, 325, 328, 329, + 332, 334, 335, 338, 344, 345, 346, 347, 348, 350, + 357, 361, 369, 370, 371, 372, 373, 375, 376, 380, + 381, 382, 383, 391, 395, 410, 411, 422, 434, 438, + 506, 418, 439, 0, 295, 0, 0, 297, 246, 263, + 272, 0, 429, 392, 203, 363, 253, 192, 220, 206, + 227, 241, 243, 276, 305, 311, 340, 343, 258, 238, + 218, 360, 215, 378, 398, 399, 400, 402, 309, 234, + 327, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 0, 0, 0, 285, 0, 0, 0, 341, + 0, 379, 223, 294, 292, 407, 247, 240, 236, 222, + 269, 300, 339, 397, 333, 0, 289, 0, 0, 388, + 312, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 275, 221, 190, 324, + 389, 251, 0, 0, 0, 182, 183, 184, 0, 0, + 0, 0, 0, 0, 0, 0, 212, 0, 219, 0, + 0, 0, 0, 233, 273, 239, 232, 404, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 259, 0, 313, 0, 0, 456, 0, 435, 0, 0, + 0, 0, 0, 0, 0, 0, 284, 0, 281, 186, + 201, 0, 0, 323, 362, 368, 0, 0, 0, 224, + 0, 366, 337, 421, 208, 249, 359, 342, 364, 0, + 0, 365, 290, 409, 354, 419, 436, 437, 231, 317, + 427, 401, 433, 447, 202, 228, 331, 394, 424, 385, + 310, 405, 406, 280, 384, 257, 189, 288, 444, 200, + 374, 216, 193, 396, 417, 213, 377, 0, 0, 0, + 195, 415, 393, 307, 277, 278, 194, 0, 358, 235, + 255, 226, 326, 412, 413, 225, 449, 204, 432, 197, + 0, 431, 319, 408, 416, 308, 299, 196, 414, 306, + 298, 283, 245, 265, 352, 293, 353, 266, 315, 314, + 316, 0, 191, 0, 390, 425, 450, 210, 0, 0, + 403, 441, 446, 0, 355, 211, 256, 244, 351, 254, + 286, 440, 442, 443, 445, 209, 349, 262, 330, 420, + 248, 428, 318, 205, 268, 386, 282, 291, 0, 0, + 336, 367, 214, 423, 387, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 185, 198, 287, 0, 356, + 252, 448, 430, 426, 0, 0, 230, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, + 188, 199, 207, 217, 229, 242, 250, 260, 264, 267, + 270, 271, 274, 279, 296, 301, 302, 303, 304, 320, + 321, 322, 325, 328, 329, 332, 334, 335, 338, 344, + 345, 346, 347, 348, 350, 357, 361, 369, 370, 371, + 372, 373, 375, 376, 380, 381, 382, 383, 391, 395, + 410, 411, 422, 434, 438, 261, 418, 439, 0, 295, + 0, 0, 297, 246, 263, 272, 0, 429, 392, 203, + 363, 253, 192, 220, 206, 227, 241, 243, 276, 305, + 311, 340, 343, 258, 238, 218, 360, 215, 378, 398, + 399, 400, 402, 309, 234, 327, 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, 0, 0, 0, - 280, 0, 0, 334, 0, 370, 223, 289, 287, 396, - 246, 240, 236, 222, 265, 294, 332, 387, 326, 0, - 284, 0, 0, 379, 306, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 270, 221, 192, 318, 380, 249, 0, 0, 0, 174, - 175, 176, 0, 0, 0, 0, 0, 0, 0, 0, - 213, 0, 219, 0, 0, 0, 0, 233, 268, 239, - 232, 394, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 256, 0, 307, 0, 0, 0, - 421, 0, 0, 0, 0, 0, 0, 0, 0, 279, - 0, 276, 188, 202, 0, 0, 317, 355, 360, 0, - 0, 0, 224, 0, 358, 330, 409, 209, 247, 352, - 335, 356, 0, 0, 357, 285, 398, 347, 408, 422, - 423, 231, 311, 415, 391, 419, 431, 203, 228, 324, - 384, 412, 376, 304, 395, 275, 375, 254, 191, 283, - 195, 386, 406, 214, 368, 0, 0, 0, 197, 404, - 383, 301, 272, 273, 196, 0, 351, 235, 252, 226, - 320, 401, 402, 225, 433, 204, 418, 199, 205, 417, - 313, 397, 405, 302, 293, 198, 403, 300, 292, 278, - 245, 261, 345, 288, 346, 262, 309, 308, 310, 0, - 193, 0, 381, 413, 434, 211, 0, 0, 393, 427, - 430, 0, 348, 212, 253, 244, 344, 251, 281, 426, - 428, 429, 210, 342, 259, 312, 206, 264, 377, 277, - 286, 0, 0, 329, 359, 215, 411, 378, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 187, 200, - 282, 0, 349, 250, 432, 416, 414, 0, 0, 230, - 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 189, 190, 201, 208, 217, 229, 242, 248, - 257, 260, 263, 266, 267, 269, 274, 291, 295, 296, - 297, 298, 314, 315, 316, 319, 322, 323, 325, 327, - 328, 331, 337, 338, 339, 340, 341, 343, 350, 354, - 361, 362, 363, 364, 365, 366, 367, 371, 372, 373, - 374, 382, 385, 399, 400, 410, 420, 424, 258, 407, - 425, 0, 290, 0, 194, 220, 207, 227, 241, 243, - 271, 299, 305, 333, 336, 255, 238, 218, 353, 216, - 369, 388, 389, 390, 392, 303, 234, + 285, 0, 0, 0, 341, 0, 379, 223, 294, 292, + 407, 247, 240, 236, 222, 269, 300, 339, 397, 333, + 0, 289, 0, 0, 388, 312, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 275, 221, 190, 324, 389, 251, 0, 0, 0, + 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, + 0, 212, 0, 219, 0, 0, 0, 0, 233, 273, + 239, 232, 404, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 259, 0, 313, 0, 0, + 0, 0, 435, 0, 0, 0, 0, 0, 0, 0, + 0, 284, 0, 281, 186, 201, 0, 0, 323, 362, + 368, 0, 0, 0, 224, 0, 366, 337, 421, 208, + 249, 359, 342, 364, 0, 0, 365, 290, 409, 354, + 419, 436, 437, 231, 317, 427, 401, 433, 447, 202, + 228, 331, 394, 424, 385, 310, 405, 406, 280, 384, + 257, 189, 288, 444, 200, 374, 216, 193, 396, 417, + 213, 377, 0, 0, 0, 195, 415, 393, 307, 277, + 278, 194, 0, 358, 235, 255, 226, 326, 412, 413, + 225, 449, 204, 432, 197, 0, 431, 319, 408, 416, + 308, 299, 196, 414, 306, 298, 283, 245, 265, 352, + 293, 353, 266, 315, 314, 316, 0, 191, 0, 390, + 425, 450, 210, 0, 0, 403, 441, 446, 0, 355, + 211, 256, 244, 351, 254, 286, 440, 442, 443, 445, + 209, 349, 262, 330, 420, 248, 428, 318, 205, 268, + 386, 282, 291, 0, 0, 336, 367, 214, 423, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 185, 198, 287, 0, 356, 252, 448, 430, 426, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 187, 188, 199, 207, 217, 229, + 242, 
250, 260, 264, 267, 270, 271, 274, 279, 296, + 301, 302, 303, 304, 320, 321, 322, 325, 328, 329, + 332, 334, 335, 338, 344, 345, 346, 347, 348, 350, + 357, 361, 369, 370, 371, 372, 373, 375, 376, 380, + 381, 382, 383, 391, 395, 410, 411, 422, 434, 438, + 261, 418, 439, 0, 295, 0, 0, 297, 246, 263, + 272, 0, 429, 392, 203, 363, 253, 192, 220, 206, + 227, 241, 243, 276, 305, 311, 340, 343, 258, 238, + 218, 360, 215, 378, 398, 399, 400, 402, 309, 234, } var yyPact = [...]int{ - 3693, -1000, -327, 1534, -1000, -1000, -1000, -1000, -1000, -1000, + 4553, -1000, -344, 1605, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 1483, 1092, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 486, 1191, 177, 1411, 265, 181, 950, 350, 154, 26084, - 346, 111, 26516, -1000, 38, -1000, 29, 26516, 39, 25652, - -1000, -1000, -1000, 11795, 1364, -75, -80, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 1167, 1452, 1453, 1480, 1003, - 1504, -1000, 10054, 10054, 309, 309, 309, 8326, -1000, -1000, - 15696, 26516, 26516, 1196, 281, 341, 281, -153, -1000, -1000, - -1000, -1000, -1000, -1000, 1411, -1000, -1000, 83, -1000, 215, - 1119, -1000, 1118, -1000, 420, 336, 202, 274, 267, 201, - 200, 191, 189, 188, 187, 185, 184, 236, -1000, 456, - 456, -192, -193, 1879, 271, 271, 271, 328, 1390, 1388, - -1000, 541, -1000, 456, 456, 55, 456, 456, 456, 456, - 146, 135, 456, 456, 456, 456, 456, 456, 456, 456, - 456, 456, 456, 456, 456, 456, 456, 26516, -1000, 81, - 561, 487, 1411, 97, -1000, -1000, -1000, 26516, 276, 950, - 276, 276, 26516, -1000, 393, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 1576, 1182, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 581, 1246, 223, 1493, 3583, 156, 28476, 362, + 101, 28021, 361, 3864, 28476, -1000, 102, -1000, 89, 28476, + 93, 27566, -1000, -1000, -282, 12518, 1445, 16, 15, 28476, + 109, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 1216, + 1534, 1542, 1581, 1059, 1598, -1000, 10685, 10685, 296, 296, + 296, 8865, -1000, -1000, 16626, 28476, 28476, 1258, 360, 964, + 346, 344, 343, -1000, -104, -1000, -1000, -1000, -1000, 1493, + -1000, -1000, 146, -1000, 234, 1176, -1000, 1175, -1000, 493, + 462, 231, 308, 295, 230, 229, 228, 226, 224, 220, + 219, 216, 237, -1000, 491, 491, -164, -172, 2210, 287, + 287, 287, 317, 1457, 1456, -1000, 526, -1000, 491, 491, + 126, 491, 491, 491, 491, 175, 171, 491, 491, 491, + 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, + 491, 491, 28476, -1000, 151, 495, 246, 534, 1493, 164, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -3888,26 +4037,29 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 26516, 589, 589, 589, 589, - 589, 589, 23, -1000, -29, 149, 145, 98, 89, 950, - 225, -1000, 399, -1000, 85, -13, -1000, 589, 5650, 5650, - 5650, -1000, 1394, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 326, -1000, -1000, -1000, -1000, 26516, 25220, 234, 484, - -1000, -1000, -1000, -1000, 1063, 661, -1000, 11795, 2052, 1142, - 1142, -1000, -1000, 370, -1000, -1000, 13091, 13091, 13091, 13091, - 13091, 13091, 13091, 13091, 13091, 13091, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 1142, 392, -1000, 11363, 1142, 1142, 1142, 1142, 1142, 1142, - 1142, 1142, 11795, 1142, 1142, 1142, 1142, 1142, 1142, 1142, - 1142, 1142, 1142, 1142, 1142, 1142, 1142, 1142, 1142, 1142, - -1000, -1000, -1000, 26516, -1000, 1483, -1000, 1092, -1000, -1000, - -1000, 1408, 11795, 11795, 1483, -1000, 1255, 
10054, -1000, -1000, - 1339, -1000, -1000, -1000, -1000, 557, 1517, -1000, 14387, 391, - 1507, 24788, -1000, 19165, 24356, 1117, 7880, -50, -1000, -1000, - -1000, 469, 17869, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 28476, 359, 964, 297, -1000, 28476, -1000, 419, 28476, + 628, 628, 49, 628, 628, 628, 628, 91, 417, 13, + -1000, 76, 160, 158, 152, 584, 111, 62, -1000, -1000, + 161, 584, 103, -1000, 628, 6989, 6989, 6989, -1000, 1480, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 316, -1000, + -1000, -1000, -1000, 28476, 27111, 286, 531, -1000, -1000, -1000, + 72, -1000, -1000, 1117, 765, -1000, 12518, 2281, 1178, 1178, + -1000, -1000, 410, -1000, -1000, 13883, 13883, 13883, 13883, 13883, + 13883, 13883, 13883, 13883, 13883, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1178, + 416, -1000, 12063, 1178, 1178, 1178, 1178, 1178, 1178, 1178, + 1178, 12518, 1178, 1178, 1178, 1178, 1178, 1178, 1178, 1178, + 1178, 1178, 1178, 1178, 1178, 1178, 1178, 1178, 1178, -1000, + -1000, -1000, 28476, -1000, 1178, 926, 1576, -1000, 1182, -1000, + -1000, -1000, 1482, 12518, 12518, 1576, -1000, 1374, 10685, -1000, + -1000, 1506, -1000, -1000, -1000, -1000, -1000, 622, 1604, -1000, + 15248, 415, 1602, 26656, -1000, 20279, 26201, 1174, 8396, -47, + -1000, -1000, -1000, 529, 18914, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 1480, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -3918,410 +4070,867 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 1394, 1070, 26516, -1000, -1000, 3728, 950, -1000, 1186, - -1000, 1061, -1000, 1157, 81, 26516, 519, 950, 950, -1000, - -1000, -1000, 456, 456, 222, 265, 3498, -1000, -1000, -1000, - 23917, 1182, 950, -1000, 1180, -1000, 1425, 305, 472, 472, - 950, -1000, -1000, 26516, 950, 1424, 1423, 26516, 26516, -1000, - 23485, -1000, 23053, 22621, 800, 26516, 22189, 21757, 21325, 20893, - 20461, -1000, 1274, -1000, 1190, -1000, -1000, -1000, 26516, 26516, - 26516, -54, -1000, -1000, 26516, 950, -1000, -1000, 797, 788, - 456, 456, 787, 892, 886, 880, 456, 456, 782, 878, - 935, 194, 774, 772, 769, 826, 875, 106, 821, 747, - 760, 26516, 1179, -1000, 73, 461, 157, 231, 134, 26516, - 129, 1411, 1361, 1115, 324, 26516, 1438, 1236, 26516, 950, - -1000, 6988, -1000, -1000, 873, 11795, -1000, -1000, -1000, -1000, - -1000, 589, 26516, 589, 26516, 589, 589, 589, 589, 548, - 565, 548, -1000, -1000, -1000, -1000, 5650, 5650, 26516, 5650, - 5650, 26516, 5650, 5650, 565, -1000, -1000, -1000, 442, -1000, - 1235, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 36, -1000, - -1000, -1000, -1000, -1000, 1534, -1000, -1000, -1000, -125, 11795, - 11795, 11795, 11795, 667, 441, 13091, 700, 470, 13091, 13091, - 13091, 13091, 13091, 13091, 13091, 13091, 13091, 13091, 13091, 13091, - 13091, 13091, 13091, 715, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 950, -1000, 1511, 966, 966, 410, 410, 410, - 410, 410, 410, 410, 410, 410, 13523, 8758, 6988, 1003, - 1055, 1483, 10054, 10054, 11795, 11795, 10918, 10486, 10054, 1384, - 506, 661, 26516, -1000, 908, -1000, -1000, 12659, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 26516, 26516, 10054, 10054, 10054, 10054, 10054, -1000, 1110, -1000, - -171, 15264, 1453, 1003, 1339, 1429, 1527, 434, 963, 1109, - -1000, 813, 1453, 17437, 1149, -1000, 1339, -1000, 
-1000, -1000, - 26516, -1000, -1000, 20029, -1000, -1000, 6542, 26516, 183, 26516, - -1000, 1138, 1228, -1000, -1000, -1000, 1443, 17005, 26516, 1122, - 1108, -1000, -1000, 386, 7434, -50, -1000, 7434, 1097, -1000, - -120, -117, 9190, 401, -1000, -1000, -1000, 1879, 13955, 976, - 608, -45, -1000, -1000, -1000, 1157, -1000, 1157, 1157, 1157, - 1157, -54, -54, -54, -54, -1000, -1000, -1000, -1000, -1000, - 1178, 1177, -1000, 1157, 1157, 1157, 1157, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1115, 28476, -1000, + -1000, 4167, 964, -1000, 1238, -1000, 1112, -1000, 1190, 151, + 304, 1267, 964, 964, 964, 304, -1000, -1000, -1000, 491, + 491, 236, 3583, 3840, -1000, -1000, -1000, 25739, 1237, 964, + -1000, 1236, -1000, 1511, 285, 466, 466, 964, -1000, -1000, + 28476, 964, 1510, 1509, 28476, 28476, -1000, 25284, -1000, 24829, + 24374, 823, 28476, 23919, 23464, 23009, 22554, 22099, -1000, 1304, + -1000, 1243, -1000, -1000, -1000, 28476, 28476, 28476, 29, -1000, + -1000, 28476, 964, -1000, -1000, 822, 821, 491, 491, 820, + 918, 915, 914, 491, 491, 816, 911, 1028, 174, 815, + 807, 804, 867, 909, 127, 830, 826, 786, 28476, 1231, + -1000, 144, 517, 186, 217, 190, 28476, 117, 1537, 138, + 1493, 1431, 1170, 313, 297, 1311, 28476, 1525, 297, -1000, + 7458, -1000, -1000, 908, 12518, -1000, 618, 584, 584, -1000, + -1000, -1000, -1000, -1000, -1000, 628, 28476, 618, -1000, -1000, + -1000, 584, 628, 28476, 628, 628, 628, 628, 584, 628, + 28476, 28476, 28476, 28476, 28476, 28476, 28476, 28476, 28476, 6989, + 6989, 6989, 461, 628, -1000, 1310, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 92, -1000, -1000, -1000, -1000, -1000, + 1605, -1000, -1000, -1000, -111, 1158, 21644, -1000, -286, -287, + -288, -289, -1000, -1000, -1000, -290, -296, -1000, -1000, -1000, + 12518, 12518, 12518, 12518, 734, 473, 13883, 714, 605, 13883, + 13883, 13883, 13883, 13883, 13883, 13883, 13883, 13883, 13883, 13883, + 13883, 13883, 13883, 13883, 538, -1000, -1000, 
-1000, -1000, -1000, + -1000, -1000, -1000, 964, -1000, 1619, 921, 921, 442, 442, + 442, 442, 442, 442, 442, 442, 442, 14338, 9320, 7458, + 1059, 1073, 1576, 10685, 10685, 12518, 12518, 11595, 11140, 10685, + 1473, 550, 765, 28476, -1000, 933, -1000, -1000, 13428, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1176, 1176, 1176, 1159, 1159, 278, - -1000, 11795, 79, 26516, 1434, 757, 73, -1000, 1437, 1214, - -1000, 922, 885, -1000, 1107, -1000, -1000, 1477, -1000, -1000, - 466, 611, 603, 452, 26516, 57, 176, -1000, 263, -1000, - 26516, 1172, 1422, 472, 950, -1000, 950, -1000, -1000, -1000, - -1000, 385, -1000, -1000, 950, 1105, -1000, 1113, 691, 562, - 654, 544, 1105, -1000, -1000, -174, 1105, -1000, 1105, -1000, - 1105, -1000, 1105, -1000, 1105, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 488, 26516, 57, 715, -1000, 323, -1000, - -1000, 715, 715, -1000, -1000, -1000, -1000, 872, 869, -1000, + -1000, 28476, 28476, 10685, 10685, 10685, 10685, 10685, -1000, 1157, + -1000, -168, 16171, 12518, -1000, 1542, 1059, 1506, 1508, 1614, + 458, 671, 1155, -1000, 685, 1542, 18459, 1098, -1000, 1506, + -1000, -1000, -1000, 28476, -1000, -1000, 21189, -1000, -1000, 6520, + 28476, 208, 28476, -1000, 1120, 1399, -1000, -1000, -1000, 1531, + 18004, 28476, 1122, 1105, -1000, -1000, 414, 7927, -47, -1000, + 7927, 1135, -1000, -43, -72, 9775, 437, -1000, -1000, -1000, + 2210, 14793, 986, -1000, 27, -1000, -1000, -1000, 1190, -1000, + 1190, 1190, 1190, 1190, 29, 29, 29, 29, -1000, -1000, + -1000, -1000, -1000, 1223, 1213, -1000, 1190, 1190, 1190, 1190, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1212, 1212, 1212, + 1207, 1207, 277, -1000, 12518, 143, 28476, 1522, 776, 144, + 28476, 560, 1309, -1000, 28476, 1267, 1267, 1267, 28476, 1019, + 974, -1000, 1149, -1000, -1000, 1580, -1000, -1000, 576, 603, + 578, 616, 28476, 128, 207, -1000, 264, 
-1000, 28476, 1211, + 1507, 466, 964, -1000, 964, -1000, -1000, -1000, -1000, 413, + -1000, -1000, 964, 1146, -1000, 1136, 707, 575, 636, 570, + 1146, -1000, -1000, -126, 1146, -1000, 1146, -1000, 1146, -1000, + 1146, -1000, 1146, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 505, 28476, 128, 538, -1000, 312, -1000, -1000, 538, + 538, -1000, -1000, -1000, -1000, 905, 904, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -316, 26516, 333, 62, 105, - 26516, 26516, 26516, 26516, 26516, 359, -1000, -1000, -1000, 115, - 26516, 26516, 381, -1000, 26516, 354, -1000, -1000, -1000, -1000, - -1000, -1000, 661, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 589, 26516, 26516, 26516, -1000, -1000, 589, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 26516, -1000, 855, 26516, - 26516, -1000, -1000, -1000, -1000, -1000, 661, 441, 696, 515, - -1000, -1000, 714, -1000, -1000, 1956, -1000, -1000, -1000, -1000, - 700, 13091, 13091, 13091, 549, 1956, 2465, 692, 944, 410, - 605, 605, 411, 411, 411, 411, 411, 618, 618, -1000, - -1000, -1000, -1000, 908, -1000, -1000, -1000, 908, 10054, 10054, - 1103, 1142, 379, -1000, 1167, -1000, -1000, 1453, 1034, 1034, - 893, 709, 539, 1500, 1034, 520, 1498, 1034, 1034, 10054, - -1000, -1000, 559, -1000, 11795, 908, -1000, 591, 1102, 1101, - 1034, 908, 908, 1034, 1034, 26516, -1000, -287, -1000, -136, - 409, 1142, -1000, 19597, -1000, -1000, 1408, -1000, -1000, 1349, - -1000, 1306, 11795, 11795, 11795, -1000, -1000, -1000, 1408, 1455, - -1000, 1317, 1316, 1493, 10054, 19165, 1339, -1000, -1000, -1000, - 377, 1493, 1100, 1142, -1000, 26516, 19165, 19165, 19165, 19165, - 19165, -1000, 1253, 1251, -1000, 1264, 1263, 1289, 26516, -1000, - 1048, 1003, 17005, 183, 1065, 19165, 26516, -1000, -1000, 19165, - 26516, 6096, -1000, 1097, -50, -134, -1000, -1000, -1000, -1000, - 661, -1000, 820, -1000, 2359, -1000, 266, -1000, -1000, -1000, - -1000, 
1409, -1000, 527, -49, -1000, -1000, -54, -54, -1000, - -1000, 401, 710, 401, 401, 401, 854, 854, -1000, -1000, - -1000, -1000, -1000, 756, -1000, -1000, -1000, 748, -1000, -1000, - 545, 1252, 79, -1000, -1000, 456, 851, 1368, 26516, -1000, - -1000, 954, 332, -1000, 1233, -1000, -1000, -1000, -1000, -1000, - 3372, 26516, 1045, -1000, 49, 26516, 947, 26516, -1000, 1043, - 26516, -1000, 950, -1000, -1000, 6988, -1000, 26516, 1142, -1000, - -1000, -1000, -1000, 345, 1405, 1399, 57, 49, 401, 950, - -1000, -1000, -1000, -1000, -1000, -320, 1037, 26516, 72, -1000, - 1166, 899, -1000, 1202, -1000, -1000, -1000, -1000, 82, 108, - 107, 311, -1000, -1000, -1000, -1000, 5650, 26516, -1000, -1000, - -1000, -1000, 548, -1000, 548, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 549, 1956, 2367, -1000, 13091, 13091, -1000, -1000, - 1034, 1034, 10054, 6988, 1483, 1408, -1000, -1000, 371, 715, - 371, 13091, 13091, -1000, 13091, 13091, -1000, -166, 1076, 453, - -1000, 11795, 879, -1000, -1000, 13091, 13091, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 340, 337, 335, 26516, - -1000, -1000, 804, 841, 1297, 661, 661, -1000, -1000, 26516, - -1000, -1000, -1000, -1000, 1487, 11795, -1000, 1095, -1000, 5204, - 1453, 1230, 26516, 1142, 1534, 14832, 26516, 1081, -1000, 459, - 1228, 1201, 1229, 1459, -1000, -1000, -1000, -1000, 1249, -1000, - 1247, -1000, -1000, -1000, -1000, -1000, 1003, 1493, 19165, 1075, - -1000, 1075, -1000, 376, -1000, -1000, -1000, -129, -128, -1000, - -1000, -1000, 1879, -1000, -1000, 1227, 13091, -1000, -1000, -1000, - 401, 401, -1000, -1000, -1000, -1000, -1000, -1000, 1031, -1000, - 1029, 1093, 1023, 35, -1000, 1194, 1386, 456, 456, -1000, - 731, -1000, 950, -1000, -1000, 26516, 26516, 1474, 1085, -1000, - 26516, -1000, -1000, 26516, -1000, -1000, 1312, 79, 1021, -1000, - -1000, -1000, 176, 26516, -1000, 966, 49, -1000, -1000, -1000, - -1000, -1000, -1000, 1144, -1000, -1000, -1000, 933, -1000, -175, - 950, 26516, 26516, 26516, -1000, 26516, 
-1000, -1000, 589, 589, - -1000, 13091, 1956, 1956, -1000, -1000, 908, -1000, 1453, -1000, - 908, 1157, 1157, -1000, 1157, 1159, -1000, 1157, 25, 1157, - 24, 908, 908, 2272, 2200, 2148, 1685, 1142, -161, -1000, - 661, 11795, 1208, 816, 1142, 1142, 1142, 1015, 838, -54, - -1000, -1000, -1000, 1461, 1473, 661, -1000, -1000, -1000, 1427, - 1082, 981, -1000, -1000, 9622, 1019, 1311, 373, 1015, 1483, - 26516, 11795, -1000, -1000, 11795, 1148, -1000, 11795, -1000, -1000, - -1000, 1483, 1483, 1075, -1000, -1000, 421, -1000, -1000, -1000, - -1000, -27, 1526, 1956, -1000, -1000, -54, 837, -54, 686, - -1000, 649, -1000, -1000, -231, -1000, -1000, 1134, 1248, -1000, - -1000, 1144, -1000, 26516, 26516, -1000, -1000, 165, -1000, 251, - 986, -1000, -190, -1000, -1000, 1442, 26516, -1000, -1000, 6988, - -1000, -1000, 1143, 1203, -1000, -1000, -1000, -1000, 1956, -1000, - 1408, -1000, -1000, 218, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 13091, 13091, 13091, 13091, 13091, 1453, 836, 661, - 13091, 13091, 16560, 18733, 18733, 16128, -54, -46, -1000, 11795, - 11795, 1418, -1000, 1142, -1000, 1126, 26516, 1142, 26516, -1000, - 1453, -1000, 661, 661, 26516, 661, 1453, -1000, 440, -1000, - -40, 401, -1000, 401, 916, 914, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1441, 1085, -1000, 162, 26516, -1000, - 176, -1000, -196, -197, 1092, 983, 1077, -1000, 455, 26516, - 26516, -1000, -1000, -1000, 591, 591, 591, 591, 195, 908, - -1000, 591, 591, 968, -1000, -1000, -1000, 968, 968, 409, - -282, -1000, 1355, 1351, 661, 1063, 1525, -1000, 1142, 1534, - 368, 981, -1000, -1000, 965, -1000, 552, 1417, -1000, 1415, - -1000, -1000, -1000, -1000, -1000, 1092, 1142, 1124, -1000, -1000, - -1000, 152, -1000, 6988, 4758, 959, -1000, -1000, -1000, -1000, - -1000, 908, 96, -182, -1000, -1000, -1000, 18301, -1000, -1000, - -1000, -1000, -46, 211, -1000, 1321, 1319, 1467, 26516, 981, - 26516, -1000, -1000, 831, -1000, -1000, 152, 12227, 26516, -1000, - -74, -1000, -1000, -1000, 
-1000, -1000, 1202, -1000, 1269, -170, - -187, -1000, -1000, 1331, 1333, 1333, 1351, 1466, 1341, 1338, - -1000, 830, 979, -1000, -1000, -1000, 591, 908, 931, 272, - -1000, -1000, -175, -1000, 1266, -1000, 1329, 742, -1000, -1000, - -1000, -1000, 828, -1000, 1462, 1460, -1000, -1000, -1000, 1226, - 75, -1000, -179, -1000, 673, -1000, -1000, -1000, 806, 802, - 1216, -1000, 1505, -1000, -185, -1000, -1000, -1000, -1000, -1000, - 1523, 347, 347, -188, -1000, -1000, -1000, 269, 634, -1000, - -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -332, 28476, 326, 130, 147, 28476, 28476, + 28476, 28476, 28476, 376, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 169, 28476, 28476, 28476, 28476, 367, -1000, -1000, 28476, + -1000, -1000, -1000, -1000, 765, 28476, -1000, -1000, 628, 628, + -1000, -1000, 28476, 628, -1000, -1000, -1000, -1000, -1000, -1000, + 628, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 902, -1000, 28476, 28476, -1000, + -1000, -1000, -1000, -1000, 98, -54, 205, -1000, -1000, -1000, + -1000, 1539, -1000, 765, 473, 547, 579, -1000, -1000, 741, + -1000, -1000, 2401, -1000, -1000, -1000, -1000, 714, 13883, 13883, + 13883, 1035, 2401, 2516, 1111, 1217, 442, 666, 666, 441, + 441, 441, 441, 441, 712, 712, -1000, -1000, -1000, -1000, + 933, -1000, -1000, -1000, 933, 10685, 10685, 1143, 1178, 408, + -1000, 1216, -1000, -1000, 1542, 1054, 1054, 755, 936, 639, + 1600, 1054, 620, 1596, 1054, 1054, 10685, -1000, -1000, 599, + -1000, 12518, 933, -1000, 1257, 1138, 1137, 1054, 933, 933, + 1054, 1054, 28476, -1000, -274, -1000, -86, 399, 1178, -1000, + 20734, -1000, -1000, 933, 1117, 1482, -1000, -1000, 1420, -1000, + 1371, 12518, 12518, 12518, -1000, -1000, -1000, 1482, 1575, -1000, + 1388, 1383, 1590, 10685, 20279, 1506, -1000, -1000, -1000, 406, + 1590, 1152, 1178, -1000, 28476, 20279, 20279, 20279, 20279, 20279, + -1000, 1349, 1348, -1000, 
1336, 1335, 1343, 28476, -1000, 1067, + 1059, 18004, 208, 1102, 20279, 28476, -1000, -1000, 20279, 28476, + 6051, -1000, 1135, -47, -17, -1000, -1000, -1000, -1000, 765, + -1000, 882, -1000, 269, -1000, 270, -1000, -1000, -1000, -1000, + 573, 22, -1000, -1000, 29, 29, -1000, -1000, 437, 601, + 437, 437, 437, 900, 900, -1000, -1000, -1000, -1000, -1000, + 772, -1000, -1000, -1000, 746, -1000, -1000, 774, 1300, 143, + -1000, -1000, 491, 884, 1451, -1000, -1000, 971, 319, -1000, + 1521, 28476, -1000, 1308, 1305, 1302, -1000, -1000, -1000, -1000, + -1000, 307, 28476, 1063, -1000, 124, 28476, 968, 28476, -1000, + 1061, 28476, -1000, 964, -1000, -1000, 7458, -1000, 28476, 1178, + -1000, -1000, -1000, -1000, 348, 1487, 1483, 128, 124, 437, + 964, -1000, -1000, -1000, -1000, -1000, -335, 1057, 28476, 136, + -1000, 1209, 833, -1000, 1263, -1000, -1000, -1000, -1000, 108, + 185, 166, 309, -1000, 353, 1300, 28476, -1000, -1000, -1000, + -1000, 584, -1000, -1000, 584, -1000, -1000, -1000, -1000, -1000, + -1000, 1477, -68, -308, -1000, -305, -1000, -1000, -1000, -1000, + 1035, 2401, 2380, -1000, 13883, 13883, -1000, -1000, 1054, 1054, + 10685, 7458, 1576, 1482, -1000, -1000, 345, 538, 345, 13883, + 13883, -1000, 13883, 13883, -1000, -118, 1065, 546, -1000, 12518, + 777, -1000, -1000, 13883, 13883, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 331, 328, 310, 28476, -1000, -1000, + -1000, 782, 883, 1357, 765, 765, -1000, -1000, 28476, -1000, + -1000, -1000, -1000, 1587, 12518, -1000, 1130, -1000, 5582, 1542, + 1301, 28476, 1178, 1605, 15716, 28476, 1079, -1000, 492, 1399, + 1293, 1290, 1414, -1000, -1000, -1000, -1000, 1347, -1000, 1339, + -1000, -1000, -1000, -1000, -1000, 1059, 1590, 20279, 1077, -1000, + 1077, -1000, 403, -1000, -1000, -1000, -80, -82, -1000, -1000, + -1000, 2210, -1000, -1000, -1000, 637, 13883, 1613, -1000, 877, + 1502, -1000, 1497, -1000, -1000, 437, 437, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 1049, -1000, 1046, 1129, 1043, 
58, + -1000, 1161, 1472, 491, 491, -1000, 728, -1000, 964, -1000, + 28476, -1000, -1000, 28476, 28476, 28476, 1579, 1125, -1000, 28476, + -1000, -1000, 28476, -1000, -1000, 1382, 143, 1041, -1000, -1000, + -1000, 207, 28476, -1000, 921, 124, -1000, -1000, -1000, -1000, + -1000, -1000, 1184, -1000, -1000, -1000, 963, -1000, -127, 964, + 28476, 28476, 28476, -1000, 28476, -1000, -1000, -1000, 628, 628, + -1000, 1471, -1000, 964, -1000, 13883, 2401, 2401, -1000, -1000, + 933, -1000, 1542, -1000, 933, 1190, 1190, -1000, 1190, 1207, + -1000, 1190, 78, 1190, 73, 933, 933, 2481, 2418, 2145, + 846, 1178, -112, -1000, 765, 12518, 2128, 1288, 1178, 1178, + 1178, 1017, 875, 29, -1000, -1000, -1000, 1585, 1577, 765, + -1000, -1000, -1000, 1514, 1128, 1100, -1000, -1000, 10230, 1038, + 1378, 385, 1017, 1576, 28476, 12518, -1000, -1000, 12518, 1189, + -1000, 12518, -1000, -1000, -1000, 1576, 1576, 1077, -1000, -1000, + 375, -1000, -1000, -1000, -1000, -1000, 2401, -48, -1000, -1000, + -1000, -1000, -1000, 29, 870, 29, 710, -1000, 693, -1000, + -1000, -215, -1000, -1000, 1156, 1268, -1000, -1000, 1184, -1000, + -1000, -1000, 28476, 28476, -1000, -1000, 198, -1000, 254, 1015, + -1000, -173, -1000, -1000, 1529, 28476, -1000, -1000, 7458, -1000, + -1000, 1181, 1265, -1000, -1000, -1000, -1000, -1000, -1000, 2401, + -1000, 1482, -1000, -1000, 247, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 13883, 13883, 13883, 13883, 13883, 1542, 866, + 765, 13883, 13883, 17536, 19824, 19824, 17081, 29, -4, -1000, + 12518, 12518, 1496, -1000, 1178, -1000, 1034, 28476, 1178, 28476, + -1000, 1542, -1000, 765, 765, 28476, 765, 1542, -1000, -1000, + 437, -1000, 437, 939, 937, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 1528, 1125, -1000, 192, 28476, -1000, 207, + -1000, -179, -180, 1182, 1012, 1123, -1000, 490, 28476, 28476, + -1000, -1000, -1000, 1257, 1257, 1257, 1257, 122, 933, -1000, + 1257, 1257, 1003, -1000, -1000, -1000, 1003, 1003, 399, -267, + -1000, 1428, 1394, 765, 
1117, 1608, -1000, 1178, 1605, 379, + 1100, -1000, -1000, 992, -1000, -1000, -1000, -1000, -1000, 1182, + 1178, 1060, -1000, -1000, -1000, 213, -1000, 7458, 5113, 990, + -1000, -1000, -1000, -1000, -1000, 933, 153, -151, -1000, -1000, + -1000, 19369, -1000, -1000, -1000, -1000, -4, 268, -1000, 1404, + 1394, -1000, 1574, 1412, 1572, -1000, 28476, 1100, 28476, -1000, + 213, 12973, 28476, -1000, -51, -1000, -1000, -1000, -1000, -1000, + 1263, -1000, 1355, -124, -161, -1000, -1000, 1402, 1408, 1408, + 1404, -1000, 1561, 1548, -1000, 859, 1547, 852, 997, -1000, + -1000, 1257, 933, 984, 272, -1000, -1000, -127, -1000, 1354, + -1000, 1392, 713, -1000, -1000, -1000, -1000, 849, 847, -1000, + 667, -1000, -1000, -1000, 1272, 148, -1000, -128, -1000, 646, + -1000, -1000, -1000, -1000, -1000, 1269, -1000, 1595, -1000, -157, + -1000, -1000, -1000, 1607, 496, 496, -171, -1000, -1000, -1000, + 263, 721, -1000, -1000, -1000, -1000, -1000, } var yyPgo = [...]int{ - 0, 1805, 1801, 13, 86, 82, 1800, 1798, 1797, 1796, - 128, 127, 126, 1794, 1782, 1780, 1779, 1778, 1777, 1776, - 1775, 1774, 1771, 1770, 1768, 61, 110, 115, 1766, 1762, - 1761, 1760, 1759, 122, 121, 468, 1755, 119, 1750, 1747, - 1746, 1743, 1741, 1740, 1738, 1737, 1736, 1735, 1733, 1730, - 199, 1729, 1728, 7, 1726, 83, 1722, 1720, 1719, 1718, - 1715, 1714, 1712, 106, 1709, 46, 286, 48, 75, 1706, - 72, 770, 1700, 90, 116, 1698, 447, 1697, 42, 78, - 96, 1693, 40, 1692, 1691, 94, 1690, 1688, 1687, 69, - 1686, 1684, 2519, 1683, 67, 76, 17, 55, 1682, 1681, - 1680, 1679, 31, 1286, 1678, 1677, 21, 1676, 1675, 125, - 1673, 81, 25, 20, 19, 18, 1672, 77, 1670, 8, - 54, 32, 1668, 80, 1667, 1666, 1664, 1662, 33, 1661, - 73, 99, 24, 1660, 6, 10, 1659, 1658, 1657, 1656, - 1654, 1653, 4, 1652, 1650, 1649, 1648, 26, 1645, 9, - 23, 38, 71, 111, 29, 12, 1644, 120, 1643, 27, - 105, 68, 103, 1642, 1639, 1638, 939, 1637, 59, 1635, - 131, 1633, 1628, 44, 1627, 431, 762, 1626, 1625, 1623, - 64, 1117, 2383, 45, 104, 1621, 1617, 1689, 57, 74, - 
22, 1614, 1613, 1611, 123, 49, 51, 896, 43, 1608, - 1607, 1605, 1604, 1603, 1602, 1601, 248, 1600, 1599, 1598, - 34, 16, 95, 30, 1597, 1596, 1593, 1592, 1591, 66, - 36, 1589, 101, 100, 62, 117, 1588, 112, 85, 70, - 1587, 56, 1586, 1585, 1582, 1579, 41, 1575, 1574, 1572, - 1571, 102, 84, 63, 37, 39, 98, 1570, 35, 1569, - 1568, 97, 87, 1566, 15, 113, 11, 1556, 3, 0, - 1555, 5, 109, 1388, 114, 1554, 1552, 1, 1551, 2, - 1550, 1549, 79, 1548, 1547, 1544, 1543, 2749, 28, 107, - 1541, 118, + 0, 1897, 1896, 32, 83, 85, 1894, 1889, 1888, 1886, + 134, 133, 131, 1885, 1884, 1883, 1881, 1880, 1878, 1873, + 1866, 1858, 1857, 1852, 1848, 65, 123, 43, 37, 140, + 1847, 1846, 47, 1845, 1844, 1842, 126, 117, 439, 1841, + 119, 1840, 1839, 1838, 1837, 1835, 1834, 1828, 1827, 1822, + 1820, 1818, 1815, 1814, 1813, 144, 1812, 1811, 7, 1810, + 51, 1809, 1808, 1807, 1806, 1805, 89, 1803, 1802, 1801, + 114, 1800, 1797, 50, 416, 63, 73, 1795, 1787, 77, + 770, 1786, 106, 128, 1782, 898, 1779, 61, 78, 86, + 1778, 46, 1777, 1773, 90, 1771, 1769, 1765, 74, 1761, + 1760, 3185, 1759, 68, 76, 15, 27, 1753, 1752, 1747, + 1743, 34, 44, 1741, 1732, 23, 1731, 1726, 136, 1725, + 88, 18, 1724, 14, 13, 21, 1723, 87, 1722, 42, + 56, 31, 1717, 82, 1716, 1715, 1714, 1713, 57, 1711, + 79, 109, 30, 1710, 1709, 6, 11, 1708, 1707, 1706, + 1705, 1704, 1703, 10, 1702, 4, 1700, 28, 1699, 9, + 22, 17, 72, 118, 26, 8, 1698, 121, 1697, 29, + 122, 66, 107, 1695, 1694, 1692, 871, 141, 1691, 1690, + 35, 1689, 116, 127, 1687, 1450, 1686, 1685, 58, 1276, + 2643, 19, 113, 1684, 1683, 1891, 48, 80, 24, 1681, + 75, 1680, 1679, 1678, 132, 125, 69, 811, 53, 1677, + 1676, 1675, 1673, 1672, 1671, 1670, 135, 16, 20, 108, + 36, 1669, 1668, 1667, 67, 38, 1666, 105, 104, 71, + 94, 1664, 115, 99, 59, 1661, 40, 1656, 1654, 1653, + 1652, 41, 1651, 1649, 1648, 1646, 112, 98, 64, 45, + 1645, 39, 93, 101, 102, 1644, 25, 124, 12, 1643, + 3, 0, 1642, 5, 120, 1482, 103, 1641, 1638, 1, + 1637, 2, 1636, 1633, 81, 1632, 1631, 1630, 1628, 2492, + 
340, 110, 1627, 129, +} + +//line sql.y:5218 +type yySymType struct { + union interface{} + empty struct{} + LengthScaleOption LengthScaleOption + tableName TableName + tableIdent TableIdent + str string + strs []string + vindexParam VindexParam + colIdent ColIdent + joinCondition JoinCondition + collateAndCharset CollateAndCharset + columnType ColumnType + yys int +} + +func (st *yySymType) ReferenceActionUnion() ReferenceAction { + v, _ := st.union.(ReferenceAction) + return v +} + +func (st *yySymType) aliasedTableNameUnion() *AliasedTableExpr { + v, _ := st.union.(*AliasedTableExpr) + return v +} + +func (st *yySymType) alterDatabaseUnion() *AlterDatabase { + v, _ := st.union.(*AlterDatabase) + return v +} + +func (st *yySymType) alterMigrationUnion() *AlterMigration { + v, _ := st.union.(*AlterMigration) + return v +} + +func (st *yySymType) alterOptionUnion() AlterOption { + v, _ := st.union.(AlterOption) + return v +} + +func (st *yySymType) alterOptionsUnion() []AlterOption { + v, _ := st.union.([]AlterOption) + return v +} + +func (st *yySymType) alterTableUnion() *AlterTable { + v, _ := st.union.(*AlterTable) + return v +} + +func (st *yySymType) boolValUnion() BoolVal { + v, _ := st.union.(BoolVal) + return v +} + +func (st *yySymType) booleanUnion() bool { + v, _ := st.union.(bool) + return v +} + +func (st *yySymType) characteristicUnion() Characteristic { + v, _ := st.union.(Characteristic) + return v +} + +func (st *yySymType) characteristicsUnion() []Characteristic { + v, _ := st.union.([]Characteristic) + return v +} + +func (st *yySymType) colKeyOptUnion() ColumnKeyOption { + v, _ := st.union.(ColumnKeyOption) + return v +} + +func (st *yySymType) colNameUnion() *ColName { + v, _ := st.union.(*ColName) + return v +} + +func (st *yySymType) colTupleUnion() ColTuple { + v, _ := st.union.(ColTuple) + return v +} + +func (st *yySymType) collateAndCharsetsUnion() []CollateAndCharset { + v, _ := st.union.([]CollateAndCharset) + return v +} + +func (st 
*yySymType) columnDefinitionUnion() *ColumnDefinition { + v, _ := st.union.(*ColumnDefinition) + return v +} + +func (st *yySymType) columnDefinitionsUnion() []*ColumnDefinition { + v, _ := st.union.([]*ColumnDefinition) + return v +} + +func (st *yySymType) columnTypeOptionsUnion() *ColumnTypeOptions { + v, _ := st.union.(*ColumnTypeOptions) + return v +} + +func (st *yySymType) columnsUnion() Columns { + v, _ := st.union.(Columns) + return v +} + +func (st *yySymType) comparisonExprOperatorUnion() ComparisonExprOperator { + v, _ := st.union.(ComparisonExprOperator) + return v +} + +func (st *yySymType) constraintDefinitionUnion() *ConstraintDefinition { + v, _ := st.union.(*ConstraintDefinition) + return v +} + +func (st *yySymType) constraintInfoUnion() ConstraintInfo { + v, _ := st.union.(ConstraintInfo) + return v +} + +func (st *yySymType) convertTypeUnion() *ConvertType { + v, _ := st.union.(*ConvertType) + return v +} + +func (st *yySymType) createDatabaseUnion() *CreateDatabase { + v, _ := st.union.(*CreateDatabase) + return v +} + +func (st *yySymType) createTableUnion() *CreateTable { + v, _ := st.union.(*CreateTable) + return v +} + +func (st *yySymType) derivedTableUnion() *DerivedTable { + v, _ := st.union.(*DerivedTable) + return v +} + +func (st *yySymType) explainTypeUnion() ExplainType { + v, _ := st.union.(ExplainType) + return v +} + +func (st *yySymType) exprUnion() Expr { + v, _ := st.union.(Expr) + return v +} + +func (st *yySymType) exprsUnion() Exprs { + v, _ := st.union.(Exprs) + return v +} + +func (st *yySymType) ignoreUnion() Ignore { + v, _ := st.union.(Ignore) + return v +} + +func (st *yySymType) indexColumnUnion() *IndexColumn { + v, _ := st.union.(*IndexColumn) + return v +} + +func (st *yySymType) indexColumnsUnion() []*IndexColumn { + v, _ := st.union.([]*IndexColumn) + return v +} + +func (st *yySymType) indexDefinitionUnion() *IndexDefinition { + v, _ := st.union.(*IndexDefinition) + return v +} + +func (st *yySymType) 
indexHintsUnion() *IndexHints { + v, _ := st.union.(*IndexHints) + return v +} + +func (st *yySymType) indexInfoUnion() *IndexInfo { + v, _ := st.union.(*IndexInfo) + return v +} + +func (st *yySymType) indexOptionUnion() *IndexOption { + v, _ := st.union.(*IndexOption) + return v +} + +func (st *yySymType) indexOptionsUnion() []*IndexOption { + v, _ := st.union.([]*IndexOption) + return v +} + +func (st *yySymType) insUnion() *Insert { + v, _ := st.union.(*Insert) + return v +} + +func (st *yySymType) insertActionUnion() InsertAction { + v, _ := st.union.(InsertAction) + return v +} + +func (st *yySymType) isExprOperatorUnion() IsExprOperator { + v, _ := st.union.(IsExprOperator) + return v +} + +func (st *yySymType) isolationLevelUnion() IsolationLevel { + v, _ := st.union.(IsolationLevel) + return v +} + +func (st *yySymType) joinTypeUnion() JoinType { + v, _ := st.union.(JoinType) + return v +} + +func (st *yySymType) limitUnion() *Limit { + v, _ := st.union.(*Limit) + return v +} + +func (st *yySymType) literalUnion() *Literal { + v, _ := st.union.(*Literal) + return v +} + +func (st *yySymType) lockUnion() Lock { + v, _ := st.union.(Lock) + return v +} + +func (st *yySymType) lockTypeUnion() LockType { + v, _ := st.union.(LockType) + return v +} + +func (st *yySymType) matchExprOptionUnion() MatchExprOption { + v, _ := st.union.(MatchExprOption) + return v +} + +func (st *yySymType) optLikeUnion() *OptLike { + v, _ := st.union.(*OptLike) + return v +} + +func (st *yySymType) optValUnion() Expr { + v, _ := st.union.(Expr) + return v +} + +func (st *yySymType) orderUnion() *Order { + v, _ := st.union.(*Order) + return v +} + +func (st *yySymType) orderByUnion() OrderBy { + v, _ := st.union.(OrderBy) + return v +} + +func (st *yySymType) orderDirectionUnion() OrderDirection { + v, _ := st.union.(OrderDirection) + return v +} + +func (st *yySymType) partDefUnion() *PartitionDefinition { + v, _ := st.union.(*PartitionDefinition) + return v +} + +func (st 
*yySymType) partDefsUnion() []*PartitionDefinition { + v, _ := st.union.([]*PartitionDefinition) + return v +} + +func (st *yySymType) partSpecUnion() *PartitionSpec { + v, _ := st.union.(*PartitionSpec) + return v +} + +func (st *yySymType) partSpecsUnion() []*PartitionSpec { + v, _ := st.union.([]*PartitionSpec) + return v +} + +func (st *yySymType) partitionsUnion() Partitions { + v, _ := st.union.(Partitions) + return v +} + +func (st *yySymType) renameTablePairsUnion() []*RenameTablePair { + v, _ := st.union.([]*RenameTablePair) + return v +} + +func (st *yySymType) revertMigrationUnion() *RevertMigration { + v, _ := st.union.(*RevertMigration) + return v +} + +func (st *yySymType) scopeUnion() Scope { + v, _ := st.union.(Scope) + return v +} + +func (st *yySymType) selStmtUnion() SelectStatement { + v, _ := st.union.(SelectStatement) + return v +} + +func (st *yySymType) selectExprUnion() SelectExpr { + v, _ := st.union.(SelectExpr) + return v +} + +func (st *yySymType) selectExprsUnion() SelectExprs { + v, _ := st.union.(SelectExprs) + return v +} + +func (st *yySymType) selectIntoUnion() *SelectInto { + v, _ := st.union.(*SelectInto) + return v +} + +func (st *yySymType) setExprUnion() *SetExpr { + v, _ := st.union.(*SetExpr) + return v +} + +func (st *yySymType) setExprsUnion() SetExprs { + v, _ := st.union.(SetExprs) + return v +} + +func (st *yySymType) showFilterUnion() *ShowFilter { + v, _ := st.union.(*ShowFilter) + return v +} + +func (st *yySymType) statementUnion() Statement { + v, _ := st.union.(Statement) + return v +} + +func (st *yySymType) subqueryUnion() *Subquery { + v, _ := st.union.(*Subquery) + return v +} + +func (st *yySymType) tableAndLockTypeUnion() *TableAndLockType { + v, _ := st.union.(*TableAndLockType) + return v +} + +func (st *yySymType) tableAndLockTypesUnion() TableAndLockTypes { + v, _ := st.union.(TableAndLockTypes) + return v +} + +func (st *yySymType) tableExprUnion() TableExpr { + v, _ := st.union.(TableExpr) + return v 
+} + +func (st *yySymType) tableExprsUnion() TableExprs { + v, _ := st.union.(TableExprs) + return v +} + +func (st *yySymType) tableNamesUnion() TableNames { + v, _ := st.union.(TableNames) + return v +} + +func (st *yySymType) tableOptionUnion() *TableOption { + v, _ := st.union.(*TableOption) + return v +} + +func (st *yySymType) tableOptionsUnion() TableOptions { + v, _ := st.union.(TableOptions) + return v +} + +func (st *yySymType) tableSpecUnion() *TableSpec { + v, _ := st.union.(*TableSpec) + return v +} + +func (st *yySymType) updateExprUnion() *UpdateExpr { + v, _ := st.union.(*UpdateExpr) + return v +} + +func (st *yySymType) updateExprsUnion() UpdateExprs { + v, _ := st.union.(UpdateExprs) + return v +} + +func (st *yySymType) valTupleUnion() ValTuple { + v, _ := st.union.(ValTuple) + return v +} + +func (st *yySymType) valuesUnion() Values { + v, _ := st.union.(Values) + return v +} + +func (st *yySymType) vindexParamsUnion() []VindexParam { + v, _ := st.union.([]VindexParam) + return v +} + +func (st *yySymType) whenUnion() *When { + v, _ := st.union.(*When) + return v +} + +func (st *yySymType) whensUnion() []*When { + v, _ := st.union.([]*When) + return v } var yyR1 = [...]int{ - 0, 275, 276, 276, 1, 1, 1, 1, 1, 1, + 0, 277, 278, 278, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 259, 259, 259, 262, 262, 21, 47, - 3, 3, 3, 3, 2, 2, 8, 9, 4, 5, - 5, 10, 10, 57, 57, 11, 12, 12, 12, 12, - 279, 279, 87, 87, 85, 85, 86, 86, 152, 152, - 13, 14, 14, 162, 162, 161, 161, 161, 163, 163, - 163, 163, 197, 197, 15, 15, 15, 15, 15, 64, - 64, 261, 261, 260, 258, 258, 257, 257, 256, 23, - 24, 30, 31, 32, 263, 263, 232, 36, 36, 35, - 35, 35, 35, 37, 37, 34, 34, 33, 33, 234, - 234, 221, 221, 233, 233, 233, 233, 233, 233, 233, - 220, 199, 199, 199, 199, 202, 202, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 201, 201, 201, 201, - 201, 203, 203, 203, 203, 203, 204, 204, 204, 204, - 204, 204, 204, 204, 204, 204, 204, 204, 204, 
204, - 204, 205, 205, 205, 205, 205, 205, 205, 205, 219, - 219, 206, 206, 212, 212, 213, 213, 213, 215, 215, - 216, 216, 177, 177, 177, 208, 208, 209, 209, 214, - 214, 210, 210, 210, 211, 211, 211, 218, 218, 218, - 218, 218, 207, 207, 222, 248, 248, 247, 247, 243, - 243, 243, 243, 231, 231, 240, 240, 240, 240, 240, - 230, 230, 226, 226, 226, 227, 227, 228, 228, 225, - 225, 229, 229, 242, 242, 241, 223, 223, 224, 224, - 251, 251, 251, 251, 252, 268, 269, 267, 267, 267, - 267, 267, 55, 55, 55, 178, 178, 178, 238, 238, - 237, 237, 237, 239, 239, 236, 236, 236, 236, 236, - 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, - 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, - 236, 236, 236, 236, 172, 172, 172, 266, 266, 266, - 266, 266, 266, 265, 265, 265, 235, 235, 235, 264, - 264, 120, 120, 121, 121, 28, 28, 28, 28, 28, - 28, 27, 27, 27, 25, 25, 25, 25, 25, 25, + 1, 1, 1, 1, 1, 261, 261, 261, 264, 264, + 21, 50, 3, 3, 3, 3, 2, 2, 8, 9, + 4, 5, 5, 10, 10, 62, 62, 11, 12, 12, + 12, 12, 281, 281, 96, 96, 94, 94, 95, 95, + 162, 162, 13, 14, 14, 172, 172, 171, 171, 171, + 173, 173, 173, 173, 207, 207, 15, 15, 15, 15, + 15, 71, 71, 263, 263, 262, 260, 260, 259, 259, + 258, 23, 24, 33, 33, 33, 33, 34, 35, 265, + 265, 237, 39, 39, 38, 38, 38, 38, 40, 40, + 37, 37, 36, 36, 239, 239, 226, 226, 238, 238, + 238, 238, 238, 238, 238, 225, 144, 144, 144, 144, + 144, 144, 144, 144, 144, 144, 144, 209, 209, 209, + 209, 212, 212, 210, 210, 210, 210, 210, 210, 210, + 210, 210, 211, 211, 211, 211, 211, 213, 213, 213, + 213, 213, 214, 214, 214, 214, 214, 214, 214, 214, + 214, 214, 214, 214, 214, 214, 214, 215, 215, 215, + 215, 215, 215, 215, 215, 224, 224, 216, 216, 219, + 219, 220, 220, 220, 221, 221, 222, 222, 217, 217, + 217, 217, 218, 218, 218, 227, 251, 251, 250, 250, + 248, 248, 248, 248, 236, 236, 245, 245, 245, 245, + 245, 235, 235, 231, 231, 231, 232, 232, 233, 233, + 230, 230, 234, 234, 247, 247, 246, 228, 228, 229, + 229, 253, 253, 253, 253, 254, 270, 271, 269, 269, + 269, 269, 
269, 60, 60, 60, 184, 184, 184, 243, + 243, 242, 242, 242, 244, 244, 241, 241, 241, 241, + 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, + 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, + 241, 241, 241, 241, 241, 179, 179, 179, 268, 268, + 268, 268, 268, 268, 267, 267, 267, 240, 240, 240, + 266, 266, 130, 130, 131, 131, 30, 30, 30, 30, + 30, 30, 29, 29, 29, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, - 25, 25, 25, 25, 25, 25, 29, 29, 26, 26, - 26, 26, 26, 26, 26, 26, 26, 16, 16, 16, + 25, 25, 25, 25, 25, 25, 25, 31, 31, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 217, 217, 217, 253, 253, 254, - 254, 17, 22, 22, 18, 18, 18, 18, 19, 19, - 38, 39, 39, 39, 39, 39, 39, 39, 39, 39, - 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, - 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, - 39, 39, 39, 39, 39, 39, 39, 39, 39, 169, - 169, 270, 270, 171, 171, 167, 167, 170, 170, 168, - 168, 168, 173, 173, 173, 174, 174, 274, 274, 274, - 40, 40, 42, 42, 43, 44, 44, 192, 192, 193, - 193, 45, 46, 56, 56, 56, 56, 56, 56, 58, - 58, 58, 7, 7, 7, 7, 52, 52, 52, 6, - 6, 41, 41, 48, 271, 271, 272, 273, 273, 273, - 273, 49, 20, 280, 50, 51, 51, 63, 63, 63, - 59, 59, 59, 62, 62, 62, 67, 67, 69, 69, - 69, 69, 69, 70, 70, 70, 70, 70, 70, 66, - 66, 68, 68, 68, 68, 185, 185, 185, 184, 184, - 77, 77, 78, 78, 79, 79, 80, 80, 80, 118, - 95, 95, 150, 150, 149, 149, 151, 151, 151, 151, - 153, 153, 81, 81, 81, 81, 82, 82, 83, 83, - 84, 84, 191, 191, 190, 190, 190, 189, 189, 88, - 88, 88, 90, 89, 89, 89, 89, 91, 91, 93, - 93, 92, 92, 94, 96, 96, 96, 96, 96, 97, - 97, 76, 76, 76, 76, 76, 76, 76, 76, 165, - 165, 99, 99, 98, 98, 98, 98, 98, 98, 98, - 98, 98, 98, 110, 110, 110, 110, 110, 110, 100, - 100, 100, 100, 100, 100, 100, 65, 65, 111, 111, - 111, 117, 112, 112, 103, 103, 103, 103, 103, 103, - 103, 103, 103, 103, 103, 103, 103, 
103, 103, 103, - 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, - 103, 103, 103, 103, 103, 103, 103, 103, 107, 107, - 107, 107, 105, 105, 105, 105, 105, 105, 105, 105, - 105, 105, 105, 105, 105, 105, 106, 106, 106, 106, - 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, - 106, 106, 281, 281, 109, 108, 108, 108, 108, 108, - 108, 108, 61, 61, 61, 61, 61, 196, 196, 196, - 198, 198, 198, 198, 198, 198, 198, 198, 198, 198, - 198, 198, 198, 124, 124, 60, 60, 122, 122, 123, - 125, 125, 119, 119, 119, 102, 102, 102, 102, 102, - 102, 102, 102, 104, 104, 104, 126, 126, 127, 127, - 128, 128, 129, 129, 130, 131, 131, 131, 132, 132, - 132, 132, 249, 249, 249, 249, 249, 244, 244, 244, - 244, 245, 245, 245, 71, 71, 71, 71, 73, 73, - 72, 72, 53, 53, 54, 54, 54, 74, 74, 75, - 75, 75, 75, 147, 147, 147, 133, 133, 133, 133, - 138, 138, 138, 134, 134, 136, 136, 136, 137, 137, - 137, 135, 141, 141, 143, 143, 142, 142, 140, 140, - 145, 145, 144, 144, 139, 139, 101, 101, 101, 101, - 101, 148, 148, 148, 148, 154, 154, 113, 113, 115, - 115, 114, 116, 155, 155, 159, 156, 156, 160, 160, - 160, 160, 160, 157, 157, 158, 158, 186, 186, 186, - 164, 164, 175, 175, 176, 176, 166, 166, 179, 179, - 179, 146, 146, 146, 146, 250, 250, 246, 182, 182, - 183, 183, 187, 187, 188, 188, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, - 180, 180, 180, 180, 180, 180, 180, 180, 180, 181, - 181, 181, 181, 181, 181, 181, 
181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, - 181, 181, 181, 181, 181, 181, 181, 277, 278, 194, - 195, 195, 195, + 16, 16, 16, 16, 16, 16, 16, 257, 257, 257, + 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, + 257, 257, 257, 257, 257, 257, 257, 257, 257, 223, + 223, 223, 255, 255, 256, 256, 17, 22, 22, 18, + 18, 18, 18, 19, 19, 41, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 272, 272, 178, 178, 186, 186, 177, 177, 200, 200, + 200, 180, 180, 180, 181, 181, 276, 276, 276, 43, + 43, 45, 45, 46, 47, 47, 202, 202, 203, 203, + 48, 49, 61, 61, 61, 61, 61, 61, 63, 63, + 63, 7, 7, 7, 7, 57, 57, 57, 6, 6, + 44, 44, 51, 273, 273, 274, 275, 275, 275, 275, + 52, 54, 20, 20, 20, 20, 20, 
20, 78, 78, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 72, 72, 72, 67, 67, 282, 55, 56, + 56, 70, 70, 70, 64, 64, 64, 69, 69, 69, + 75, 75, 77, 77, 77, 77, 77, 79, 79, 79, + 79, 79, 79, 79, 74, 74, 76, 76, 76, 76, + 193, 193, 193, 192, 192, 86, 86, 87, 87, 88, + 88, 89, 89, 89, 128, 104, 104, 160, 160, 159, + 159, 161, 161, 161, 161, 163, 163, 90, 90, 90, + 90, 91, 91, 92, 92, 93, 93, 201, 201, 198, + 198, 198, 197, 197, 97, 97, 97, 99, 98, 98, + 98, 98, 100, 100, 102, 102, 101, 101, 103, 105, + 105, 105, 105, 105, 106, 106, 85, 85, 85, 85, + 85, 85, 85, 85, 175, 175, 108, 108, 107, 107, + 107, 107, 107, 107, 107, 107, 107, 107, 119, 119, + 119, 119, 119, 119, 109, 109, 109, 109, 109, 109, + 109, 73, 73, 120, 120, 120, 127, 121, 121, 112, + 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, + 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, + 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, + 112, 112, 112, 116, 116, 116, 116, 114, 114, 114, + 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, + 114, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 283, 283, 118, + 117, 117, 117, 117, 117, 117, 117, 68, 68, 68, + 68, 68, 206, 206, 206, 208, 208, 208, 208, 208, + 208, 208, 208, 208, 208, 208, 208, 208, 134, 134, + 65, 65, 132, 132, 133, 135, 135, 129, 129, 129, + 111, 111, 111, 111, 111, 111, 111, 111, 113, 113, + 113, 136, 136, 137, 137, 138, 138, 139, 139, 140, + 141, 141, 141, 142, 142, 142, 142, 32, 32, 32, + 32, 32, 27, 27, 27, 27, 28, 28, 28, 80, + 80, 80, 80, 82, 82, 81, 81, 58, 58, 59, + 59, 59, 83, 83, 84, 84, 84, 84, 157, 157, + 157, 143, 143, 143, 143, 149, 149, 149, 145, 145, + 147, 147, 147, 148, 148, 148, 146, 154, 154, 156, + 156, 155, 155, 151, 151, 152, 152, 153, 153, 153, + 150, 150, 110, 110, 110, 110, 110, 158, 158, 158, + 158, 164, 164, 123, 123, 125, 125, 124, 126, 165, + 165, 169, 166, 166, 170, 170, 170, 170, 170, 167, + 167, 168, 168, 194, 194, 194, 174, 174, 185, 185, + 182, 182, 183, 183, 176, 176, 
187, 187, 187, 53, + 122, 122, 252, 252, 249, 190, 190, 191, 191, 195, + 195, 199, 199, 196, 196, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + 188, 188, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 
189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, + 189, 189, 189, 189, 189, 189, 189, 189, 279, 280, + 204, 205, 205, 205, } var yyR2 = [...]int{ 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 0, 1, 1, 1, 0, 1, 2, 3, - 5, 6, 6, 7, 4, 6, 5, 7, 8, 1, - 3, 7, 8, 1, 1, 9, 9, 8, 7, 7, - 1, 1, 1, 3, 1, 3, 1, 3, 0, 4, - 3, 5, 4, 1, 3, 3, 2, 2, 2, 2, - 2, 1, 1, 1, 2, 2, 6, 11, 2, 0, - 2, 0, 2, 1, 0, 2, 1, 3, 3, 4, - 3, 7, 4, 2, 1, 1, 4, 0, 1, 1, - 1, 2, 2, 0, 1, 4, 4, 4, 4, 2, - 4, 1, 3, 1, 1, 3, 4, 3, 3, 3, - 8, 3, 1, 1, 1, 2, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, - 2, 1, 2, 2, 2, 2, 4, 4, 2, 2, - 3, 3, 3, 3, 1, 1, 1, 1, 1, 6, - 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 3, 0, 3, 0, 5, 0, 3, 5, 0, 1, - 0, 1, 0, 1, 2, 0, 2, 0, 3, 0, - 1, 0, 2, 2, 0, 2, 2, 0, 2, 1, - 2, 1, 0, 2, 5, 0, 1, 1, 2, 1, - 3, 2, 3, 0, 1, 3, 3, 3, 4, 2, - 0, 2, 1, 1, 1, 1, 1, 0, 1, 1, - 1, 0, 1, 1, 3, 3, 3, 1, 3, 1, - 10, 11, 11, 12, 5, 3, 3, 1, 1, 2, - 2, 2, 0, 1, 1, 0, 1, 2, 0, 1, - 1, 3, 2, 1, 2, 3, 3, 4, 4, 3, - 3, 3, 3, 4, 4, 3, 3, 3, 3, 3, + 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, + 2, 3, 5, 6, 6, 7, 4, 6, 5, 7, + 8, 1, 3, 7, 8, 1, 1, 9, 9, 8, + 7, 7, 1, 1, 1, 3, 1, 3, 1, 3, + 0, 4, 3, 5, 4, 1, 3, 3, 2, 2, + 2, 2, 2, 1, 1, 1, 2, 2, 6, 11, + 2, 0, 2, 0, 2, 1, 0, 2, 1, 3, + 3, 5, 3, 6, 7, 7, 7, 5, 2, 1, + 1, 4, 0, 1, 1, 1, 2, 2, 0, 1, + 4, 4, 4, 4, 2, 4, 1, 3, 1, 1, + 3, 4, 3, 3, 3, 3, 0, 2, 3, 3, + 4, 2, 3, 3, 2, 3, 2, 3, 1, 1, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, + 2, 2, 4, 4, 2, 2, 3, 3, 3, 3, + 1, 1, 1, 1, 1, 6, 6, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 3, 0, 3, 0, + 5, 0, 3, 5, 0, 1, 0, 1, 0, 2, + 2, 2, 0, 2, 2, 5, 0, 1, 1, 2, + 1, 3, 2, 3, 0, 1, 3, 3, 3, 4, + 2, 0, 2, 1, 1, 1, 1, 1, 0, 1, + 1, 1, 0, 1, 1, 3, 3, 3, 1, 3, + 1, 10, 11, 11, 12, 5, 3, 3, 1, 1, + 2, 2, 2, 0, 1, 1, 0, 1, 2, 0, + 1, 1, 3, 2, 1, 2, 3, 3, 4, 4, + 3, 3, 3, 3, 4, 
4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 4, 5, 0, 2, 2, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, - 1, 0, 2, 0, 2, 0, 1, 5, 1, 3, - 7, 1, 3, 3, 1, 2, 2, 2, 5, 5, - 5, 6, 6, 5, 5, 2, 2, 2, 2, 3, - 3, 3, 4, 1, 3, 5, 1, 3, 3, 3, - 3, 3, 3, 3, 3, 2, 2, 2, 4, 4, - 2, 10, 3, 6, 7, 5, 5, 5, 12, 7, - 5, 9, 5, 3, 7, 4, 4, 4, 4, 3, - 3, 3, 7, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 2, 0, 2, 2, 1, 3, 8, - 8, 3, 3, 5, 5, 6, 5, 4, 3, 2, - 3, 3, 3, 3, 3, 3, 3, 4, 2, 4, - 4, 4, 4, 4, 5, 7, 4, 4, 4, 4, - 4, 4, 4, 4, 2, 4, 7, 2, 4, 5, - 4, 3, 3, 5, 2, 3, 3, 3, 3, 1, - 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, - 2, 2, 0, 2, 2, 0, 2, 0, 1, 1, - 2, 1, 1, 2, 1, 1, 5, 0, 1, 0, - 1, 2, 3, 0, 3, 3, 3, 3, 1, 1, - 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, - 3, 2, 2, 3, 1, 3, 2, 1, 2, 1, - 2, 2, 2, 0, 2, 0, 2, 1, 2, 2, - 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, - 2, 3, 4, 1, 1, 1, 1, 1, 1, 1, - 3, 1, 2, 3, 5, 0, 1, 2, 1, 1, - 0, 2, 1, 3, 1, 1, 1, 3, 3, 3, - 3, 7, 0, 3, 1, 3, 1, 1, 3, 3, - 1, 3, 4, 4, 4, 3, 2, 4, 0, 1, - 0, 2, 0, 1, 0, 1, 2, 1, 1, 1, - 2, 2, 1, 2, 3, 2, 3, 2, 2, 2, - 1, 1, 3, 3, 0, 5, 4, 5, 5, 0, - 2, 1, 3, 3, 3, 2, 3, 1, 2, 0, - 3, 1, 1, 3, 3, 4, 4, 5, 3, 4, - 5, 6, 2, 1, 2, 1, 2, 1, 2, 1, - 1, 1, 1, 1, 1, 1, 0, 2, 1, 1, - 1, 3, 1, 3, 1, 1, 1, 1, 1, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 3, 1, 1, 1, 1, 4, 5, - 5, 6, 4, 4, 6, 6, 6, 8, 8, 8, - 8, 9, 8, 5, 4, 2, 2, 2, 2, 2, + 3, 3, 3, 4, 5, 0, 2, 2, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 0, 2, 0, 2, 0, 1, 5, 1, + 3, 7, 1, 3, 3, 1, 2, 2, 2, 5, + 5, 5, 6, 6, 5, 5, 2, 2, 2, 2, + 3, 3, 3, 4, 1, 3, 5, 1, 3, 3, + 3, 3, 3, 3, 3, 3, 2, 2, 2, 4, + 4, 2, 10, 3, 6, 7, 5, 5, 5, 12, + 7, 5, 9, 4, 4, 4, 4, 5, 3, 7, + 4, 4, 4, 4, 3, 3, 3, 7, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 2, 0, + 2, 2, 1, 3, 8, 8, 3, 3, 5, 6, + 6, 5, 5, 3, 2, 3, 3, 3, 7, 3, + 3, 3, 3, 4, 7, 5, 2, 4, 4, 4, + 4, 4, 5, 5, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 2, 4, 2, 4, 5, + 4, 4, 3, 3, 5, 2, 3, 3, 3, 3, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 2, + 2, 0, 2, 2, 0, 2, 0, 1, 1, 
2, + 1, 1, 2, 1, 1, 5, 0, 1, 0, 1, + 2, 3, 0, 3, 3, 3, 3, 1, 1, 1, + 1, 1, 1, 1, 1, 0, 1, 1, 3, 3, + 2, 2, 3, 1, 3, 2, 1, 2, 1, 2, + 2, 3, 3, 3, 6, 4, 7, 6, 1, 3, + 2, 2, 2, 2, 1, 1, 1, 3, 2, 1, + 1, 1, 0, 1, 1, 0, 3, 0, 2, 0, + 2, 1, 2, 2, 0, 1, 1, 0, 1, 1, + 0, 1, 0, 1, 2, 3, 4, 1, 1, 1, + 1, 1, 1, 1, 1, 3, 1, 2, 3, 5, + 0, 1, 2, 1, 1, 0, 2, 1, 3, 1, + 1, 1, 3, 3, 3, 3, 7, 0, 3, 1, + 3, 1, 1, 3, 3, 1, 3, 4, 4, 4, + 3, 2, 4, 0, 1, 0, 2, 0, 1, 0, + 1, 2, 1, 1, 1, 2, 2, 1, 2, 3, + 2, 3, 2, 2, 2, 1, 1, 3, 3, 0, + 5, 4, 5, 5, 0, 2, 1, 3, 3, 3, + 2, 3, 1, 2, 0, 3, 1, 1, 3, 3, + 4, 4, 5, 3, 4, 5, 6, 2, 1, 2, + 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, + 1, 0, 2, 1, 1, 1, 3, 1, 3, 1, + 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 1, + 1, 1, 1, 4, 5, 5, 6, 4, 4, 6, + 6, 6, 8, 8, 8, 8, 9, 8, 5, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 8, 8, 0, 2, 3, 4, 4, 4, 4, 4, - 4, 4, 0, 3, 4, 7, 3, 1, 1, 1, - 2, 3, 3, 1, 2, 2, 1, 2, 1, 2, - 2, 1, 2, 0, 1, 0, 2, 1, 2, 4, - 0, 2, 1, 3, 5, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 2, 0, 3, 0, 2, - 0, 3, 1, 3, 2, 0, 1, 1, 0, 2, - 4, 4, 0, 2, 2, 1, 1, 3, 3, 3, - 3, 3, 3, 3, 0, 3, 3, 3, 0, 3, - 1, 1, 0, 4, 0, 1, 1, 0, 3, 1, - 3, 2, 1, 0, 2, 4, 0, 9, 3, 5, - 0, 3, 3, 0, 1, 0, 2, 2, 0, 2, - 2, 2, 0, 3, 0, 3, 0, 3, 0, 4, - 0, 3, 0, 4, 0, 1, 2, 1, 5, 4, - 4, 1, 3, 3, 5, 0, 5, 1, 3, 1, - 2, 3, 1, 1, 3, 3, 1, 3, 3, 3, - 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, - 1, 1, 0, 2, 0, 3, 0, 1, 0, 1, - 1, 0, 1, 1, 1, 0, 1, 2, 1, 1, + 2, 2, 2, 2, 2, 8, 8, 0, 2, 3, + 4, 4, 4, 4, 4, 4, 4, 0, 3, 4, + 7, 3, 1, 1, 1, 2, 3, 3, 1, 2, + 2, 1, 2, 1, 2, 2, 1, 2, 0, 1, + 0, 2, 1, 2, 4, 0, 2, 1, 3, 5, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, + 2, 0, 3, 0, 2, 0, 3, 1, 3, 2, + 0, 1, 1, 0, 2, 4, 4, 0, 2, 2, + 1, 1, 3, 3, 3, 3, 3, 3, 3, 0, + 3, 3, 3, 0, 3, 1, 1, 0, 4, 0, + 1, 1, 0, 3, 1, 3, 2, 1, 0, 2, + 4, 0, 9, 3, 5, 0, 3, 3, 0, 1, + 0, 2, 2, 0, 2, 2, 2, 0, 2, 1, + 2, 3, 3, 0, 2, 1, 2, 3, 4, 3, + 0, 1, 2, 1, 5, 4, 4, 1, 3, 3, + 5, 0, 5, 1, 3, 1, 2, 3, 1, 1, + 3, 3, 1, 3, 
3, 3, 3, 3, 2, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, + 0, 2, 0, 3, 0, 1, 0, 1, 1, 5, + 0, 1, 0, 1, 2, 1, 1, 1, 1, 1, + 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -4360,460 +4969,483 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, - 0, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 1, 1, } var yyChk = [...]int{ - -1000, -275, -1, -3, -8, -9, -10, -11, -12, -13, - -14, -15, -16, -17, -18, -19, -38, -39, -40, -42, - -43, -44, -45, -46, -6, -41, -20, -21, -47, -48, - -49, -4, -277, 6, 7, 8, -57, 10, 11, 31, - -23, -30, 152, -31, -24, 153, -32, 155, 154, 190, - 156, 183, 70, 224, 225, 227, 228, 229, 230, -58, - 188, 189, 158, 35, 41, 32, 33, 80, 9, 322, - 185, 184, 26, -276, 451, -63, 5, -128, 16, -3, - -50, -280, -50, -50, -50, -50, -50, -50, -232, -234, - 80, 125, 80, -64, 162, -146, -263, 106, 168, 171, - 172, 313, 161, -36, -35, -34, -33, -37, 30, -28, - -29, -255, -27, -26, 157, 154, 198, 101, 102, 190, - 191, 192, 156, 174, 189, 193, 188, 207, -25, 76, - 32, 335, 338, -239, 153, 159, 160, 323, 104, 103, - 71, 155, -236, 274, 428, -37, 430, 94, 96, 429, - 40, 163, 431, 432, 433, 434, 173, 435, 436, 437, - 438, 444, 445, 446, 447, 105, 5, 162, -263, -71, - 284, 76, -262, -259, 83, 84, 85, 162, 162, 163, - 164, -263, 162, -92, -187, -259, -181, 332, 176, 366, - 367, 222, 76, 274, 428, 224, 238, 232, 259, 251, - 333, 368, 177, 211, 249, 252, 300, 430, 369, 191, - 296, 279, 287, 94, 227, 309, 443, 370, 441, 96, - 429, 75, 47, 40, 186, 247, 243, 431, 212, 371, - 343, 205, 104, 101, 450, 241, 46, 28, 440, 103, - 45, 432, 372, 433, 289, 264, 44, 192, 373, 79, - 337, 291, 242, 288, 221, 439, 158, 374, 422, 298, - 375, 265, 269, 376, 301, 48, 377, 378, 102, 379, - 74, 434, 236, 237, 380, 219, 175, 303, 263, 173, - 34, 292, 334, 223, 54, 199, 304, 42, 267, 41, - 426, 381, 
262, 258, 49, 382, 383, 384, 385, 435, - 261, 235, 257, 449, 217, 436, 58, 160, 271, 270, - 272, 206, 299, 254, 386, 387, 388, 180, 77, 389, - 244, 19, 390, 391, 213, 392, 52, 393, 394, 307, - 189, 395, 50, 437, 37, 194, 438, 396, 397, 398, - 399, 400, 297, 401, 290, 266, 268, 201, 286, 336, - 402, 240, 193, 442, 403, 181, 195, 198, 188, 308, - 182, 404, 405, 406, 407, 408, 409, 410, 228, 444, - 39, 411, 412, 413, 414, 220, 216, 302, 311, 57, - 78, 276, 415, 234, 214, 416, 225, 51, 445, 446, - 447, 208, 448, 282, 105, 218, 43, 255, 200, 417, - 418, 245, 246, 260, 233, 256, 226, 423, 202, 190, - 419, 310, 215, 277, 340, 207, 339, 253, 250, 209, - 420, 164, 203, 204, 421, 424, 293, 283, 294, 295, - 284, 210, 338, 248, 278, 162, -157, 279, 280, 281, - 292, 293, 298, 297, 201, -274, 301, 162, -167, 143, - 152, 289, -171, 290, 283, 284, 210, -270, -259, 433, - 448, 300, 252, 302, 426, 285, 291, 295, 294, -187, - 226, -192, 231, -182, -259, -181, 229, -92, -56, 422, - 156, -194, -194, -194, -112, -76, -98, 109, -103, 30, - 24, -102, -99, -119, -116, -117, 143, 144, 146, 145, - 147, 132, 133, 140, 110, 148, -107, -105, -106, -108, - 87, 86, 95, 88, 89, 90, 91, 97, 98, 99, - -182, -187, -114, -277, 64, 65, 323, 324, 325, 326, - 331, 327, 112, 53, 318, 312, 321, 320, 319, 316, - 317, 314, 315, 329, 330, 167, 313, 161, 138, 322, - -259, -181, 40, 282, 282, -5, -4, -277, 6, 21, - 22, -132, 18, 17, -278, 82, -59, -69, 59, 60, - -70, 22, 36, 63, 61, -51, -68, 134, -76, -187, - -68, -166, 166, -166, -166, -156, -197, 226, -160, 302, - 301, -183, -158, -182, -180, -157, 299, 157, 341, 108, - 23, 25, 111, 143, 17, 112, 159, 174, 142, 170, - 323, 152, 68, 342, 314, 315, 312, 318, 325, 326, - 313, 280, 30, 11, 344, 26, 184, 22, 36, 136, - 154, 115, 116, 187, 24, 185, 99, 347, 20, 71, - 179, 12, 172, 14, 348, 349, 15, 167, 166, 127, - 163, 66, 9, 148, 27, 124, 62, 350, 29, 351, - 352, 353, 354, 64, 125, 18, 316, 317, 32, 427, - 355, 331, 196, 138, 69, 55, 109, 356, 357, 97, - 358, 
100, 72, 106, 16, 67, 38, 359, 197, 360, - 169, 361, 305, 362, 126, 155, 322, 65, 363, 161, - 281, 6, 328, 31, 183, 171, 63, 364, 162, 114, - 329, 330, 165, 98, 5, 168, 33, 10, 70, 73, - 319, 320, 321, 53, 335, 113, 13, 365, 306, 107, - 300, 252, -233, 125, -220, -224, -182, 178, -252, 174, - -92, -242, -241, -182, -71, -176, 167, 163, -176, 322, - -33, -34, -157, 142, 195, 81, 81, -224, -223, -222, - -264, 197, 178, -251, -240, 170, 179, -230, 171, 172, - -225, 163, 29, -264, -225, 169, 179, 197, 197, 105, - 197, 105, 197, 197, 197, 197, 197, 197, 197, 197, - 197, 194, -231, 117, -231, 339, 339, -236, -264, -264, - -264, 165, 34, 34, -179, -225, 165, 23, -231, -231, - -157, 142, -231, -231, -231, -231, 205, 205, -231, -231, - -231, -231, -231, -231, -231, -231, -231, -231, -231, -231, - -231, -231, -231, -92, -74, 212, 152, 154, 157, 72, - 117, -35, 207, -22, -92, -175, 167, -259, -175, -175, - -92, 149, -92, -173, 125, 13, -173, -173, -173, -173, - -173, 208, 296, 208, 296, 208, 209, 208, 209, 208, - -170, -169, 287, 288, 282, 286, -259, 313, 298, -259, - 201, 162, 202, 164, -226, 163, 34, 175, 209, 282, - 204, -173, -195, -277, -183, -195, -195, 31, 165, -182, - -52, -182, 87, -7, -3, -11, -10, -12, 117, 81, - 108, 106, 107, 124, -76, -100, 127, 109, 125, 126, - 111, 129, 128, 139, 132, 133, 134, 135, 136, 137, - 138, 130, 131, 142, 117, 118, 119, 120, 121, 122, - 123, -165, -277, -117, -277, 150, 151, -103, -103, -103, - -103, -103, -103, -103, -103, -103, -103, -277, 149, -2, - -112, -4, -277, -277, -277, -277, -277, -277, -277, -277, - -124, -76, -277, -281, -277, -281, -109, -277, -281, -109, - -281, -109, -281, -281, -109, -281, -109, -281, -281, -109, - -277, -277, -277, -277, -277, -277, -277, -194, -271, -272, - -95, -92, -128, -3, -50, -147, 20, 32, -76, -129, - -130, -76, -128, 55, -66, -68, -70, 59, 60, 93, - 12, -185, -184, 23, -182, 87, 149, 12, -93, 27, - -92, -78, -79, -80, -81, -95, -118, -277, 12, -85, - -86, -92, -94, -187, 81, 226, -160, -197, 
-162, -161, - 303, 305, 117, -186, -182, 87, 30, 82, 81, -92, - -199, -202, -204, -203, -205, -200, -201, 249, 250, 143, - 253, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 31, 186, 245, 246, 247, 248, 265, 266, 267, - 268, 269, 270, 271, 272, 232, 251, 333, 233, 234, - 235, 236, 237, 238, 240, 241, 242, 243, 244, -262, - -259, 80, 82, 81, -206, 80, -74, -92, 109, -259, - -259, -231, -231, 194, -27, -26, -255, 16, -25, -26, - 157, 101, 102, 154, 80, -220, 80, -229, -262, -259, - 80, 29, 169, 168, -228, -225, -228, -229, -259, -119, - -182, -187, -259, 29, 29, -153, -182, -153, -153, 21, - -153, 21, -153, 21, 88, -182, -153, 21, -153, 21, - -153, 21, -153, 21, -153, 21, 30, 74, 75, 30, - 77, 78, 79, -119, -119, -220, -157, -92, -259, 88, - 88, -231, -231, 88, 87, 87, 87, -231, -231, 88, - 87, -259, 87, -265, 180, 221, 223, 88, 88, 88, - 88, 30, 87, -266, 30, 440, 439, 441, 442, 443, - 88, 30, 88, 30, 88, -182, 80, -73, 214, 117, - 203, 203, 162, 162, 216, -92, 215, 217, 218, 40, - 81, 165, -85, 24, 72, -87, -92, -259, -188, -187, - -180, 87, -76, -173, -92, -173, -92, -173, -173, -173, - -173, -168, 12, 127, -227, 12, 127, -168, -195, -195, - -92, -195, -195, -92, -195, -195, -227, -174, 125, 72, - -193, 229, 263, 423, 424, 425, -76, -76, -76, -76, - -110, 97, 109, 98, 99, -103, -111, -114, -117, 92, - 127, 125, 126, 111, -103, -103, -103, -103, -103, -103, - -103, -103, -103, -103, -103, -103, -103, -103, -103, -196, - -259, 87, 143, -259, -102, -102, -182, -67, 22, 36, - -66, -183, -188, -180, -63, -278, -278, -128, -66, -66, - -76, -76, -119, 87, -66, -119, 87, -66, -66, -62, - 22, 36, -122, -123, 113, -119, -278, -103, -182, -182, - -66, -67, -67, -66, -66, 81, -273, 305, 306, 427, - -190, 197, -189, 23, -187, 87, -132, -278, -133, 27, - 10, 127, 81, 19, 81, -131, 25, 26, -132, -104, - -182, 88, 91, -77, 81, 12, -70, -92, -184, 134, - -188, -92, -152, 197, -92, 31, 81, -88, -90, -89, - -91, 62, 66, 68, 63, 64, 65, 69, -191, 23, - -78, -3, -277, -92, -85, 
-279, 81, 12, 73, -279, - 81, 149, -160, -162, 81, 304, 306, 307, 72, 100, - -76, -211, 142, -238, -237, -236, -220, -222, -223, -224, - 82, -177, 97, 109, -215, 277, -206, -206, -206, -206, - -206, -210, -157, -210, -210, -210, 80, 80, -206, -206, - -206, -206, -212, 80, -212, -212, -213, 80, -213, -252, - -76, -248, -247, -243, -246, 173, 94, 335, 73, -241, - -131, 88, -73, 24, -250, -246, -259, 87, -259, 87, - 81, 17, -221, -220, -120, 221, -254, 197, -251, -242, - 80, 29, -228, -229, -229, 149, -259, 81, 27, 105, - 105, 105, 105, 335, 154, 31, -220, -120, -196, 165, - -196, -196, 87, 87, -172, 448, -85, 164, 220, -75, - 318, 87, 83, -92, -92, -92, -92, -92, 157, 154, - 205, -92, -92, -55, 182, 177, -92, 81, -55, -173, - -187, -187, -92, -173, -92, 87, -92, -182, 97, 98, - 99, -111, -103, -103, -103, -65, 187, 108, -278, -278, - -66, -66, -277, 149, -5, -132, -278, -278, 81, 73, - 23, 12, 12, -278, 12, 12, -278, -278, -66, -125, - -123, 115, -76, -278, -278, 81, 81, -278, -278, -278, - -278, -278, -272, 426, 306, -96, 70, 166, 71, -277, - -189, -147, 38, 46, 57, -76, -76, -130, -147, -164, - 20, 12, 53, 53, -97, 13, -68, -78, -70, 149, - -97, -101, 31, 53, -3, -277, -277, -155, -159, -119, - -79, -80, -80, -79, -80, 62, 62, 62, 67, 62, - 67, 62, -89, -187, -278, -278, -3, -152, 73, -78, - -92, -78, -94, -187, 134, -161, -163, 308, 305, 311, - -259, 87, 81, -236, -224, -208, 30, 97, -216, 278, - -210, -210, -211, -259, 143, -211, -211, -211, -219, 87, - -219, 88, 88, 82, -249, -244, -245, 32, 76, -243, - -231, 87, 37, -182, 82, 164, 72, 16, -149, -182, - 81, 82, -121, 222, -119, 82, -182, 82, -149, -229, - -183, -182, -277, 162, 30, 30, -120, -121, -211, -259, - 450, 449, 82, -92, -72, 212, 219, 80, 84, -261, - 73, 203, 274, 203, 206, 165, -195, -92, -168, -168, - -65, 108, -103, -103, -278, -278, -67, -183, -128, -147, - -198, 143, 249, 186, 247, 243, 263, 254, 276, 245, - 277, -196, -198, -103, -103, -103, -103, 332, -128, 116, - -76, 114, -103, -103, 163, 163, 
163, -153, 39, 87, - 87, 58, -92, -126, 14, -76, 134, -132, -154, 72, - -155, -113, -115, -114, -277, -148, -278, -182, -153, -97, - 81, 117, -83, -82, 72, 73, -84, 72, -82, 62, - 62, -278, -97, -78, -97, -97, 149, 305, 309, 310, - -236, -209, 72, -103, -211, -211, 82, 81, 82, 81, - 82, 81, -178, 372, 109, -245, -244, -231, -231, 88, - -259, -92, -92, 17, 81, -220, -119, 53, -248, 82, - -253, -254, -92, -102, -121, -150, 80, 82, -258, 335, - -260, -259, -182, -182, -182, -92, -173, -173, -103, -278, - -132, -278, -206, -206, -206, -213, -206, 237, -206, 237, - -278, -278, 20, 20, 20, 20, -277, -60, 328, -76, - 81, 81, -277, -277, -277, -278, 87, -210, -127, 15, - 17, 28, -154, 81, -278, -278, 81, 53, 149, -278, - -128, -159, -76, -76, 80, -76, -128, -97, -214, 274, - 10, -210, 87, -210, 88, 88, 372, 30, 77, 78, - 79, 30, 74, 75, -150, -149, -182, 199, 181, -278, - 81, -217, 335, 338, 23, -149, -257, -256, -183, 80, - 73, -147, -210, -259, -103, -103, -103, -103, -103, -132, - 87, -103, -103, -151, -278, -182, 169, -151, -151, -190, - -210, -135, -140, -170, -76, -112, 29, -115, 53, -3, - -182, -113, -182, -132, -149, -132, -218, 169, 29, 168, - -106, -211, -211, 82, 82, 23, 200, -92, -254, 339, - 339, -3, 82, 81, 117, -149, -92, -278, -278, -278, - -278, -61, 127, 335, -278, -278, -278, 81, -278, -278, - -278, -96, -138, 422, -141, 42, -142, 43, 10, -113, - 149, 82, -207, 94, 29, 29, -3, -277, 80, -53, - 335, -256, -235, -183, 87, 88, 82, -278, 333, 69, - 336, -182, 169, -135, 47, 255, -143, 51, -144, -139, - 52, 17, -155, -182, 87, -53, -103, 196, -149, -54, - 211, 426, -261, 58, 334, 337, -136, 49, -134, 48, - -134, -142, 17, -145, 44, 45, 87, -278, -278, 82, - 174, -258, 58, -137, 50, 72, 100, 87, 17, 17, - -268, -269, 72, 213, 335, 72, 100, 87, 87, -269, - 72, 11, 10, 336, -267, 182, 177, 180, 31, -267, - 337, 176, 30, 97, + -1000, -277, -1, -3, -8, -9, -10, -11, -12, -13, + -14, -15, -16, -17, -18, -19, -41, -42, -43, -45, + -46, -47, -48, -49, -6, -44, -20, 
-21, -50, -51, + -52, -53, -54, -4, -279, 6, 7, 8, -62, 10, + 11, 31, -23, -33, 153, -34, -24, 154, -35, 156, + 155, 192, 157, 185, 71, 231, 232, 234, 235, 236, + 237, -63, 190, 191, 159, 35, 42, 32, 33, 36, + 162, 81, 9, 334, 187, 186, 26, -278, 474, -70, + 5, -138, 16, -3, -55, -282, -55, -55, -55, -55, + -55, -55, -237, -239, 81, 126, 81, -71, -185, 165, + 174, 173, 170, -265, 107, 220, 325, 163, -39, -38, + -37, -36, -40, 30, -30, -31, -257, -29, -26, 158, + 155, 200, 102, 103, 192, 193, 194, 157, 176, 191, + 195, 190, 209, -25, 77, 32, 347, 350, -244, 154, + 160, 161, 335, 105, 104, 72, 156, -241, 281, 451, + -40, 453, 95, 97, 452, 41, 165, 454, 455, 456, + 457, 175, 458, 459, 460, 461, 467, 468, 469, 470, + 106, 5, 164, -265, -80, 291, 227, 77, -199, -195, + -261, -189, 84, 85, 86, 344, 178, 378, 379, 225, + 77, 281, 451, 231, 245, 239, 266, 258, 345, 380, + 228, 179, 213, 448, 256, 312, 453, 381, 193, 304, + 286, 294, 95, 234, 321, 466, 230, 382, 464, 97, + 452, 76, 48, 41, 188, 254, 250, 454, 214, 383, + 355, 207, 105, 102, 473, 248, 47, 28, 463, 104, + 46, 455, 384, 456, 296, 271, 442, 45, 309, 194, + 385, 80, 349, 450, 298, 249, 295, 224, 462, 159, + 386, 434, 306, 443, 387, 272, 276, 388, 313, 49, + 389, 390, 444, 103, 391, 75, 457, 243, 244, 392, + 222, 177, 315, 270, 175, 34, 299, 346, 226, 55, + 201, 316, 43, 274, 42, 438, 393, 441, 269, 265, + 50, 394, 395, 396, 397, 458, 268, 242, 264, 472, + 219, 459, 59, 161, 278, 277, 279, 208, 311, 261, + 398, 399, 400, 182, 78, 401, 251, 19, 402, 403, + 307, 215, 404, 53, 405, 406, 319, 191, 407, 51, + 460, 38, 196, 461, 408, 409, 410, 411, 412, 305, + 413, 297, 273, 275, 203, 293, 348, 414, 247, 195, + 465, 415, 183, 449, 197, 200, 190, 320, 184, 416, + 417, 418, 419, 420, 229, 421, 422, 235, 467, 40, + 423, 424, 425, 426, 223, 218, 314, 323, 58, 79, + 283, 427, 447, 241, 216, 428, 232, 52, 468, 469, + 470, 210, 471, 289, 106, 220, 221, 44, 262, 202, + 429, 430, 252, 253, 267, 240, 263, 233, 435, 204, + 308, 
192, 431, 322, 217, 284, 352, 209, 310, 446, + 351, 260, 257, 211, 432, 166, 205, 206, 433, 436, + 300, 290, 301, 302, 227, 303, 291, 212, 350, 255, + 285, 164, -185, 165, 166, -265, 164, -101, -195, 164, + -167, 286, -186, 287, 288, 299, 300, 306, -178, 307, + 305, 203, -276, 313, 164, 308, 153, 144, 296, 297, + 290, 303, 291, 212, -272, -261, 456, 471, 312, 259, + 292, 298, 314, 438, 302, 301, -195, 233, -202, 238, + -190, -261, -189, 236, -101, -61, 434, 157, -204, -204, + -72, 438, 440, -121, -85, -107, 110, -112, 30, 24, + -111, -108, -129, -126, -127, 144, 145, 147, 146, 148, + 133, 134, 141, 111, 149, -116, -114, -115, -117, 88, + 87, 96, 89, 90, 91, 92, 98, 99, 100, -190, + -195, -124, -279, 65, 66, 335, 336, 337, 338, 343, + 339, 113, 54, 330, 324, 333, 332, 331, 328, 329, + 326, 327, 341, 342, 169, 325, 163, 139, 334, -261, + -189, 41, 289, 289, -101, 227, -5, -4, -279, 6, + 21, 22, -142, 18, 17, -280, 83, -64, -77, 60, + 61, -79, 22, 37, 64, 62, 21, -56, -76, 135, + -85, -195, -76, -176, 168, -176, -176, -166, -207, 233, + -170, 314, 313, -191, -168, -190, -188, -167, 311, 158, + 353, 109, 23, 25, 112, 144, 17, 113, 36, 160, + 259, 176, 143, 172, 335, 153, 69, 354, 326, 327, + 324, 330, 337, 338, 325, 287, 30, 11, 356, 26, + 186, 22, 37, 137, 155, 116, 117, 189, 24, 187, + 100, 359, 20, 72, 181, 12, 174, 14, 360, 361, + 15, 169, 168, 128, 165, 67, 9, 149, 27, 125, + 63, 362, 29, 363, 364, 365, 366, 65, 126, 18, + 328, 329, 32, 439, 367, 343, 198, 139, 70, 56, + 440, 110, 368, 369, 98, 370, 101, 73, 445, 107, + 16, 68, 39, 371, 199, 372, 171, 373, 317, 374, + 127, 156, 334, 66, 375, 163, 288, 6, 340, 31, + 185, 173, 64, 376, 164, 115, 341, 342, 167, 99, + 5, 170, 33, 10, 71, 74, 331, 332, 333, 54, + 347, 114, 13, 377, 318, 108, 312, -238, 126, -225, + -229, -190, 180, -254, 176, -101, -247, -246, -190, -80, + 164, -261, 165, 165, 165, -55, 334, -36, -37, -167, + 143, 197, 82, 82, -229, -228, -227, -266, 199, 180, + -253, -245, 172, 181, -235, 173, 174, 
-230, 165, 29, + -266, -230, 171, 181, 199, 199, 106, 199, 106, 199, + 199, 199, 199, 199, 199, 199, 199, 199, 196, -236, + 118, -236, 351, 351, -241, -266, -266, -266, 167, 34, + 34, -187, -230, 167, 23, -236, -236, -167, 143, -236, + -236, -236, -236, 207, 207, -236, -236, -236, -236, -236, + -236, -236, -236, -236, -236, -236, -236, -236, -236, -236, + -101, -83, 214, 153, 155, 158, 73, 88, 228, 118, + -38, 209, -22, -101, 164, -261, -182, 169, -55, -101, + 150, -101, -180, 126, 13, -180, -177, 289, 293, 294, + 295, -180, -180, -180, -180, 210, 304, -231, 165, 34, + 177, 289, 210, 304, 210, 211, 210, 211, 210, -200, + 12, 128, 325, 309, 306, 203, 164, 204, 166, 310, + -261, 441, 211, -200, 289, 206, -180, -205, -279, -191, + 259, -205, -205, 31, 167, -190, -57, -190, 88, -7, + -3, -11, -10, -12, 118, -78, 289, -66, 144, 456, + 442, 443, 444, 441, 305, 449, 447, 445, 210, 446, + 82, 109, 107, 108, 125, -85, -109, 128, 110, 126, + 127, 112, 130, 129, 140, 133, 134, 135, 136, 137, + 138, 139, 131, 132, 143, 118, 119, 120, 121, 122, + 123, 124, -175, -279, -127, -279, 151, 152, -112, -112, + -112, -112, -112, -112, -112, -112, -112, -112, -279, 150, + -2, -121, -4, -279, -279, -279, -279, -279, -279, -279, + -279, -134, -85, -279, -283, -279, -283, -118, -279, -283, + -118, -283, -118, -283, -283, -118, -283, -118, -283, -283, + -118, -279, -279, -279, -279, -279, -279, -279, -204, -273, + -274, -104, -101, -279, 88, -138, -3, -55, -157, 20, + 32, -85, -139, -140, -85, -138, 56, -74, -76, -79, + 60, 61, 94, 12, -193, -192, 23, -190, 88, 150, + 12, -102, 27, -101, -87, -88, -89, -90, -104, -128, + -279, 12, -94, -95, -101, -103, -195, 82, 233, -170, + -207, -172, -171, 315, 317, 118, -194, -190, 88, 30, + 83, 82, -101, -209, -212, -214, -213, -215, -210, -211, + 256, 257, 144, 260, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 31, 188, 252, 253, 254, 255, + 272, 273, 274, 275, 276, 277, 278, 279, 239, 258, + 345, 240, 241, 242, 243, 244, 245, 247, 248, 249, + 
250, 251, -264, -261, 81, 83, 82, -216, 81, -83, + -183, 169, -252, -249, 74, -261, -261, -261, -183, -236, + -236, 196, -29, -26, -257, 16, -25, -26, 158, 102, + 103, 155, 81, -225, 81, -234, -264, -261, 81, 29, + 171, 170, -233, -230, -233, -234, -261, -129, -190, -195, + -261, 29, 29, -163, -190, -163, -163, 21, -163, 21, + -163, 21, 89, -190, -163, 21, -163, 21, -163, 21, + -163, 21, -163, 21, 30, 75, 76, 30, 78, 79, + 80, -129, -129, -225, -167, -101, -261, 89, 89, -236, + -236, 89, 88, 88, 88, -236, -236, 89, 88, -261, + 88, -267, 182, 224, 226, 89, 89, 89, 89, 30, + 88, -268, 30, 463, 462, 464, 465, 466, 89, 30, + 89, 30, 89, -190, 81, -82, 216, 118, 205, 205, + 164, 164, 218, -101, 229, 230, 228, 21, 217, 219, + 221, 41, 82, 167, -182, 73, -96, -101, 24, -182, + -196, -195, -188, 88, -85, -232, 12, 128, -200, -200, + -180, -101, -232, -200, -180, -101, -180, -180, -180, -180, + -200, -180, -195, -195, -101, -101, -101, -101, -101, -101, + -101, -205, -205, -205, -181, 126, -180, 73, -203, 236, + 270, 435, 436, 437, 82, 347, -94, 441, 441, 441, + 441, 441, 441, -85, -85, -85, -85, -119, 98, 110, + 99, 100, -112, -120, -124, -127, 93, 128, 126, 127, + 112, -112, -112, -112, -112, -112, -112, -112, -112, -112, + -112, -112, -112, -112, -112, -112, -206, -261, 88, 144, + -261, -111, -111, -190, -75, 22, 37, -74, -191, -196, + -188, -70, -280, -280, -138, -74, -74, -85, -85, -129, + 88, -74, -129, 88, -74, -74, -69, 22, 37, -132, + -133, 114, -129, -280, -112, -190, -190, -74, -75, -75, + -74, -74, 82, -275, 317, 318, 439, -198, 199, -197, + 23, -195, 88, -122, -121, -142, -280, -143, 27, 10, + 128, 82, 19, 82, -141, 25, 26, -142, -113, -190, + 89, 92, -86, 82, 12, -79, -101, -192, 135, -196, + -101, -162, 199, -101, 31, 82, -97, -99, -98, -100, + 63, 67, 69, 64, 65, 66, 70, -201, 23, -87, + -3, -279, -101, -94, -281, 82, 12, 74, -281, 82, + 150, -170, -172, 82, 316, 318, 319, 73, 101, -85, + -218, 143, -243, -242, -241, -225, -227, -228, -229, 83, + -144, -221, 
284, -216, -216, -216, -216, -216, -217, -167, + -217, -217, -217, 81, 81, -216, -216, -216, -216, -219, + 81, -219, -219, -220, 81, -220, -254, -85, -251, -250, + -248, -249, 175, 95, 347, -246, -141, 89, -82, -101, + 110, 73, -190, -252, -252, -252, -195, -261, 88, -261, + 88, 82, 17, -226, -225, -130, 224, -256, 199, -253, + -247, 81, 29, -233, -234, -234, 150, -261, 82, 27, + 106, 106, 106, 106, 347, 155, 31, -225, -130, -206, + 167, -206, -206, 88, 88, -179, 471, -94, 166, 223, + -84, 330, 88, 84, -101, -101, -101, -101, -101, 158, + 155, 207, -101, -101, -94, -101, 82, -60, 184, 179, + -195, -101, -180, -180, -101, -180, -180, 88, -101, -190, + -66, 317, 347, 20, -67, 20, 98, 99, 100, -120, + -112, -112, -112, -73, 189, 109, -280, -280, -74, -74, + -279, 150, -5, -142, -280, -280, 82, 74, 23, 12, + 12, -280, 12, 12, -280, -280, -74, -135, -133, 116, + -85, -280, -280, 82, 82, -280, -280, -280, -280, -280, + -274, 438, 318, -105, 71, 168, 72, -279, -197, -280, + -157, 39, 47, 58, -85, -85, -140, -157, -174, 20, + 12, 54, 54, -106, 13, -76, -87, -79, 150, -106, + -110, 31, 54, -3, -279, -279, -165, -169, -129, -88, + -89, -89, -88, -89, 63, 63, 63, 68, 63, 68, + 63, -98, -195, -280, -280, -3, -162, 74, -87, -101, + -87, -103, -195, 135, -171, -173, 320, 317, 323, -261, + 88, 82, -241, -229, 98, 110, 30, 73, 281, 95, + 171, 29, 170, -222, 285, -217, -217, -218, -261, 88, + 144, -218, -218, -218, -224, 88, -224, 89, 89, 83, + -32, -27, -28, 32, 77, -248, -236, 88, 38, 83, + 166, 24, -101, 73, 73, 73, 16, -159, -190, 82, + 83, -131, 225, -129, 83, -190, 83, -159, -234, -191, + -190, -279, 164, 30, 30, -130, -131, -218, -261, 473, + 472, 83, -101, -81, 214, 222, 81, 85, -263, 74, + 205, 281, 205, 208, 167, -60, -32, -101, -200, -200, + 32, 317, 450, 448, -73, 109, -112, -112, -280, -280, + -75, -191, -138, -157, -208, 144, 256, 188, 254, 250, + 270, 261, 283, 252, 284, -206, -208, -112, -112, -112, + -112, 344, -138, 117, -85, 115, -112, -112, 165, 165, + 165, 
-163, 40, 88, 88, 59, -101, -136, 14, -85, + 135, -142, -164, 73, -165, -123, -125, -124, -279, -158, + -280, -190, -163, -106, 82, 118, -92, -91, 73, 74, + -93, 73, -91, 63, 63, -280, -106, -87, -106, -106, + 150, 317, 321, 322, -241, 98, -112, 10, 88, 29, + 29, -218, -218, 83, 82, 83, 82, 83, 82, -184, + 384, 110, -28, -27, -236, -236, 89, -261, -101, -101, + -101, -101, 17, 82, -225, -129, 54, -251, 83, -255, + -256, -101, -111, -131, -160, 81, 83, -260, 347, -262, + -261, -190, -190, -190, -101, -180, -180, 32, -261, -112, + -280, -142, -280, -216, -216, -216, -220, -216, 244, -216, + 244, -280, -280, 20, 20, 20, 20, -279, -65, 340, + -85, 82, 82, -279, -279, -279, -280, 88, -217, -137, + 15, 17, 28, -164, 82, -280, -280, 82, 54, 150, + -280, -138, -169, -85, -85, 81, -85, -138, -106, -115, + -217, 88, -217, 89, 89, 384, 30, 78, 79, 80, + 30, 75, 76, -160, -159, -190, 201, 183, -280, 82, + -223, 347, 350, 23, -159, -259, -258, -191, 81, 74, + -157, -217, -261, -112, -112, -112, -112, -112, -142, 88, + -112, -112, -161, -280, -190, 171, -161, -161, -198, -217, + -146, -151, -177, -85, -121, 29, -125, 54, -3, -190, + -123, -190, -142, -159, -142, -218, -218, 83, 83, 23, + 202, -101, -256, 351, 351, -3, 83, 82, 118, -159, + -101, -280, -280, -280, -280, -68, 128, 347, -280, -280, + -280, 82, -280, -280, -280, -105, -149, 434, -154, 43, + -152, -153, 44, -150, 45, 53, 10, -123, 150, 83, + -3, -279, 81, -58, 347, -258, -240, -191, 88, 89, + 83, -280, 345, 70, 348, -190, 171, -146, 48, 262, + -156, -155, 52, 44, -153, 17, 46, 17, -165, -190, + -58, -112, 198, -159, -59, 213, 438, -263, 59, 346, + 349, -147, 50, -145, 49, -145, -155, 17, 17, 88, + 17, 88, -280, -280, 83, 176, -260, 59, -148, 51, + 73, 101, 88, 88, 88, -270, -271, 73, 215, 347, + 73, 101, -271, 73, 11, 10, 348, -269, 184, 179, + 182, 31, -269, 349, 178, 30, 98, } var yyDef = [...]int{ - 32, -2, 2, 4, 5, 6, 7, 8, 9, 10, + 34, -2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 
23, 24, 25, 26, 27, 28, 29, 30, - 31, 790, 0, 523, 523, 523, 523, 523, 523, 523, - 0, 0, -2, -2, -2, 814, 36, 0, 0, 0, - 0, -2, 481, 482, 0, 484, -2, 0, 0, 493, - 1309, 1309, 1309, 0, 0, 0, 0, 1307, 53, 54, - 499, 500, 501, 1, 3, 0, 527, 798, 0, 0, - -2, 525, 0, 0, 906, 906, 906, 0, 84, 85, - 0, 0, 0, 814, 904, 0, 904, 0, 912, 913, - 914, 104, 105, 88, -2, 109, 110, 0, 114, 367, - 328, 370, 326, 356, -2, 319, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 331, 223, - 223, 0, 0, -2, 319, 319, 319, 0, 0, 0, - 353, 908, 273, 223, 223, 0, 223, 223, 223, 223, - 0, 0, 223, 223, 223, 223, 223, 223, 223, 223, - 223, 223, 223, 223, 223, 223, 223, 0, 103, 827, - 0, 0, 113, 37, 33, 34, 35, 0, 902, 0, - 902, 902, 0, 419, 611, 922, 923, 1059, 1060, 1061, - 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, - 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, - 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, - 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, - 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, - 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, - 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, - 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, - 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, - 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, - 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, - 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, - 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, - 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, - 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, - 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, - 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, - 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, - 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, - 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, - 1262, 1263, 
1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, - 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, - 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, - 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, - 1302, 1303, 1304, 1305, 1306, 0, 472, 472, 472, 472, - 472, 472, 0, 428, 0, 0, 0, 0, 0, 0, - 0, 444, 0, 447, 0, 0, 454, 472, 1310, 1310, - 1310, 893, 0, 478, 479, 466, 464, 461, 462, 480, - 483, 0, 488, 491, 918, 919, 0, 506, 0, 1130, - 498, 511, 512, 522, 38, 662, 621, 0, 627, 629, - 0, 664, 665, 666, 667, 668, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 694, 695, 696, 697, - 775, 776, 777, 778, 779, 780, 781, 782, 631, 632, - 772, 0, 882, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 763, 0, 732, 732, 732, 732, 732, 732, - 732, 732, 732, 0, 0, 0, 0, 0, 0, 0, - -2, -2, 1309, 0, 521, 790, 49, 0, 523, 528, - 529, 833, 0, 0, 790, 1308, 0, 0, -2, -2, - 539, 545, 546, 547, 548, 524, 0, 551, 555, 0, - 0, 0, 907, 0, 0, 70, 0, 1278, 886, -2, - -2, 0, 0, 920, 921, 895, -2, 926, 927, 928, - 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, - 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, - 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, - 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, - 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, - 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, - 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, - 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, - 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, - 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, - 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, - 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, - 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, - -2, 1077, 0, 0, 123, 124, 0, 36, 249, 0, - 119, 0, 243, 181, 827, 0, 0, 0, 0, 90, - 111, 112, 223, 223, 0, 113, 113, 335, 336, 337, - 0, 0, -2, 247, 0, 320, 0, 0, 237, 237, - 241, 239, 240, 0, 0, 0, 0, 0, 0, 347, - 0, 348, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 403, 0, 224, 
0, 365, 366, 274, 0, 0, - 0, 0, 345, 346, 0, 0, 909, 910, 0, 0, - 223, 223, 0, 0, 0, 0, 223, 223, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 100, 818, 0, 0, 0, 0, 0, - 0, -2, 0, 411, 0, 0, 0, 0, 0, 0, - 418, 0, 420, 421, 0, 0, 422, 423, 424, 425, - 426, 472, 0, 472, 0, 472, 472, 472, 472, 469, - 0, 469, 467, 468, 459, 460, 1310, 1310, 0, 1310, - 1310, 0, 1310, 1310, 0, 232, 233, 234, 475, 451, - 452, 455, 456, 1311, 1312, 457, 458, 894, 489, 492, - 509, 507, 508, 510, 502, 503, 504, 505, 0, 0, - 0, 0, 0, 0, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 649, 650, 651, 652, 653, 654, - 655, 628, 0, 642, 0, 0, 0, 684, 685, 686, - 687, 688, 689, 690, 691, 692, 0, 536, 0, 0, - 0, 790, 0, 0, 0, 0, 0, 0, 0, 533, - 0, 764, 0, 715, 0, 716, 724, 0, 717, 725, - 718, 726, 719, 720, 727, 721, 728, 722, 723, 729, - 0, 0, 0, 536, 536, 0, 0, 39, 513, 514, - 0, 594, 798, 0, 538, 836, 0, 0, 799, 791, - 792, 795, 798, 0, 560, 549, 540, 543, 544, 526, - 0, 552, 556, 0, 558, 559, 0, 0, 68, 0, - 610, 0, 562, 564, 565, 566, 592, 0, 0, 0, - 0, 64, 66, 611, 0, 1278, 892, 0, 72, 73, - 0, 0, 0, 204, 897, 898, 899, -2, 230, 0, - 192, 188, 132, 133, 134, 181, 136, 181, 181, 181, - 181, 201, 201, 201, 201, 164, 165, 166, 167, 168, - 0, 0, 151, 181, 181, 181, 181, 171, 172, 173, - 174, 175, 176, 177, 178, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 183, 183, 183, 185, 185, 0, - 37, 0, 215, 0, 795, 0, 818, 99, 0, 915, - 102, 0, 0, 368, 329, 357, 369, 0, 332, 333, - -2, 0, 0, 319, 0, 321, 0, 231, 0, -2, - 0, 0, 0, 237, 241, 238, 241, 229, 242, 349, - 772, 0, 350, 351, 0, 383, 580, 0, 0, 0, - 0, 0, 389, 390, 391, 0, 393, 394, 395, 396, - 397, 398, 399, 400, 401, 402, 358, 359, 360, 361, - 362, 363, 364, 0, 0, 321, 0, 354, 0, 275, - 276, 0, 0, 279, 280, 281, 282, 0, 0, 285, - 286, 287, 288, 289, 313, 314, 315, 290, 291, 292, - 293, 294, 295, 296, 307, 308, 309, 310, 311, 312, - 297, 298, 299, 300, 301, 304, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 815, 816, 817, 0, - 0, 0, 262, 
903, 0, 262, 62, 417, 612, 924, - 925, 473, 474, 427, 445, 429, 448, 430, 432, 431, - 433, 472, 0, 0, 0, 235, 236, 472, 436, 437, - 438, 439, 440, 441, 442, 443, 0, 450, 0, 0, - 0, 490, 494, 495, 496, 497, 663, 622, 623, 624, - 626, 643, 0, 645, 647, 633, 634, 658, 659, 660, - 0, 0, 0, 0, 656, 638, 0, 669, 670, 671, - 672, 673, 674, 675, 676, 677, 678, 679, 680, 683, - 747, 748, 749, 0, 681, 682, 693, 0, 0, 0, - 537, 773, 0, -2, 0, 661, 881, 798, 0, 0, - 0, 0, 666, 775, 0, 666, 775, 0, 0, 0, - 534, 535, 770, 767, 0, 0, 733, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 516, 517, 519, 0, - 614, 0, 595, 0, 597, 598, 833, 50, 40, 0, - 834, 0, 0, 0, 0, 794, 796, 797, 833, 0, - 783, 0, 0, 619, 0, 0, 541, 46, 557, 553, - 0, 619, 0, 0, 609, 0, 0, 0, 0, 0, - 0, 599, 0, 0, 602, 0, 0, 0, 0, 593, - 0, 0, 0, -2, 0, 0, 0, 60, 61, 0, - 0, 0, 887, 71, 0, 0, 76, 77, 888, 889, - 890, 891, 0, 106, -2, 270, 125, 127, 128, 129, - 120, 195, 193, 0, 190, 189, 135, 201, 201, 158, - 159, 204, 0, 204, 204, 204, 0, 0, 152, 153, - 154, 155, 146, 0, 147, 148, 149, 0, 150, 248, - 0, 802, 216, 217, 219, 223, 0, 0, 0, 244, - 245, 0, 0, 905, 0, 916, 115, 116, 117, 118, - 113, 0, 0, 121, 323, 0, 0, 0, 246, 0, - 0, 225, 241, 226, 227, 0, 352, 0, 0, 385, - 386, 387, 388, 0, 0, 0, 321, 323, 204, 0, - 277, 278, 283, 284, 302, 0, 0, 0, 0, 828, - 829, 0, 832, 91, 375, 377, 376, 380, 0, 0, - 0, 0, 412, 414, 263, 264, 1310, 0, 416, 434, - 470, 471, 469, 449, 469, 476, 453, 486, 644, 646, - 648, 635, 656, 639, 0, 636, 0, 0, 630, 698, - 0, 0, 536, 0, 790, 833, 702, 703, 0, 0, - 0, 0, 0, 740, 0, 0, 741, 0, 790, 0, - 768, 0, 0, 714, 734, 0, 0, 735, 736, 737, - 738, 739, 515, 518, 520, 570, 0, 0, 0, 0, - 596, 42, 0, 0, 0, 800, 801, 793, 41, 0, - 900, 901, 784, 785, 786, 0, 550, 561, 542, 0, - 798, 875, 0, 0, 867, 0, 0, 619, 883, 0, - 563, 588, 590, 0, 585, 600, 601, 603, 0, 605, - 0, 607, 608, 567, 568, 569, 0, 619, 0, 619, - 65, 619, 67, 0, 613, 74, 75, 0, 0, 81, - 205, 206, 113, 272, 126, 197, 0, 194, 131, 191, - 204, 204, 
160, 202, 203, 161, 162, 163, 0, 179, - 0, 0, 0, 265, 86, 806, 805, 223, 223, 218, - 0, 221, 0, 917, 182, 0, 0, 0, 327, 574, - 0, 338, 339, 0, 322, 382, 0, 215, 0, 228, - 773, 581, 0, 0, 340, 0, 323, 343, 344, 355, - 305, 306, 303, 572, 819, 820, 821, 0, 831, 94, - 0, 0, 0, 0, 373, 0, 415, 63, 472, 472, - 637, 0, 657, 640, 699, 700, 0, 774, 798, 44, - 0, 181, 181, 753, 181, 185, 756, 181, 758, 181, - 761, 0, 0, 0, 0, 0, 0, 0, 765, 713, - 771, 0, 0, 0, 0, 0, 0, 0, 0, 201, - 838, 835, 43, 788, 0, 620, 554, 47, 51, 0, - 875, 866, 877, 879, 0, 0, 0, 871, 0, 790, - 0, 0, 582, 589, 0, 0, 583, 0, 584, 604, - 606, -2, 790, 619, 58, 59, 0, 78, 79, 80, - 271, 199, 0, 196, 156, 157, 201, 0, 201, 0, - 186, 0, 254, 266, 0, 803, 804, 0, 0, 220, - 222, 572, 101, 0, 0, 122, 324, 0, 214, 0, - 0, 407, 404, 341, 342, 0, 0, 830, 374, 0, - 92, 93, 0, 0, 379, 413, 435, 446, 641, 701, - 833, 704, 750, 201, 754, 755, 757, 759, 760, 762, - 706, 705, 0, 0, 0, 0, 0, 798, 0, 769, - 0, 0, 0, 0, 0, 594, 201, 858, 48, 0, - 0, 0, 52, 0, 880, 0, 0, 0, 0, 69, - 798, 884, 885, 586, 0, 591, 798, 57, 207, 200, - 0, 204, 180, 204, 0, 0, 267, 807, 808, 809, - 810, 811, 812, 813, 0, 330, 575, 0, 0, 384, - 0, 392, 0, 0, 0, 0, 95, 96, 0, 0, - 0, 45, 751, 752, 0, 0, 0, 0, 742, 0, - 766, 0, 0, 0, 616, 576, 577, 0, 0, 614, - 840, 839, 852, 856, 789, 787, 0, 878, 0, 870, - 873, 869, 872, 55, 0, 56, 212, 0, 209, 211, - 198, 169, 170, 184, 187, 0, 0, 0, 408, 405, - 406, 822, 573, 0, 0, 0, 381, 707, 709, 708, - 710, 0, 0, 0, 712, 730, 731, 0, 615, 617, - 618, 571, 858, 0, 851, 854, -2, 0, 0, 868, - 0, 587, 130, 0, 208, 210, 822, 0, 0, 371, - 824, 97, 98, 316, 317, 318, 91, 711, 0, 0, - 0, 578, 579, 845, 843, 843, 856, 0, 860, 0, - 865, 0, 876, 874, 213, 87, 0, 0, 0, 0, - 825, 826, 94, 743, 0, 746, 848, 0, 841, 844, - 842, 853, 0, 859, 0, 0, 857, 409, 410, 250, - 0, 378, 744, 837, 0, 846, 847, 855, 0, 0, - 251, 252, 0, 823, 0, 849, 850, 861, 863, 253, - 0, 0, 0, 0, 255, 257, 258, 0, 0, 256, - 745, 259, 260, 261, + 
31, 32, 33, 825, 0, 557, 557, 557, 557, 557, + 557, 557, 0, 0, -2, -2, -2, 849, 961, 0, + 938, 0, 0, -2, 490, 491, 0, 493, -2, 0, + 0, 502, 1370, 1370, 552, 0, 0, 0, 0, 0, + 0, 1368, 55, 56, 508, 509, 510, 1, 3, 0, + 561, 833, 0, 0, -2, 559, 0, 0, 944, 944, + 944, 0, 86, 87, 0, 0, 0, 849, 0, 0, + 0, 0, 0, 557, 0, 939, 109, 110, 90, -2, + 114, 115, 0, 119, 368, 329, 371, 327, 357, -2, + 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 332, 224, 224, 0, 0, -2, 320, + 320, 320, 0, 0, 0, 354, 946, 274, 224, 224, + 0, 224, 224, 224, 224, 0, 0, 224, 224, 224, + 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, + 224, 224, 0, 108, 862, 0, 0, 0, 118, 962, + 959, 960, 35, 36, 37, 1102, 1103, 1104, 1105, 1106, + 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, + 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, + 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, + 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, + 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, + 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, + 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, + 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, + 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, + 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, + 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, + 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, + 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, + 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, + 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, + 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, + 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, + 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, + 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, + 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 
1316, + 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, + 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, + 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, + 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, + 1367, 0, 0, 0, 940, 557, 0, 424, 646, 0, + 481, 481, 0, 481, 481, 481, 481, 0, 0, 0, + 436, 0, 0, 0, 0, 478, 0, 0, 455, 457, + 0, 478, 0, 465, 481, 1371, 1371, 1371, 929, 0, + 475, 473, 487, 488, 470, 471, 489, 492, 0, 497, + 500, 955, 956, 0, 515, 0, 1178, 507, 520, 521, + 0, 553, 554, 40, 697, 656, 0, 662, 664, 0, + 699, 700, 701, 702, 703, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 729, 730, 731, 732, 810, + 811, 812, 813, 814, 815, 816, 817, 666, 667, 807, + 0, 918, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 798, 0, 767, 767, 767, 767, 767, 767, 767, + 767, 767, 0, 0, 0, 0, 0, 0, 0, -2, + -2, 1370, 0, 530, 0, 0, 825, 51, 0, 557, + 562, 563, 868, 0, 0, 825, 1369, 0, 0, -2, + -2, 573, 579, 580, 581, 582, 583, 558, 0, 586, + 590, 0, 0, 0, 945, 0, 0, 72, 0, 1334, + 922, -2, -2, 0, 0, 957, 958, 931, -2, 965, + 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, + 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, + 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, + 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, + 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, + 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, + 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, + 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, + 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, + 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, + 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, + 1096, 1097, 1098, 1099, 1100, 1101, -2, 0, 0, 128, + 129, 0, 38, 250, 0, 124, 0, 244, 197, 862, + 942, 952, 0, 0, 
0, 942, 92, 116, 117, 224, + 224, 0, 118, 118, 336, 337, 338, 0, 0, -2, + 248, 0, 321, 0, 0, 238, 238, 242, 240, 241, + 0, 0, 0, 0, 0, 0, 348, 0, 349, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 408, 0, + 225, 0, 366, 367, 275, 0, 0, 0, 0, 346, + 347, 0, 0, 947, 948, 0, 0, 224, 224, 0, + 0, 0, 0, 224, 224, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 102, 853, 0, 0, 0, 0, 0, 0, 0, 0, + -2, 0, 416, 0, 940, 0, 0, 0, 940, 423, + 0, 425, 426, 0, 0, 427, 0, 478, 478, 476, + 477, 429, 430, 431, 432, 481, 0, 0, 233, 234, + 235, 478, 481, 0, 481, 481, 481, 481, 478, 481, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1371, + 1371, 1371, 484, 481, 462, 463, 466, 467, 1372, 1373, + 976, 468, 469, 930, 498, 501, 518, 516, 517, 519, + 511, 512, 513, 514, 0, 532, 533, 538, 0, 0, + 0, 0, 544, 545, 546, 0, 0, 549, 550, 551, + 0, 0, 0, 0, 0, 660, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 684, 685, 686, 687, 688, + 689, 690, 663, 0, 677, 0, 0, 0, 719, 720, + 721, 722, 723, 724, 725, 726, 727, 0, 570, 0, + 0, 0, 825, 0, 0, 0, 0, 0, 0, 0, + 567, 0, 799, 0, 750, 0, 751, 759, 0, 752, + 760, 753, 761, 754, 755, 762, 756, 763, 757, 758, + 764, 0, 0, 0, 570, 570, 0, 0, 41, 522, + 523, 0, 629, 950, 531, 833, 0, 572, 871, 0, + 0, 834, 826, 827, 830, 833, 0, 595, 584, 574, + 577, 578, 560, 0, 587, 591, 0, 593, 594, 0, + 0, 70, 0, 645, 0, 597, 599, 600, 601, 627, + 0, 0, 0, 0, 66, 68, 646, 0, 1334, 928, + 0, 74, 75, 0, 0, 0, 212, 933, 934, 935, + -2, 231, 0, 136, 204, 148, 149, 150, 197, 152, + 197, 197, 197, 197, 208, 208, 208, 208, 180, 181, + 182, 183, 184, 0, 0, 167, 197, 197, 197, 197, + 187, 188, 189, 190, 191, 192, 193, 194, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 199, 199, 199, + 201, 201, 0, 39, 0, 216, 0, 830, 0, 853, + 0, 0, 0, 953, 0, 952, 952, 952, 0, 0, + 0, 369, 330, 358, 370, 0, 333, 334, -2, 0, + 0, 320, 0, 322, 0, 232, 0, -2, 0, 0, + 0, 238, 242, 239, 242, 230, 243, 350, 807, 0, + 351, 352, 0, 388, 615, 0, 0, 0, 0, 0, + 394, 395, 396, 0, 398, 399, 400, 401, 402, 403, + 404, 
405, 406, 407, 359, 360, 361, 362, 363, 364, + 365, 0, 0, 322, 0, 355, 0, 276, 277, 0, + 0, 280, 281, 282, 283, 0, 0, 286, 287, 288, + 289, 290, 314, 315, 316, 291, 292, 293, 294, 295, + 296, 297, 308, 309, 310, 311, 312, 313, 298, 299, + 300, 301, 302, 305, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 383, 384, 385, 386, 850, 851, + 852, 0, 0, 0, 0, 0, 263, 64, 941, 0, + 647, 963, 964, 482, 483, 0, 236, 237, 481, 481, + 433, 456, 0, 481, 437, 458, 438, 440, 439, 441, + 481, 444, 479, 480, 445, 446, 447, 448, 449, 450, + 451, 452, 453, 454, 460, 0, 461, 0, 0, 499, + 503, 504, 505, 506, 0, 0, 535, 540, 541, 542, + 543, 555, 548, 698, 657, 658, 659, 661, 678, 0, + 680, 682, 668, 669, 693, 694, 695, 0, 0, 0, + 0, 691, 673, 0, 704, 705, 706, 707, 708, 709, + 710, 711, 712, 713, 714, 715, 718, 782, 783, 784, + 0, 716, 717, 728, 0, 0, 0, 571, 808, 0, + -2, 0, 696, 917, 833, 0, 0, 0, 0, 701, + 810, 0, 701, 810, 0, 0, 0, 568, 569, 805, + 802, 0, 0, 768, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 525, 526, 528, 0, 649, 0, 630, + 0, 632, 633, 0, 951, 868, 52, 42, 0, 869, + 0, 0, 0, 0, 829, 831, 832, 868, 0, 818, + 0, 0, 654, 0, 0, 575, 48, 592, 588, 0, + 654, 0, 0, 644, 0, 0, 0, 0, 0, 0, + 634, 0, 0, 637, 0, 0, 0, 0, 628, 0, + 0, 0, -2, 0, 0, 0, 62, 63, 0, 0, + 0, 923, 73, 0, 0, 78, 79, 924, 925, 926, + 927, 0, 111, -2, 271, 130, 132, 133, 134, 125, + 135, 206, 205, 151, 208, 208, 174, 175, 212, 0, + 212, 212, 212, 0, 0, 168, 169, 170, 171, 162, + 0, 163, 164, 165, 0, 166, 249, 0, 837, 217, + 218, 220, 224, 0, 0, 245, 246, 0, 0, 101, + 0, 0, 954, 0, 0, 0, 107, 120, 121, 122, + 123, 118, 0, 0, 126, 324, 0, 0, 0, 247, + 0, 0, 226, 242, 227, 228, 0, 353, 0, 0, + 390, 391, 392, 393, 0, 0, 0, 322, 324, 212, + 0, 278, 279, 284, 285, 303, 0, 0, 0, 0, + 863, 864, 0, 867, 93, 376, 378, 377, 381, 0, + 0, 0, 0, 417, 263, 837, 0, 421, 264, 265, + 422, 478, 443, 459, 478, 435, 442, 485, 464, 495, + 539, 0, 0, 0, 547, 0, 679, 681, 683, 670, + 691, 674, 0, 671, 0, 0, 665, 733, 0, 0, + 570, 0, 825, 868, 737, 738, 0, 
0, 0, 0, + 0, 775, 0, 0, 776, 0, 825, 0, 803, 0, + 0, 749, 769, 0, 0, 770, 771, 772, 773, 774, + 524, 527, 529, 605, 0, 0, 0, 0, 631, 949, + 44, 0, 0, 0, 835, 836, 828, 43, 0, 936, + 937, 819, 820, 821, 0, 585, 596, 576, 0, 833, + 911, 0, 0, 903, 0, 0, 654, 919, 0, 598, + 623, 625, 0, 620, 635, 636, 638, 0, 640, 0, + 642, 643, 602, 603, 604, 0, 654, 0, 654, 67, + 654, 69, 0, 648, 76, 77, 0, 0, 83, 213, + 214, 118, 273, 131, 137, 0, 0, 0, 141, 0, + 0, 144, 146, 147, 207, 212, 212, 176, 209, 210, + 211, 177, 178, 179, 0, 195, 0, 0, 0, 266, + 88, 841, 840, 224, 224, 219, 0, 222, 0, 198, + 0, 943, 103, 0, 0, 0, 0, 328, 609, 0, + 339, 340, 0, 323, 387, 0, 216, 0, 229, 808, + 616, 0, 0, 341, 0, 324, 344, 345, 356, 306, + 307, 304, 607, 854, 855, 856, 0, 866, 96, 0, + 0, 0, 0, 374, 0, 419, 420, 65, 481, 481, + 534, 0, 537, 0, 672, 0, 692, 675, 734, 735, + 0, 809, 833, 46, 0, 197, 197, 788, 197, 201, + 791, 197, 793, 197, 796, 0, 0, 0, 0, 0, + 0, 0, 800, 748, 806, 0, 0, 0, 0, 0, + 0, 0, 0, 208, 873, 870, 45, 823, 0, 655, + 589, 49, 53, 0, 911, 902, 913, 915, 0, 0, + 0, 907, 0, 825, 0, 0, 617, 624, 0, 0, + 618, 0, 619, 639, 641, -2, 825, 654, 60, 61, + 0, 80, 81, 82, 272, 138, 139, 0, 142, 143, + 145, 172, 173, 208, 0, 208, 0, 202, 0, 255, + 267, 0, 838, 839, 0, 0, 221, 223, 607, 104, + 105, 106, 0, 0, 127, 325, 0, 215, 0, 0, + 412, 409, 342, 343, 0, 0, 865, 375, 0, 94, + 95, 0, 0, 380, 418, 428, 434, 536, 556, 676, + 736, 868, 739, 785, 208, 789, 790, 792, 794, 795, + 797, 741, 740, 0, 0, 0, 0, 0, 833, 0, + 804, 0, 0, 0, 0, 0, 629, 208, 893, 50, + 0, 0, 0, 54, 0, 916, 0, 0, 0, 0, + 71, 833, 920, 921, 621, 0, 626, 833, 59, 140, + 212, 196, 212, 0, 0, 268, 842, 843, 844, 845, + 846, 847, 848, 0, 331, 610, 0, 0, 389, 0, + 397, 0, 0, 0, 0, 97, 98, 0, 0, 0, + 47, 786, 787, 0, 0, 0, 0, 777, 0, 801, + 0, 0, 0, 651, 611, 612, 0, 0, 649, 875, + 874, 887, 900, 824, 822, 0, 914, 0, 906, 909, + 905, 908, 57, 0, 58, 185, 186, 200, 203, 0, + 0, 0, 413, 410, 411, 857, 608, 0, 0, 0, + 
382, 742, 744, 743, 745, 0, 0, 0, 747, 765, + 766, 0, 650, 652, 653, 606, 893, 0, 886, 0, + -2, 895, 0, 0, 0, 901, 0, 904, 0, 622, + 857, 0, 0, 372, 859, 99, 100, 317, 318, 319, + 93, 746, 0, 0, 0, 613, 614, 880, 878, 878, + 888, 889, 0, 0, 896, 0, 0, 0, 912, 910, + 89, 0, 0, 0, 0, 860, 861, 96, 778, 0, + 781, 883, 0, 876, 879, 877, 890, 0, 0, 897, + 0, 899, 414, 415, 251, 0, 379, 779, 872, 0, + 881, 882, 891, 892, 898, 252, 253, 0, 858, 0, + 884, 885, 254, 0, 0, 0, 0, 256, 258, 259, + 0, 0, 257, 780, 260, 261, 262, } var yyTok1 = [...]int{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 110, 3, 3, 3, 137, 129, 3, - 80, 82, 134, 132, 81, 133, 149, 135, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 451, - 118, 117, 119, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 111, 3, 3, 3, 138, 130, 3, + 81, 83, 135, 133, 82, 134, 150, 136, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 474, + 119, 118, 120, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 139, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 140, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 128, 3, 140, + 3, 3, 3, 3, 129, 3, 141, } var yyTok2 = [...]int{ @@ -4824,13 +5456,13 @@ var yyTok2 = [...]int{ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 83, 84, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 111, 112, 113, 114, 115, - 116, 120, 121, 122, 123, 124, 125, 126, 127, 130, - 131, 136, 138, 141, 142, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 105, 106, 107, 108, 109, 110, 112, 113, 114, 115, + 116, 117, 121, 122, 123, 124, 125, 126, 127, 128, + 131, 132, 137, 139, 142, 143, 144, 145, 146, 147, + 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 
165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, @@ -4881,7 +5513,11 @@ var yyTok3 = [...]int{ 57760, 435, 57761, 436, 57762, 437, 57763, 438, 57764, 439, 57765, 440, 57766, 441, 57767, 442, 57768, 443, 57769, 444, 57770, 445, 57771, 446, 57772, 447, 57773, 448, 57774, 449, - 57775, 450, 0, + 57775, 450, 57776, 451, 57777, 452, 57778, 453, 57779, 454, + 57780, 455, 57781, 456, 57782, 457, 57783, 458, 57784, 459, + 57785, 460, 57786, 461, 57787, 462, 57788, 463, 57789, 464, + 57790, 465, 57791, 466, 57792, 467, 57793, 468, 57794, 469, + 57795, 470, 57796, 471, 57797, 472, 57798, 473, 0, } var yyErrorMessages = [...]struct { @@ -4894,6 +5530,14 @@ var yyErrorMessages = [...]struct { /* parser for yacc output */ +func yyIaddr(v interface{}) __yyunsafe__.Pointer { + type h struct { + t __yyunsafe__.Pointer + p __yyunsafe__.Pointer + } + return (*h)(__yyunsafe__.Pointer(&v)).p +} + var ( yyDebug = 0 yyErrorVerbose = false @@ -5223,2095 +5867,2529 @@ yydefault: case 1: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:407 +//line sql.y:424 { - setParseTree(yylex, yyDollar[1].statement) + setParseTree(yylex, yyDollar[1].statementUnion()) } case 2: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:412 +//line sql.y:429 { } case 3: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:413 +//line sql.y:430 { } case 4: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:417 + var yyLOCAL Statement +//line sql.y:434 { - yyVAL.statement = yyDollar[1].selStmt + yyLOCAL = yyDollar[1].selStmtUnion() } - case 32: + yyVAL.union = yyLOCAL + case 34: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:448 +//line sql.y:467 { setParseTree(yylex, nil) } - case 33: + case 35: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:454 +//line sql.y:473 { - yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), NoAt) + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].str), NoAt) } - case 34: + case 36: yyDollar = yyS[yypt-1 : yypt+1] -//line 
sql.y:458 +//line sql.y:477 { - yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), SingleAt) + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].str), SingleAt) } - case 35: + case 37: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:462 +//line sql.y:481 { - yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), DoubleAt) + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].str), DoubleAt) } - case 36: + case 38: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:467 +//line sql.y:486 { yyVAL.colIdent = NewColIdentWithAt("", NoAt) } - case 37: + case 39: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:471 +//line sql.y:490 { yyVAL.colIdent = yyDollar[1].colIdent } - case 38: + case 40: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:477 + var yyLOCAL Statement +//line sql.y:496 { - yyVAL.statement = &OtherAdmin{} + yyLOCAL = &OtherAdmin{} } - case 39: + yyVAL.union = yyLOCAL + case 41: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:483 + var yyLOCAL Statement +//line sql.y:502 { - yyVAL.statement = &Load{} + yyLOCAL = &Load{} } - case 40: + yyVAL.union = yyLOCAL + case 42: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:489 + var yyLOCAL SelectStatement +//line sql.y:508 { - sel := yyDollar[1].selStmt.(*Select) - sel.OrderBy = yyDollar[2].orderBy - sel.Limit = yyDollar[3].limit - sel.Lock = yyDollar[4].lock - sel.Into = yyDollar[5].selectInto - yyVAL.selStmt = sel + sel := yyDollar[1].selStmtUnion().(*Select) + sel.OrderBy = yyDollar[2].orderByUnion() + sel.Limit = yyDollar[3].limitUnion() + sel.Lock = yyDollar[4].lockUnion() + sel.Into = yyDollar[5].selectIntoUnion() + yyLOCAL = sel } - case 41: + yyVAL.union = yyLOCAL + case 43: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:498 + var yyLOCAL SelectStatement +//line sql.y:517 { - yyVAL.selStmt = &Union{FirstStatement: &ParenSelect{Select: yyDollar[2].selStmt}, OrderBy: yyDollar[4].orderBy, Limit: yyDollar[5].limit, Lock: yyDollar[6].lock} + yyLOCAL = &Union{FirstStatement: &ParenSelect{Select: 
yyDollar[2].selStmtUnion()}, OrderBy: yyDollar[4].orderByUnion(), Limit: yyDollar[5].limitUnion(), Lock: yyDollar[6].lockUnion()} } - case 42: + yyVAL.union = yyLOCAL + case 44: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:502 + var yyLOCAL SelectStatement +//line sql.y:521 { - yyVAL.selStmt = Unionize(yyDollar[1].selStmt, yyDollar[3].selStmt, yyDollar[2].boolean, yyDollar[4].orderBy, yyDollar[5].limit, yyDollar[6].lock) + yyLOCAL = Unionize(yyDollar[1].selStmtUnion(), yyDollar[3].selStmtUnion(), yyDollar[2].booleanUnion(), yyDollar[4].orderByUnion(), yyDollar[5].limitUnion(), yyDollar[6].lockUnion()) } - case 43: + yyVAL.union = yyLOCAL + case 45: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:506 + var yyLOCAL SelectStatement +//line sql.y:525 { - yyVAL.selStmt = NewSelect(Comments(yyDollar[2].bytes2), SelectExprs{Nextval{Expr: yyDollar[5].expr}}, []string{yyDollar[3].str} /*options*/, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/) + yyLOCAL = NewSelect(Comments(yyDollar[2].strs), SelectExprs{&Nextval{Expr: yyDollar[5].exprUnion()}}, []string{yyDollar[3].str} /*options*/, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/) } - case 44: + yyVAL.union = yyLOCAL + case 46: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:529 + var yyLOCAL SelectStatement +//line sql.y:548 { - sel := yyDollar[1].selStmt.(*Select) - sel.OrderBy = yyDollar[2].orderBy - sel.Limit = yyDollar[3].limit - sel.Lock = yyDollar[4].lock - yyVAL.selStmt = sel + sel := yyDollar[1].selStmtUnion().(*Select) + sel.OrderBy = yyDollar[2].orderByUnion() + sel.Limit = yyDollar[3].limitUnion() + sel.Lock = yyDollar[4].lockUnion() + yyLOCAL = sel } - case 45: + yyVAL.union = yyLOCAL + case 47: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:537 + var yyLOCAL SelectStatement +//line sql.y:556 { - yyVAL.selStmt = Unionize(yyDollar[1].selStmt, yyDollar[3].selStmt, yyDollar[2].boolean, 
yyDollar[4].orderBy, yyDollar[5].limit, yyDollar[6].lock) + yyLOCAL = Unionize(yyDollar[1].selStmtUnion(), yyDollar[3].selStmtUnion(), yyDollar[2].booleanUnion(), yyDollar[4].orderByUnion(), yyDollar[5].limitUnion(), yyDollar[6].lockUnion()) } - case 46: + yyVAL.union = yyLOCAL + case 48: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:543 + var yyLOCAL Statement +//line sql.y:562 { - yyVAL.statement = &Stream{Comments: Comments(yyDollar[2].bytes2), SelectExpr: yyDollar[3].selectExpr, Table: yyDollar[5].tableName} + yyLOCAL = &Stream{Comments: Comments(yyDollar[2].strs), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName} } - case 47: + yyVAL.union = yyLOCAL + case 49: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:549 + var yyLOCAL Statement +//line sql.y:568 { - yyVAL.statement = &VStream{Comments: Comments(yyDollar[2].bytes2), SelectExpr: yyDollar[3].selectExpr, Table: yyDollar[5].tableName, Where: NewWhere(WhereClause, yyDollar[6].expr), Limit: yyDollar[7].limit} + yyLOCAL = &VStream{Comments: Comments(yyDollar[2].strs), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName, Where: NewWhere(WhereClause, yyDollar[6].exprUnion()), Limit: yyDollar[7].limitUnion()} } - case 48: + yyVAL.union = yyLOCAL + case 50: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:557 + var yyLOCAL SelectStatement +//line sql.y:576 { - yyVAL.selStmt = NewSelect(Comments(yyDollar[2].bytes2), yyDollar[4].selectExprs /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].tableExprs /*from*/, NewWhere(WhereClause, yyDollar[6].expr), GroupBy(yyDollar[7].exprs), NewWhere(HavingClause, yyDollar[8].expr)) + yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[6].exprUnion()), GroupBy(yyDollar[7].exprsUnion()), NewWhere(HavingClause, yyDollar[8].exprUnion())) } - case 49: + yyVAL.union = yyLOCAL + case 51: yyDollar = 
yyS[yypt-1 : yypt+1] -//line sql.y:563 + var yyLOCAL SelectStatement +//line sql.y:582 { - yyVAL.selStmt = yyDollar[1].selStmt + yyLOCAL = yyDollar[1].selStmtUnion() } - case 50: + yyVAL.union = yyLOCAL + case 52: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:567 + var yyLOCAL SelectStatement +//line sql.y:586 { - yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} + yyLOCAL = &ParenSelect{Select: yyDollar[2].selStmtUnion()} } - case 51: + yyVAL.union = yyLOCAL + case 53: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:574 + var yyLOCAL Statement +//line sql.y:593 { // insert_data returns a *Insert pre-filled with Columns & Values - ins := yyDollar[6].ins - ins.Action = yyDollar[1].insertAction - ins.Comments = yyDollar[2].bytes2 - ins.Ignore = yyDollar[3].ignore + ins := yyDollar[6].insUnion() + ins.Action = yyDollar[1].insertActionUnion() + ins.Comments = yyDollar[2].strs + ins.Ignore = yyDollar[3].ignoreUnion() ins.Table = yyDollar[4].tableName - ins.Partitions = yyDollar[5].partitions - ins.OnDup = OnDup(yyDollar[7].updateExprs) - yyVAL.statement = ins + ins.Partitions = yyDollar[5].partitionsUnion() + ins.OnDup = OnDup(yyDollar[7].updateExprsUnion()) + yyLOCAL = ins } - case 52: + yyVAL.union = yyLOCAL + case 54: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:586 + var yyLOCAL Statement +//line sql.y:605 { - cols := make(Columns, 0, len(yyDollar[7].updateExprs)) - vals := make(ValTuple, 0, len(yyDollar[8].updateExprs)) - for _, updateList := range yyDollar[7].updateExprs { + cols := make(Columns, 0, len(yyDollar[7].updateExprsUnion())) + vals := make(ValTuple, 0, len(yyDollar[8].updateExprsUnion())) + for _, updateList := range yyDollar[7].updateExprsUnion() { cols = append(cols, updateList.Name.Name) vals = append(vals, updateList.Expr) } - yyVAL.statement = &Insert{Action: yyDollar[1].insertAction, Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].ignore, Table: yyDollar[4].tableName, Partitions: yyDollar[5].partitions, Columns: cols, Rows: 
Values{vals}, OnDup: OnDup(yyDollar[8].updateExprs)} + yyLOCAL = &Insert{Action: yyDollar[1].insertActionUnion(), Comments: Comments(yyDollar[2].strs), Ignore: yyDollar[3].ignoreUnion(), Table: yyDollar[4].tableName, Partitions: yyDollar[5].partitionsUnion(), Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprsUnion())} } - case 53: + yyVAL.union = yyLOCAL + case 55: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:598 + var yyLOCAL InsertAction +//line sql.y:617 { - yyVAL.insertAction = InsertAct - } - case 54: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:602 - { - yyVAL.insertAction = ReplaceAct - } - case 55: - yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:608 - { - yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].ignore, TableExprs: yyDollar[4].tableExprs, Exprs: yyDollar[6].updateExprs, Where: NewWhere(WhereClause, yyDollar[7].expr), OrderBy: yyDollar[8].orderBy, Limit: yyDollar[9].limit} + yyLOCAL = InsertAct } + yyVAL.union = yyLOCAL case 56: - yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:614 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL InsertAction +//line sql.y:621 { - yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].ignore, TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[5].tableName}}, Partitions: yyDollar[6].partitions, Where: NewWhere(WhereClause, yyDollar[7].expr), OrderBy: yyDollar[8].orderBy, Limit: yyDollar[9].limit} + yyLOCAL = ReplaceAct } + yyVAL.union = yyLOCAL case 57: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:618 + yyDollar = yyS[yypt-9 : yypt+1] + var yyLOCAL Statement +//line sql.y:627 { - yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].ignore, Targets: yyDollar[5].tableNames, TableExprs: yyDollar[7].tableExprs, Where: NewWhere(WhereClause, yyDollar[8].expr)} + yyLOCAL = &Update{Comments: Comments(yyDollar[2].strs), Ignore: yyDollar[3].ignoreUnion(), TableExprs: yyDollar[4].tableExprsUnion(), 
Exprs: yyDollar[6].updateExprsUnion(), Where: NewWhere(WhereClause, yyDollar[7].exprUnion()), OrderBy: yyDollar[8].orderByUnion(), Limit: yyDollar[9].limitUnion()} } + yyVAL.union = yyLOCAL case 58: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:622 + yyDollar = yyS[yypt-9 : yypt+1] + var yyLOCAL Statement +//line sql.y:633 { - yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].ignore, Targets: yyDollar[4].tableNames, TableExprs: yyDollar[6].tableExprs, Where: NewWhere(WhereClause, yyDollar[7].expr)} + yyLOCAL = &Delete{Comments: Comments(yyDollar[2].strs), Ignore: yyDollar[3].ignoreUnion(), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[5].tableName}}, Partitions: yyDollar[6].partitionsUnion(), Where: NewWhere(WhereClause, yyDollar[7].exprUnion()), OrderBy: yyDollar[8].orderByUnion(), Limit: yyDollar[9].limitUnion()} } + yyVAL.union = yyLOCAL case 59: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:626 + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Statement +//line sql.y:637 { - yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].ignore, Targets: yyDollar[4].tableNames, TableExprs: yyDollar[6].tableExprs, Where: NewWhere(WhereClause, yyDollar[7].expr)} + yyLOCAL = &Delete{Comments: Comments(yyDollar[2].strs), Ignore: yyDollar[3].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())} } + yyVAL.union = yyLOCAL case 60: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:631 + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL Statement +//line sql.y:641 { + yyLOCAL = &Delete{Comments: Comments(yyDollar[2].strs), Ignore: yyDollar[3].ignoreUnion(), Targets: yyDollar[4].tableNamesUnion(), TableExprs: yyDollar[6].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[7].exprUnion())} } + yyVAL.union = yyLOCAL case 61: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:632 + yyDollar = yyS[yypt-7 : 
yypt+1] + var yyLOCAL Statement +//line sql.y:645 { + yyLOCAL = &Delete{Comments: Comments(yyDollar[2].strs), Ignore: yyDollar[3].ignoreUnion(), Targets: yyDollar[4].tableNamesUnion(), TableExprs: yyDollar[6].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[7].exprUnion())} } + yyVAL.union = yyLOCAL case 62: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:636 +//line sql.y:650 { - yyVAL.tableNames = TableNames{yyDollar[1].tableName.ToViewName()} } case 63: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:640 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:651 { - yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName.ToViewName()) } case 64: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:646 + var yyLOCAL TableNames +//line sql.y:655 { - yyVAL.tableNames = TableNames{yyDollar[1].tableName} + yyLOCAL = TableNames{yyDollar[1].tableName.ToViewName()} } + yyVAL.union = yyLOCAL case 65: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:650 +//line sql.y:659 { - yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) + yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].tableName.ToViewName()) } case 66: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:656 + var yyLOCAL TableNames +//line sql.y:665 { - yyVAL.tableNames = TableNames{yyDollar[1].tableName} + yyLOCAL = TableNames{yyDollar[1].tableName} } + yyVAL.union = yyLOCAL case 67: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:660 +//line sql.y:669 { - yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) + yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].tableName) } case 68: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:665 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL TableNames +//line sql.y:675 { - yyVAL.partitions = nil + yyLOCAL = TableNames{yyDollar[1].tableName} } + yyVAL.union = yyLOCAL case 69: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:669 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:679 
{ - yyVAL.partitions = yyDollar[3].partitions + yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].tableName) } case 70: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:675 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL Partitions +//line sql.y:684 { - yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[3].setExprs} + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 71: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:681 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Partitions +//line sql.y:688 { - yyVAL.statement = &SetTransaction{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].scope, Characteristics: yyDollar[5].characteristics} + yyLOCAL = yyDollar[3].partitionsUnion() } + yyVAL.union = yyLOCAL case 72: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:685 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:694 { - yyVAL.statement = &SetTransaction{Comments: Comments(yyDollar[2].bytes2), Characteristics: yyDollar[4].characteristics, Scope: ImplicitScope} + yyLOCAL = &Set{Comments: Comments(yyDollar[2].strs), Exprs: yyDollar[3].setExprsUnion()} } + yyVAL.union = yyLOCAL case 73: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:691 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:700 { - yyVAL.characteristics = []Characteristic{yyDollar[1].characteristic} + yyLOCAL = &SetTransaction{Comments: Comments(yyDollar[2].strs), Scope: yyDollar[3].scopeUnion(), Characteristics: yyDollar[5].characteristicsUnion()} } + yyVAL.union = yyLOCAL case 74: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:695 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:704 { - yyVAL.characteristics = append(yyVAL.characteristics, yyDollar[3].characteristic) + yyLOCAL = &SetTransaction{Comments: Comments(yyDollar[2].strs), Characteristics: yyDollar[4].characteristicsUnion(), Scope: ImplicitScope} } + yyVAL.union = yyLOCAL case 75: - yyDollar = yyS[yypt-3 : 
yypt+1] -//line sql.y:701 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []Characteristic +//line sql.y:710 { - yyVAL.characteristic = yyDollar[3].isolationLevel + yyLOCAL = []Characteristic{yyDollar[1].characteristicUnion()} } + yyVAL.union = yyLOCAL case 76: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:705 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:714 { - yyVAL.characteristic = ReadWrite + yySLICE := (*[]Characteristic)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].characteristicUnion()) } case 77: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:709 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Characteristic +//line sql.y:720 { - yyVAL.characteristic = ReadOnly + yyLOCAL = yyDollar[3].isolationLevelUnion() } + yyVAL.union = yyLOCAL case 78: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:715 + var yyLOCAL Characteristic +//line sql.y:724 { - yyVAL.isolationLevel = RepeatableRead + yyLOCAL = ReadWrite } + yyVAL.union = yyLOCAL case 79: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:719 + var yyLOCAL Characteristic +//line sql.y:728 { - yyVAL.isolationLevel = ReadCommitted + yyLOCAL = ReadOnly } + yyVAL.union = yyLOCAL case 80: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:723 + var yyLOCAL IsolationLevel +//line sql.y:734 { - yyVAL.isolationLevel = ReadUncommitted + yyLOCAL = RepeatableRead } + yyVAL.union = yyLOCAL case 81: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:727 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL IsolationLevel +//line sql.y:738 { - yyVAL.isolationLevel = Serializable + yyLOCAL = ReadCommitted } + yyVAL.union = yyLOCAL case 82: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:733 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL IsolationLevel +//line sql.y:742 { - yyVAL.scope = SessionScope + yyLOCAL = ReadUncommitted } + yyVAL.union = yyLOCAL case 83: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:737 + var yyLOCAL IsolationLevel +//line sql.y:746 { - yyVAL.scope = GlobalScope + yyLOCAL = Serializable } + 
yyVAL.union = yyLOCAL case 84: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:743 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Scope +//line sql.y:752 { - yyDollar[1].createTable.TableSpec = yyDollar[2].TableSpec - yyDollar[1].createTable.FullyParsed = true - yyVAL.statement = yyDollar[1].createTable + yyLOCAL = SessionScope } + yyVAL.union = yyLOCAL case 85: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:749 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Scope +//line sql.y:756 { - // Create table [name] like [name] - yyDollar[1].createTable.OptLike = yyDollar[2].optLike - yyDollar[1].createTable.FullyParsed = true - yyVAL.statement = yyDollar[1].createTable + yyLOCAL = GlobalScope } + yyVAL.union = yyLOCAL case 86: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:756 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:762 { - yyDollar[1].createIndex.Columns = yyDollar[3].indexColumns - yyDollar[1].createIndex.Options = append(yyDollar[1].createIndex.Options, yyDollar[5].indexOptions...) - yyDollar[1].createIndex.Options = append(yyDollar[1].createIndex.Options, yyDollar[6].indexOptions...) 
- yyDollar[1].createIndex.FullyParsed = true - yyVAL.statement = yyDollar[1].createIndex + yyDollar[1].createTableUnion().TableSpec = yyDollar[2].tableSpecUnion() + yyDollar[1].createTableUnion().FullyParsed = true + yyLOCAL = yyDollar[1].createTableUnion() } + yyVAL.union = yyLOCAL case 87: - yyDollar = yyS[yypt-11 : yypt+1] -//line sql.y:764 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:768 { - yyVAL.statement = &CreateView{ViewName: yyDollar[7].tableName.ToViewName(), IsReplace: yyDollar[2].boolean, Algorithm: yyDollar[3].str, Definer: yyDollar[4].str, Security: yyDollar[5].str, Columns: yyDollar[8].columns, Select: yyDollar[10].selStmt, CheckOption: yyDollar[11].str} + // Create table [name] like [name] + yyDollar[1].createTableUnion().OptLike = yyDollar[2].optLikeUnion() + yyDollar[1].createTableUnion().FullyParsed = true + yyLOCAL = yyDollar[1].createTableUnion() } + yyVAL.union = yyLOCAL case 88: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:768 + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Statement +//line sql.y:775 { - yyDollar[1].createDatabase.FullyParsed = true - yyDollar[1].createDatabase.CreateOptions = yyDollar[2].collateAndCharsets - yyVAL.statement = yyDollar[1].createDatabase + indexDef := yyDollar[1].alterTableUnion().AlterOptions[0].(*AddIndexDefinition).IndexDefinition + indexDef.Columns = yyDollar[3].indexColumnsUnion() + indexDef.Options = append(indexDef.Options, yyDollar[5].indexOptionsUnion()...) + yyDollar[1].alterTableUnion().AlterOptions = append(yyDollar[1].alterTableUnion().AlterOptions, yyDollar[6].alterOptionsUnion()...) 
+ yyDollar[1].alterTableUnion().FullyParsed = true + yyLOCAL = yyDollar[1].alterTableUnion() } + yyVAL.union = yyLOCAL case 89: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:775 + yyDollar = yyS[yypt-11 : yypt+1] + var yyLOCAL Statement +//line sql.y:784 { - yyVAL.boolean = false + yyLOCAL = &CreateView{ViewName: yyDollar[7].tableName.ToViewName(), IsReplace: yyDollar[2].booleanUnion(), Algorithm: yyDollar[3].str, Definer: yyDollar[4].str, Security: yyDollar[5].str, Columns: yyDollar[8].columnsUnion(), Select: yyDollar[10].selStmtUnion(), CheckOption: yyDollar[11].str} } + yyVAL.union = yyLOCAL case 90: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:779 + var yyLOCAL Statement +//line sql.y:788 { - yyVAL.boolean = true + yyDollar[1].createDatabaseUnion().FullyParsed = true + yyDollar[1].createDatabaseUnion().CreateOptions = yyDollar[2].collateAndCharsetsUnion() + yyLOCAL = yyDollar[1].createDatabaseUnion() } + yyVAL.union = yyLOCAL case 91: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:784 + var yyLOCAL bool +//line sql.y:795 { - yyVAL.colIdent = NewColIdent("") + yyLOCAL = false } + yyVAL.union = yyLOCAL case 92: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:788 + var yyLOCAL bool +//line sql.y:799 { - yyVAL.colIdent = yyDollar[2].colIdent + yyLOCAL = true } + yyVAL.union = yyLOCAL case 93: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:794 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:804 { - yyVAL.colIdent = yyDollar[1].colIdent + yyVAL.colIdent = NewColIdent("") } case 94: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:799 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:808 { - var v []VindexParam - yyVAL.vindexParams = v + yyVAL.colIdent = yyDollar[2].colIdent } case 95: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:804 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:814 { - yyVAL.vindexParams = yyDollar[2].vindexParams + yyVAL.colIdent = yyDollar[1].colIdent } case 96: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:810 + yyDollar = yyS[yypt-0 : 
yypt+1] + var yyLOCAL []VindexParam +//line sql.y:819 { - yyVAL.vindexParams = make([]VindexParam, 0, 4) - yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[1].vindexParam) + var v []VindexParam + yyLOCAL = v } + yyVAL.union = yyLOCAL case 97: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:815 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL []VindexParam +//line sql.y:824 { - yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[3].vindexParam) + yyLOCAL = yyDollar[2].vindexParamsUnion() } + yyVAL.union = yyLOCAL case 98: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:821 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []VindexParam +//line sql.y:830 { - yyVAL.vindexParam = VindexParam{Key: yyDollar[1].colIdent, Val: yyDollar[3].str} + yyLOCAL = make([]VindexParam, 0, 4) + yyLOCAL = append(yyLOCAL, yyDollar[1].vindexParam) } + yyVAL.union = yyLOCAL case 99: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:827 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:835 { - yyVAL.createTable = &CreateTable{Table: yyDollar[4].tableName, IfNotExists: yyDollar[3].boolean} - setDDL(yylex, yyVAL.createTable) + yySLICE := (*[]VindexParam)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].vindexParam) } case 100: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:834 +//line sql.y:841 { - yyVAL.alterTable = &AlterTable{Table: yyDollar[3].tableName} - setDDL(yylex, yyVAL.alterTable) + yyVAL.vindexParam = VindexParam{Key: yyDollar[1].colIdent, Val: yyDollar[3].str} } case 101: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:841 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *CreateTable +//line sql.y:847 { - yyVAL.createIndex = &CreateIndex{Constraint: yyDollar[2].str, Name: yyDollar[4].colIdent, Options: yyDollar[5].indexOptions, Table: yyDollar[7].tableName} - setDDL(yylex, yyVAL.createIndex) + yyLOCAL = &CreateTable{Table: yyDollar[5].tableName, IfNotExists: yyDollar[4].booleanUnion(), Temp: yyDollar[2].booleanUnion()} + setDDL(yylex, yyLOCAL) } + yyVAL.union 
= yyLOCAL case 102: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:848 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *AlterTable +//line sql.y:854 { - yyVAL.createDatabase = &CreateDatabase{DBName: string(yyDollar[4].colIdent.String()), IfNotExists: yyDollar[3].boolean} - setDDL(yylex, yyVAL.createDatabase) + yyLOCAL = &AlterTable{Table: yyDollar[3].tableName} + setDDL(yylex, yyLOCAL) } + yyVAL.union = yyLOCAL case 103: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:855 + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL *AlterTable +//line sql.y:861 { - yyVAL.alterDatabase = &AlterDatabase{} - setDDL(yylex, yyVAL.alterDatabase) + yyLOCAL = &AlterTable{Table: yyDollar[6].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[3].colIdent, Type: string(yyDollar[2].str)}, Options: yyDollar[4].indexOptionsUnion()}}}} + setDDL(yylex, yyLOCAL) } - case 106: - yyDollar = yyS[yypt-4 : yypt+1] + yyVAL.union = yyLOCAL + case 104: + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL *AlterTable //line sql.y:866 { - yyVAL.TableSpec = yyDollar[2].TableSpec - yyVAL.TableSpec.Options = yyDollar[4].tableOptions + yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].colIdent, Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Fulltext: true}, Options: yyDollar[5].indexOptionsUnion()}}}} + setDDL(yylex, yyLOCAL) } - case 107: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:872 + yyVAL.union = yyLOCAL + case 105: + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL *AlterTable +//line sql.y:871 { - yyVAL.collateAndCharsets = nil + yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].colIdent, Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Spatial: true}, Options: 
yyDollar[5].indexOptionsUnion()}}}} + setDDL(yylex, yyLOCAL) } - case 108: - yyDollar = yyS[yypt-1 : yypt+1] + yyVAL.union = yyLOCAL + case 106: + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL *AlterTable //line sql.y:876 { - yyVAL.collateAndCharsets = yyDollar[1].collateAndCharsets + yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].colIdent, Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Unique: true}, Options: yyDollar[5].indexOptionsUnion()}}}} + setDDL(yylex, yyLOCAL) } - case 109: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:882 + yyVAL.union = yyLOCAL + case 107: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *CreateDatabase +//line sql.y:883 { - yyVAL.collateAndCharsets = []CollateAndCharset{yyDollar[1].collateAndCharset} + yyLOCAL = &CreateDatabase{Comments: Comments(yyDollar[3].strs), DBName: yyDollar[5].tableIdent, IfNotExists: yyDollar[4].booleanUnion()} + setDDL(yylex, yyLOCAL) } - case 110: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:886 + yyVAL.union = yyLOCAL + case 108: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *AlterDatabase +//line sql.y:890 { - yyVAL.collateAndCharsets = []CollateAndCharset{yyDollar[1].collateAndCharset} + yyLOCAL = &AlterDatabase{} + setDDL(yylex, yyLOCAL) } + yyVAL.union = yyLOCAL case 111: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:890 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *TableSpec +//line sql.y:901 { - yyVAL.collateAndCharsets = append(yyDollar[1].collateAndCharsets, yyDollar[2].collateAndCharset) + yyLOCAL = yyDollar[2].tableSpecUnion() + yyLOCAL.Options = yyDollar[4].tableOptionsUnion() } + yyVAL.union = yyLOCAL case 112: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:894 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL []CollateAndCharset +//line sql.y:907 { - yyVAL.collateAndCharsets = append(yyDollar[1].collateAndCharsets, yyDollar[2].collateAndCharset) + 
yyLOCAL = nil } + yyVAL.union = yyLOCAL case 113: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:899 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []CollateAndCharset +//line sql.y:911 { - yyVAL.boolean = false + yyLOCAL = yyDollar[1].collateAndCharsetsUnion() } + yyVAL.union = yyLOCAL case 114: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:903 + var yyLOCAL []CollateAndCharset +//line sql.y:917 { - yyVAL.boolean = true + yyLOCAL = []CollateAndCharset{yyDollar[1].collateAndCharset} } + yyVAL.union = yyLOCAL case 115: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:909 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []CollateAndCharset +//line sql.y:921 { - yyVAL.collateAndCharset = CollateAndCharset{Type: CharacterSetType, Value: (yyDollar[4].colIdent.String()), IsDefault: yyDollar[1].boolean} + yyLOCAL = []CollateAndCharset{yyDollar[1].collateAndCharset} } + yyVAL.union = yyLOCAL case 116: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:913 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:925 { - yyVAL.collateAndCharset = CollateAndCharset{Type: CharacterSetType, Value: ("'" + string(yyDollar[4].bytes) + "'"), IsDefault: yyDollar[1].boolean} + yySLICE := (*[]CollateAndCharset)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[2].collateAndCharset) } case 117: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:919 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:929 { - yyVAL.collateAndCharset = CollateAndCharset{Type: CollateType, Value: (yyDollar[4].colIdent.String()), IsDefault: yyDollar[1].boolean} + yySLICE := (*[]CollateAndCharset)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[2].collateAndCharset) } case 118: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:923 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL bool +//line sql.y:934 { - yyVAL.collateAndCharset = CollateAndCharset{Type: CollateType, Value: ("'" + string(yyDollar[4].bytes) + "'"), IsDefault: yyDollar[1].boolean} + yyLOCAL = false } + yyVAL.union = yyLOCAL case 119: - 
yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:930 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:938 { - yyVAL.optLike = &OptLike{LikeTable: yyDollar[2].tableName} + yyLOCAL = true } + yyVAL.union = yyLOCAL case 120: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:934 +//line sql.y:944 { - yyVAL.optLike = &OptLike{LikeTable: yyDollar[3].tableName} + yyVAL.collateAndCharset = CollateAndCharset{Type: CharacterSetType, Value: (yyDollar[4].colIdent.String()), IsDefault: yyDollar[1].booleanUnion()} } case 121: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:940 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:948 { - yyVAL.columnDefinitions = []*ColumnDefinition{yyDollar[1].columnDefinition} + yyVAL.collateAndCharset = CollateAndCharset{Type: CharacterSetType, Value: (encodeSQLString(yyDollar[4].str)), IsDefault: yyDollar[1].booleanUnion()} } case 122: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:944 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:954 { - yyVAL.columnDefinitions = append(yyDollar[1].columnDefinitions, yyDollar[3].columnDefinition) + yyVAL.collateAndCharset = CollateAndCharset{Type: CollateType, Value: (yyDollar[4].colIdent.String()), IsDefault: yyDollar[1].booleanUnion()} } case 123: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:950 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:958 { - yyVAL.TableSpec = &TableSpec{} - yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition) + yyVAL.collateAndCharset = CollateAndCharset{Type: CollateType, Value: (encodeSQLString(yyDollar[4].str)), IsDefault: yyDollar[1].booleanUnion()} } case 124: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:955 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *OptLike +//line sql.y:965 { - yyVAL.TableSpec = &TableSpec{} - yyVAL.TableSpec.AddConstraint(yyDollar[1].constraintDefinition) + yyLOCAL = &OptLike{LikeTable: yyDollar[2].tableName} } + yyVAL.union = yyLOCAL case 125: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:960 + yyDollar = yyS[yypt-4 : yypt+1] + 
var yyLOCAL *OptLike +//line sql.y:969 { - yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) + yyLOCAL = &OptLike{LikeTable: yyDollar[3].tableName} } + yyVAL.union = yyLOCAL case 126: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:964 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []*ColumnDefinition +//line sql.y:975 { - yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) - yyVAL.TableSpec.AddConstraint(yyDollar[4].constraintDefinition) + yyLOCAL = []*ColumnDefinition{yyDollar[1].columnDefinitionUnion()} } + yyVAL.union = yyLOCAL case 127: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:969 +//line sql.y:979 { - yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition) + yySLICE := (*[]*ColumnDefinition)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].columnDefinitionUnion()) } case 128: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:973 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL *TableSpec +//line sql.y:985 { - yyVAL.TableSpec.AddConstraint(yyDollar[3].constraintDefinition) + yyLOCAL = &TableSpec{} + yyLOCAL.AddColumn(yyDollar[1].columnDefinitionUnion()) } + yyVAL.union = yyLOCAL case 129: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:977 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL *TableSpec +//line sql.y:990 { - yyVAL.TableSpec.AddConstraint(yyDollar[3].constraintDefinition) + yyLOCAL = &TableSpec{} + yyLOCAL.AddConstraint(yyDollar[1].constraintDefinitionUnion()) } + yyVAL.union = yyLOCAL case 130: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:983 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:995 { - yyDollar[2].columnType.NotNull = yyDollar[3].boolean - yyDollar[2].columnType.Default = yyDollar[4].optVal - yyDollar[2].columnType.OnUpdate = yyDollar[5].optVal - yyDollar[2].columnType.Autoincrement = yyDollar[6].boolean - yyDollar[2].columnType.KeyOpt = yyDollar[7].colKeyOpt - yyDollar[2].columnType.Comment = yyDollar[8].literal - yyVAL.columnDefinition = &ColumnDefinition{Name: yyDollar[1].colIdent, Type: 
yyDollar[2].columnType} + yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) } case 131: + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:999 + { + yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) + yyVAL.tableSpecUnion().AddConstraint(yyDollar[4].constraintDefinitionUnion()) + } + case 132: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:994 +//line sql.y:1004 { - yyVAL.columnType = yyDollar[1].columnType - yyVAL.columnType.Unsigned = yyDollar[2].boolean - yyVAL.columnType.Zerofill = yyDollar[3].boolean + yyVAL.tableSpecUnion().AddIndex(yyDollar[3].indexDefinitionUnion()) + } + case 133: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1008 + { + yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) + } + case 134: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1012 + { + yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) } case 135: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1005 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *ColumnDefinition +//line sql.y:1018 { - yyVAL.columnType = yyDollar[1].columnType - yyVAL.columnType.Length = yyDollar[2].literal + yyDollar[2].columnType.Options = yyDollar[3].columnTypeOptionsUnion() + yyLOCAL = &ColumnDefinition{Name: yyDollar[1].colIdent, Type: yyDollar[2].columnType} } + yyVAL.union = yyLOCAL case 136: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1010 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1028 { - yyVAL.columnType = yyDollar[1].columnType + yyLOCAL = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: colKeyNone, Comment: nil} } + yyVAL.union = yyLOCAL case 137: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1016 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1032 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + val := true + yyDollar[1].columnTypeOptionsUnion().Null = &val + yyLOCAL = 
yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 138: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1020 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1038 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + val := false + yyDollar[1].columnTypeOptionsUnion().Null = &val + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 139: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1024 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1044 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[3].exprUnion() + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 140: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1028 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1049 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyDollar[1].columnTypeOptionsUnion().OnUpdate = yyDollar[4].exprUnion() + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 141: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1032 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1054 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyDollar[1].columnTypeOptionsUnion().Autoincrement = true + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 142: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1036 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1059 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str) + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 143: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1040 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL 
*ColumnTypeOptions +//line sql.y:1064 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyDollar[1].columnTypeOptionsUnion().KeyOpt = colKeyPrimary + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 144: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1044 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1069 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyDollar[1].columnTypeOptionsUnion().KeyOpt = colKey + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 145: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1048 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *ColumnTypeOptions +//line sql.y:1074 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyDollar[1].columnTypeOptionsUnion().KeyOpt = colKeyUniqueKey + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 146: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1054 + var yyLOCAL *ColumnTypeOptions +//line sql.y:1079 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + yyDollar[1].columnTypeOptionsUnion().KeyOpt = colKeyUnique + yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } + yyVAL.union = yyLOCAL case 147: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1060 - { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale - } - case 148: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1066 - { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale - } - case 149: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1072 - { - 
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale - } - case 150: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1078 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1086 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Unsigned = yyDollar[2].booleanUnion() + yyVAL.columnType.Zerofill = yyDollar[3].booleanUnion() } case 151: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1086 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1097 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Length = yyDollar[2].literalUnion() } case 152: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1090 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1102 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyVAL.columnType = yyDollar[1].columnType } case 153: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1094 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1108 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 154: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1098 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1112 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 155: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1102 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1116 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyVAL.columnType = 
ColumnType{Type: string(yyDollar[1].str)} } case 156: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1108 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1120 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 157: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1112 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1124 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 158: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1116 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1128 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 159: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1120 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1132 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 160: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1124 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1136 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 161: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1128 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1140 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 162: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1132 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1146 { - yyVAL.columnType = 
ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 163: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1136 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1152 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 164: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1140 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1158 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 165: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1144 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1164 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 166: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1148 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1170 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 167: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1152 +//line sql.y:1178 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + 
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 168: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1156 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1182 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 169: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1160 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1186 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 170: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1165 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1190 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 171: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1171 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1194 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 172: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1175 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1200 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].str, Collate: yyDollar[4].str} } case 173: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1179 + yyDollar = yyS[yypt-4 : yypt+1] +//line sql.y:1204 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: 
yyDollar[3].str, Collate: yyDollar[4].str} } case 174: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1183 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1208 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 175: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1187 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1212 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 176: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1191 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1216 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 177: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1195 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1220 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 178: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1199 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1224 { - yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 179: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1205 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1228 { - yyVAL.strs = make([]string, 0, 4) - yyVAL.strs = append(yyVAL.strs, "'"+string(yyDollar[1].bytes)+"'") + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } case 180: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1210 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1232 { - yyVAL.strs = 
append(yyDollar[1].strs, "'"+string(yyDollar[3].bytes)+"'") + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 181: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1215 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1236 { - yyVAL.literal = nil + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 182: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1219 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1240 { - yyVAL.literal = NewIntLiteral(yyDollar[2].bytes) + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 183: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1224 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1244 { - yyVAL.LengthScaleOption = LengthScaleOption{} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 184: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1228 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1248 { - yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntLiteral(yyDollar[2].bytes), - Scale: NewIntLiteral(yyDollar[4].bytes), - } + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 185: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1236 + yyDollar = yyS[yypt-6 : yypt+1] +//line sql.y:1252 { - yyVAL.LengthScaleOption = LengthScaleOption{} + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} } case 186: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1240 + yyDollar = yyS[yypt-6 : yypt+1] +//line sql.y:1257 { - yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntLiteral(yyDollar[2].bytes), - } + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} } case 187: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1246 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1263 { - yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntLiteral(yyDollar[2].bytes), - 
Scale: NewIntLiteral(yyDollar[4].bytes), - } + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 188: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1254 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1267 { - yyVAL.boolean = false + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 189: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1258 +//line sql.y:1271 { - yyVAL.boolean = true + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 190: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1263 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1275 { - yyVAL.boolean = false + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 191: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1267 +//line sql.y:1279 { - yyVAL.boolean = true + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 192: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1273 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1283 { - yyVAL.boolean = false + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 193: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1277 +//line sql.y:1287 { - yyVAL.boolean = false + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 194: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1281 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1291 { - yyVAL.boolean = true + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)} } case 195: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1286 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1297 { - yyVAL.optVal = nil + yyVAL.strs = make([]string, 0, 4) + yyVAL.strs = append(yyVAL.strs, encodeSQLString(yyDollar[1].str)) } case 196: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1290 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1302 { - yyVAL.optVal = yyDollar[2].expr + yyVAL.strs = append(yyDollar[1].strs, encodeSQLString(yyDollar[3].str)) } case 197: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1295 + var yyLOCAL 
*Literal +//line sql.y:1307 { - yyVAL.optVal = nil + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 198: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1299 + var yyLOCAL *Literal +//line sql.y:1311 { - yyVAL.optVal = yyDollar[3].expr + yyLOCAL = NewIntLiteral(yyDollar[2].str) } + yyVAL.union = yyLOCAL case 199: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1304 +//line sql.y:1316 { - yyVAL.boolean = false + yyVAL.LengthScaleOption = LengthScaleOption{} } case 200: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1308 + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:1320 { - yyVAL.boolean = true + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntLiteral(yyDollar[2].str), + Scale: NewIntLiteral(yyDollar[4].str), + } } case 201: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1313 +//line sql.y:1328 { - yyVAL.str = "" + yyVAL.LengthScaleOption = LengthScaleOption{} } case 202: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1317 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1332 { - yyVAL.str = string(yyDollar[2].colIdent.String()) + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntLiteral(yyDollar[2].str), + } } case 203: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1321 + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:1338 { - yyVAL.str = string(yyDollar[2].bytes) + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntLiteral(yyDollar[2].str), + Scale: NewIntLiteral(yyDollar[4].str), + } } case 204: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1326 + var yyLOCAL bool +//line sql.y:1346 { - yyVAL.str = "" + yyLOCAL = false } + yyVAL.union = yyLOCAL case 205: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1330 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:1350 { - yyVAL.str = string(yyDollar[2].colIdent.String()) + yyLOCAL = true } + yyVAL.union = yyLOCAL case 206: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1334 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL bool +//line sql.y:1355 { - yyVAL.str = 
string(yyDollar[2].bytes) + yyLOCAL = false } + yyVAL.union = yyLOCAL case 207: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1339 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:1359 { - yyVAL.colKeyOpt = colKeyNone + yyLOCAL = true } + yyVAL.union = yyLOCAL case 208: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1343 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1364 { - yyVAL.colKeyOpt = colKeyPrimary + yyVAL.str = "" } case 209: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1347 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1368 { - yyVAL.colKeyOpt = colKey + yyVAL.str = string(yyDollar[2].colIdent.String()) } case 210: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1351 +//line sql.y:1372 { - yyVAL.colKeyOpt = colKeyUniqueKey + yyVAL.str = encodeSQLString(yyDollar[2].str) } case 211: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1355 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1376 { - yyVAL.colKeyOpt = colKeyUnique + yyVAL.str = string(yyDollar[2].str) } case 212: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1360 +//line sql.y:1381 { - yyVAL.literal = nil + yyVAL.str = "" } case 213: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1364 +//line sql.y:1385 { - yyVAL.literal = NewStrLiteral(yyDollar[2].bytes) + yyVAL.str = string(yyDollar[2].colIdent.String()) } case 214: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1370 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1389 { - yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns, Options: yyDollar[5].indexOptions} + yyVAL.str = encodeSQLString(yyDollar[2].str) } case 215: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1375 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *IndexDefinition +//line sql.y:1396 { - yyVAL.indexOptions = nil + yyLOCAL = &IndexDefinition{Info: yyDollar[1].indexInfoUnion(), Columns: yyDollar[3].indexColumnsUnion(), Options: yyDollar[5].indexOptionsUnion()} } + yyVAL.union = yyLOCAL case 216: - yyDollar 
= yyS[yypt-1 : yypt+1] -//line sql.y:1379 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL []*IndexOption +//line sql.y:1401 { - yyVAL.indexOptions = yyDollar[1].indexOptions + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 217: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1385 + var yyLOCAL []*IndexOption +//line sql.y:1405 { - yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} + yyLOCAL = yyDollar[1].indexOptionsUnion() } + yyVAL.union = yyLOCAL case 218: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1389 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []*IndexOption +//line sql.y:1411 { - yyVAL.indexOptions = append(yyVAL.indexOptions, yyDollar[2].indexOption) + yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } + yyVAL.union = yyLOCAL case 219: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1395 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1415 { - yyVAL.indexOption = yyDollar[1].indexOption + yySLICE := (*[]*IndexOption)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[2].indexOptionUnion()) } case 220: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1399 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL *IndexOption +//line sql.y:1421 { - // should not be string - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = yyDollar[1].indexOptionUnion() } + yyVAL.union = yyLOCAL case 221: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1404 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *IndexOption +//line sql.y:1425 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewStrLiteral(yyDollar[2].bytes)} + // should not be string + yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 222: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1408 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *IndexOption +//line sql.y:1430 { - yyVAL.indexOption = &IndexOption{Name: 
string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), String: yyDollar[3].colIdent.String()} + yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[2].str)} } + yyVAL.union = yyLOCAL case 223: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1414 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *IndexOption +//line sql.y:1434 { - yyVAL.str = "" + yyLOCAL = &IndexOption{Name: string(yyDollar[1].str) + " " + string(yyDollar[2].str), String: yyDollar[3].colIdent.String()} } + yyVAL.union = yyLOCAL case 224: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1418 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1440 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = "" } case 225: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1424 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1444 { - yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), ConstraintName: NewColIdent(yyDollar[1].str), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} + yyVAL.str = string(yyDollar[1].str) } case 226: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1428 + var yyLOCAL *IndexInfo +//line sql.y:1450 { - yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Spatial: true, Unique: false} + yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewColIdent(yyDollar[1].str), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} } + yyVAL.union = yyLOCAL case 227: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1432 + var yyLOCAL *IndexInfo +//line sql.y:1454 { - yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Fulltext: true, Unique: false} + yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Spatial: true, Unique: false} } + yyVAL.union = 
yyLOCAL case 228: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1436 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *IndexInfo +//line sql.y:1458 { - yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].str), ConstraintName: NewColIdent(yyDollar[1].str), Name: NewColIdent(yyDollar[4].str), Unique: true} + yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Fulltext: true, Unique: false} } + yyVAL.union = yyLOCAL case 229: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1440 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *IndexInfo +//line sql.y:1462 { - yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(yyDollar[2].str), Unique: false} + yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewColIdent(yyDollar[1].str), Name: NewColIdent(yyDollar[4].str), Unique: true} } + yyVAL.union = yyLOCAL case 230: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1445 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *IndexInfo +//line sql.y:1466 { - yyVAL.str = "" + yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(yyDollar[2].str), Unique: false} } + yyVAL.union = yyLOCAL case 231: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1449 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1471 { - yyVAL.str = yyDollar[2].str + yyVAL.str = "" } case 232: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1455 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1475 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = yyDollar[2].str } case 233: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1459 +//line sql.y:1481 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } case 234: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1463 +//line sql.y:1485 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } case 235: yyDollar = yyS[yypt-1 : yypt+1] 
-//line sql.y:1470 +//line sql.y:1489 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } case 236: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1474 +//line sql.y:1496 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } case 237: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1479 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1500 { - yyVAL.str = "key" + yyVAL.str = string(yyDollar[1].str) } case 238: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1483 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1505 { - yyVAL.str = yyDollar[1].str + yyVAL.str = "key" } case 239: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1489 +//line sql.y:1509 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = yyDollar[1].str } case 240: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1493 +//line sql.y:1515 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } case 241: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1498 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1519 { - yyVAL.str = "" + yyVAL.str = string(yyDollar[1].str) } case 242: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1502 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1524 { - yyVAL.str = string(yyDollar[1].colIdent.String()) + yyVAL.str = "" } case 243: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1508 +//line sql.y:1528 { - yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn} + yyVAL.str = string(yyDollar[1].colIdent.String()) } case 244: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1512 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []*IndexColumn +//line sql.y:1534 { - yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn) + yyLOCAL = []*IndexColumn{yyDollar[1].indexColumnUnion()} } + yyVAL.union = yyLOCAL case 245: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1518 +//line sql.y:1538 { - yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].literal, 
Direction: yyDollar[3].orderDirection} + yySLICE := (*[]*IndexColumn)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].indexColumnUnion()) } case 246: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1524 + var yyLOCAL *IndexColumn +//line sql.y:1544 { - yyVAL.constraintDefinition = &ConstraintDefinition{Name: string(yyDollar[2].colIdent.String()), Details: yyDollar[3].constraintInfo} + yyLOCAL = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].literalUnion(), Direction: yyDollar[3].orderDirectionUnion()} } + yyVAL.union = yyLOCAL case 247: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1528 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *ConstraintDefinition +//line sql.y:1550 { - yyVAL.constraintDefinition = &ConstraintDefinition{Details: yyDollar[1].constraintInfo} + yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].colIdent, Details: yyDollar[3].constraintInfoUnion()} } + yyVAL.union = yyLOCAL case 248: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1534 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL *ConstraintDefinition +//line sql.y:1554 { - yyVAL.constraintDefinition = &ConstraintDefinition{Name: string(yyDollar[2].colIdent.String()), Details: yyDollar[3].constraintInfo} + yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } + yyVAL.union = yyLOCAL case 249: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1538 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *ConstraintDefinition +//line sql.y:1560 { - yyVAL.constraintDefinition = &ConstraintDefinition{Details: yyDollar[1].constraintInfo} + yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].colIdent, Details: yyDollar[3].constraintInfoUnion()} } + yyVAL.union = yyLOCAL case 250: - yyDollar = yyS[yypt-10 : yypt+1] -//line sql.y:1544 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL *ConstraintDefinition +//line sql.y:1564 { - yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: 
yyDollar[9].columns} + yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } + yyVAL.union = yyLOCAL case 251: - yyDollar = yyS[yypt-11 : yypt+1] -//line sql.y:1548 + yyDollar = yyS[yypt-10 : yypt+1] + var yyLOCAL ConstraintInfo +//line sql.y:1570 { - yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction} + yyLOCAL = &ForeignKeyDefinition{Source: yyDollar[4].columnsUnion(), ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columnsUnion()} } + yyVAL.union = yyLOCAL case 252: yyDollar = yyS[yypt-11 : yypt+1] -//line sql.y:1552 + var yyLOCAL ConstraintInfo +//line sql.y:1574 { - yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnUpdate: yyDollar[11].ReferenceAction} + yyLOCAL = &ForeignKeyDefinition{Source: yyDollar[4].columnsUnion(), ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columnsUnion(), OnDelete: yyDollar[11].ReferenceActionUnion()} } + yyVAL.union = yyLOCAL case 253: - yyDollar = yyS[yypt-12 : yypt+1] -//line sql.y:1556 + yyDollar = yyS[yypt-11 : yypt+1] + var yyLOCAL ConstraintInfo +//line sql.y:1578 { - yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction, OnUpdate: yyDollar[12].ReferenceAction} + yyLOCAL = &ForeignKeyDefinition{Source: yyDollar[4].columnsUnion(), ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columnsUnion(), OnUpdate: yyDollar[11].ReferenceActionUnion()} } + yyVAL.union = yyLOCAL case 254: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1562 + yyDollar = yyS[yypt-12 : yypt+1] + var yyLOCAL ConstraintInfo +//line sql.y:1582 { - yyVAL.constraintInfo = 
&CheckConstraintDefinition{Expr: yyDollar[3].expr, Enforced: yyDollar[5].boolean} + yyLOCAL = &ForeignKeyDefinition{Source: yyDollar[4].columnsUnion(), ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columnsUnion(), OnDelete: yyDollar[11].ReferenceActionUnion(), OnUpdate: yyDollar[12].ReferenceActionUnion()} } + yyVAL.union = yyLOCAL case 255: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1568 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL ConstraintInfo +//line sql.y:1588 { - yyVAL.ReferenceAction = yyDollar[3].ReferenceAction + yyLOCAL = &CheckConstraintDefinition{Expr: yyDollar[3].exprUnion(), Enforced: yyDollar[5].booleanUnion()} } + yyVAL.union = yyLOCAL case 256: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1574 + var yyLOCAL ReferenceAction +//line sql.y:1594 { - yyVAL.ReferenceAction = yyDollar[3].ReferenceAction + yyLOCAL = yyDollar[3].ReferenceActionUnion() } + yyVAL.union = yyLOCAL case 257: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1580 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL ReferenceAction +//line sql.y:1600 { - yyVAL.ReferenceAction = Restrict + yyLOCAL = yyDollar[3].ReferenceActionUnion() } + yyVAL.union = yyLOCAL case 258: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1584 + var yyLOCAL ReferenceAction +//line sql.y:1606 { - yyVAL.ReferenceAction = Cascade + yyLOCAL = Restrict } + yyVAL.union = yyLOCAL case 259: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1588 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL ReferenceAction +//line sql.y:1610 { - yyVAL.ReferenceAction = NoAction + yyLOCAL = Cascade } + yyVAL.union = yyLOCAL case 260: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1592 + var yyLOCAL ReferenceAction +//line sql.y:1614 { - yyVAL.ReferenceAction = SetDefault + yyLOCAL = NoAction } + yyVAL.union = yyLOCAL case 261: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1596 + var yyLOCAL ReferenceAction +//line sql.y:1618 { - yyVAL.ReferenceAction = SetNull + yyLOCAL = SetDefault } + yyVAL.union = 
yyLOCAL case 262: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1601 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL ReferenceAction +//line sql.y:1622 { - yyVAL.str = "" + yyLOCAL = SetNull } + yyVAL.union = yyLOCAL case 263: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1605 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1627 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = "" } case 264: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1609 +//line sql.y:1631 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } case 265: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1614 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1635 { - yyVAL.boolean = true + yyVAL.str = string(yyDollar[1].str) } case 266: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1618 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL bool +//line sql.y:1640 { - yyVAL.boolean = true + yyLOCAL = true } + yyVAL.union = yyLOCAL case 267: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1622 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:1644 { - yyVAL.boolean = false + yyLOCAL = true } + yyVAL.union = yyLOCAL case 268: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1627 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL bool +//line sql.y:1648 { - yyVAL.tableOptions = nil + yyLOCAL = false } + yyVAL.union = yyLOCAL case 269: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1631 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL TableOptions +//line sql.y:1653 { - yyVAL.tableOptions = yyDollar[1].tableOptions + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 270: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1637 + var yyLOCAL TableOptions +//line sql.y:1657 { - yyVAL.tableOptions = TableOptions{yyDollar[1].tableOption} + yyLOCAL = yyDollar[1].tableOptionsUnion() } + yyVAL.union = yyLOCAL case 271: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1641 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL TableOptions +//line sql.y:1663 { - yyVAL.tableOptions = 
append(yyDollar[1].tableOptions, yyDollar[3].tableOption) + yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } + yyVAL.union = yyLOCAL case 272: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1645 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1667 { - yyVAL.tableOptions = append(yyDollar[1].tableOptions, yyDollar[2].tableOption) + yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].tableOptionUnion()) } case 273: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1651 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1671 { - yyVAL.tableOptions = TableOptions{yyDollar[1].tableOption} + yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) } case 274: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1655 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL TableOptions +//line sql.y:1677 { - yyVAL.tableOptions = append(yyDollar[1].tableOptions, yyDollar[2].tableOption) + yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } + yyVAL.union = yyLOCAL case 275: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1661 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1681 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) } case 276: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1665 + var yyLOCAL *TableOption +//line sql.y:1687 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 277: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1669 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *TableOption +//line sql.y:1691 { - yyVAL.tableOption = &TableOption{Name: (string(yyDollar[2].bytes)), String: yyDollar[4].str} + yyLOCAL = 
&TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 278: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1673 + var yyLOCAL *TableOption +//line sql.y:1695 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[2].bytes), String: yyDollar[4].str} + yyLOCAL = &TableOption{Name: (string(yyDollar[2].str)), String: yyDollar[4].str} } + yyVAL.union = yyLOCAL case 279: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1677 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *TableOption +//line sql.y:1699 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[2].str), String: yyDollar[4].str} } + yyVAL.union = yyLOCAL case 280: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1681 + var yyLOCAL *TableOption +//line sql.y:1703 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewStrLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 281: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1685 + var yyLOCAL *TableOption +//line sql.y:1707 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewStrLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 282: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1689 + var yyLOCAL *TableOption +//line sql.y:1711 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewStrLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 283: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1693 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *TableOption +//line sql.y:1715 { - yyVAL.tableOption = &TableOption{Name: 
(string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes)), Value: NewStrLiteral(yyDollar[4].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 284: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1697 + var yyLOCAL *TableOption +//line sql.y:1719 { - yyVAL.tableOption = &TableOption{Name: (string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes)), Value: NewStrLiteral(yyDollar[4].bytes)} + yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } + yyVAL.union = yyLOCAL case 285: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1701 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *TableOption +//line sql.y:1723 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } + yyVAL.union = yyLOCAL case 286: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1705 + var yyLOCAL *TableOption +//line sql.y:1727 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewStrLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 287: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1709 + var yyLOCAL *TableOption +//line sql.y:1731 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), String: yyDollar[3].colIdent.String()} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 288: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1713 + var yyLOCAL *TableOption +//line sql.y:1735 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewStrLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), 
String: yyDollar[3].colIdent.String()} } + yyVAL.union = yyLOCAL case 289: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1717 + var yyLOCAL *TableOption +//line sql.y:1739 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 290: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1721 + var yyLOCAL *TableOption +//line sql.y:1743 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 291: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1725 + var yyLOCAL *TableOption +//line sql.y:1747 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 292: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1729 + var yyLOCAL *TableOption +//line sql.y:1751 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 293: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1733 + var yyLOCAL *TableOption +//line sql.y:1755 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 294: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1737 + var yyLOCAL *TableOption +//line sql.y:1759 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: 
string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 295: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1741 + var yyLOCAL *TableOption +//line sql.y:1763 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewStrLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 296: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1745 + var yyLOCAL *TableOption +//line sql.y:1767 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 297: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1749 + var yyLOCAL *TableOption +//line sql.y:1771 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 298: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1753 + var yyLOCAL *TableOption +//line sql.y:1775 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 299: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1757 + var yyLOCAL *TableOption +//line sql.y:1779 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 300: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1761 + var yyLOCAL *TableOption +//line sql.y:1783 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: 
string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 301: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1765 + var yyLOCAL *TableOption +//line sql.y:1787 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Value: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 302: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1769 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *TableOption +//line sql.y:1791 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), String: (yyDollar[3].colIdent.String() + yyDollar[4].str)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 303: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1773 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *TableOption +//line sql.y:1795 { - yyVAL.tableOption = &TableOption{Name: string(yyDollar[1].bytes), Tables: yyDollar[4].tableNames} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].colIdent.String() + yyDollar[4].str)} } + yyVAL.union = yyLOCAL case 304: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1778 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *TableOption +//line sql.y:1799 { - yyVAL.str = "" + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Tables: yyDollar[4].tableNamesUnion()} } + yyVAL.union = yyLOCAL case 305: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1782 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:1804 { - yyVAL.str = " " + string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes) + yyVAL.str = "" } case 306: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1786 +//line sql.y:1808 { - yyVAL.str = " " + string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes) + yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 316: - yyDollar = yyS[yypt-1 : yypt+1] -//line 
sql.y:1805 + case 307: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:1812 { - yyVAL.str = yyDollar[1].colIdent.String() + yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } case 317: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1809 +//line sql.y:1831 { - yyVAL.str = "'" + string(yyDollar[1].bytes) + "'" + yyVAL.str = yyDollar[1].colIdent.String() } case 318: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1813 +//line sql.y:1835 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = encodeSQLString(yyDollar[1].str) } case 319: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1818 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:1839 { - yyVAL.bytes = []byte("") + yyVAL.str = string(yyDollar[1].str) } - case 321: + case 320: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1824 +//line sql.y:1844 { - yyVAL.colName = nil + yyVAL.str = "" } case 322: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1828 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *ColName +//line sql.y:1850 { - yyVAL.colName = yyDollar[2].colName + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 323: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1833 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ColName +//line sql.y:1854 { - yyVAL.colName = nil + yyLOCAL = yyDollar[2].colNameUnion() } + yyVAL.union = yyLOCAL case 324: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1837 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *ColName +//line sql.y:1859 { - yyVAL.colName = yyDollar[2].colName + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 325: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1842 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ColName +//line sql.y:1863 { - yyVAL.alterOptions = nil + yyLOCAL = yyDollar[2].colNameUnion() } + yyVAL.union = yyLOCAL case 326: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1846 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL []AlterOption +//line sql.y:1868 { - yyVAL.alterOptions = yyDollar[1].alterOptions + yyLOCAL = nil } + 
yyVAL.union = yyLOCAL case 327: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1850 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []AlterOption +//line sql.y:1872 { - yyVAL.alterOptions = append(yyDollar[1].alterOptions, &OrderByOption{Cols: yyDollar[5].columns}) + yyLOCAL = yyDollar[1].alterOptionsUnion() } + yyVAL.union = yyLOCAL case 328: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1854 + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:1876 { - yyVAL.alterOptions = yyDollar[1].alterOptions + yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, &OrderByOption{Cols: yyDollar[5].columnsUnion()}) } case 329: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1858 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []AlterOption +//line sql.y:1880 { - yyVAL.alterOptions = append(yyDollar[1].alterOptions, yyDollar[3].alterOptions...) + yyLOCAL = yyDollar[1].alterOptionsUnion() } + yyVAL.union = yyLOCAL case 330: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:1862 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1884 { - yyVAL.alterOptions = append(append(yyDollar[1].alterOptions, yyDollar[3].alterOptions...), &OrderByOption{Cols: yyDollar[7].columns}) + yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].alterOptionsUnion()...) 
} case 331: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1868 + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL []AlterOption +//line sql.y:1888 { - yyVAL.alterOptions = []AlterOption{yyDollar[1].alterOption} + yyLOCAL = append(append(yyDollar[1].alterOptionsUnion(), yyDollar[3].alterOptionsUnion()...), &OrderByOption{Cols: yyDollar[7].columnsUnion()}) } + yyVAL.union = yyLOCAL case 332: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1872 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []AlterOption +//line sql.y:1894 { - yyVAL.alterOptions = append(yyDollar[1].alterOptions, yyDollar[3].alterOption) + yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } + yyVAL.union = yyLOCAL case 333: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1876 +//line sql.y:1898 { - yyVAL.alterOptions = append(yyDollar[1].alterOptions, yyDollar[3].alterOption) + yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } case 334: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1882 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:1902 { - yyVAL.alterOption = yyDollar[1].tableOptions + yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } case 335: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1886 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1908 { - yyVAL.alterOption = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinition} + yyLOCAL = yyDollar[1].tableOptionsUnion() } + yyVAL.union = yyLOCAL case 336: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1890 + var yyLOCAL AlterOption +//line sql.y:1912 { - yyVAL.alterOption = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinition} + yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } + yyVAL.union = yyLOCAL case 337: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1894 + var yyLOCAL AlterOption 
+//line sql.y:1916 { - yyVAL.alterOption = &AddIndexDefinition{IndexDefinition: yyDollar[2].indexDefinition} + yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } + yyVAL.union = yyLOCAL case 338: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1898 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1920 { - yyVAL.alterOption = &AddColumns{Columns: yyDollar[4].columnDefinitions} + yyLOCAL = &AddIndexDefinition{IndexDefinition: yyDollar[2].indexDefinitionUnion()} } + yyVAL.union = yyLOCAL case 339: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1902 + var yyLOCAL AlterOption +//line sql.y:1924 { - yyVAL.alterOption = &AddColumns{Columns: []*ColumnDefinition{yyDollar[3].columnDefinition}, First: yyDollar[4].colName, After: yyDollar[5].colName} + yyLOCAL = &AddColumns{Columns: yyDollar[4].columnDefinitionsUnion()} } + yyVAL.union = yyLOCAL case 340: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1906 + var yyLOCAL AlterOption +//line sql.y:1928 { - yyVAL.alterOption = &AlterColumn{Column: yyDollar[3].colName, DropDefault: true} + yyLOCAL = &AddColumns{Columns: []*ColumnDefinition{yyDollar[3].columnDefinitionUnion()}, First: yyDollar[4].colNameUnion(), After: yyDollar[5].colNameUnion()} } + yyVAL.union = yyLOCAL case 341: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1910 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1932 { - yyVAL.alterOption = &AlterColumn{Column: yyDollar[3].colName, DropDefault: false, DefaultVal: yyDollar[6].expr} + yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: true} } + yyVAL.union = yyLOCAL case 342: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:1914 + var yyLOCAL AlterOption +//line sql.y:1936 { - yyVAL.alterOption = &ChangeColumn{OldColumn: yyDollar[3].colName, NewColDefinition: yyDollar[4].columnDefinition, First: yyDollar[5].colName, After: yyDollar[6].colName} + yyLOCAL = &AlterColumn{Column: 
yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion()} } + yyVAL.union = yyLOCAL case 343: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1918 + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1940 { - yyVAL.alterOption = &ModifyColumn{NewColDefinition: yyDollar[3].columnDefinition, First: yyDollar[4].colName, After: yyDollar[5].colName} + yyLOCAL = &ChangeColumn{OldColumn: yyDollar[3].colNameUnion(), NewColDefinition: yyDollar[4].columnDefinitionUnion(), First: yyDollar[5].colNameUnion(), After: yyDollar[6].colNameUnion()} } + yyVAL.union = yyLOCAL case 344: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1922 + var yyLOCAL AlterOption +//line sql.y:1944 { - yyVAL.alterOption = &AlterCharset{CharacterSet: yyDollar[4].str, Collate: yyDollar[5].str} + yyLOCAL = &ModifyColumn{NewColDefinition: yyDollar[3].columnDefinitionUnion(), First: yyDollar[4].colNameUnion(), After: yyDollar[5].colNameUnion()} } + yyVAL.union = yyLOCAL case 345: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1926 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1948 { - yyVAL.alterOption = &KeyState{Enable: false} + yyLOCAL = &AlterCharset{CharacterSet: yyDollar[4].str, Collate: yyDollar[5].str} } + yyVAL.union = yyLOCAL case 346: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1930 + var yyLOCAL AlterOption +//line sql.y:1952 { - yyVAL.alterOption = &KeyState{Enable: true} + yyLOCAL = &KeyState{Enable: false} } + yyVAL.union = yyLOCAL case 347: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1934 + var yyLOCAL AlterOption +//line sql.y:1956 { - yyVAL.alterOption = &TablespaceOperation{Import: false} + yyLOCAL = &KeyState{Enable: true} } + yyVAL.union = yyLOCAL case 348: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1938 + var yyLOCAL AlterOption +//line sql.y:1960 { - yyVAL.alterOption = &TablespaceOperation{Import: true} + yyLOCAL = &TablespaceOperation{Import: false} } + yyVAL.union = yyLOCAL case 349: - yyDollar = 
yyS[yypt-3 : yypt+1] -//line sql.y:1942 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1964 { - yyVAL.alterOption = &DropColumn{Name: yyDollar[3].colName} + yyLOCAL = &TablespaceOperation{Import: true} } + yyVAL.union = yyLOCAL case 350: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1946 + var yyLOCAL AlterOption +//line sql.y:1968 { - yyVAL.alterOption = &DropKey{Type: NormalKeyType, Name: yyDollar[3].colIdent.String()} + yyLOCAL = &DropColumn{Name: yyDollar[3].colNameUnion()} } + yyVAL.union = yyLOCAL case 351: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1950 + var yyLOCAL AlterOption +//line sql.y:1972 { - yyVAL.alterOption = &DropKey{Type: PrimaryKeyType} + yyLOCAL = &DropKey{Type: NormalKeyType, Name: yyDollar[3].colIdent} } + yyVAL.union = yyLOCAL case 352: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1954 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1976 { - yyVAL.alterOption = &DropKey{Type: ForeignKeyType, Name: yyDollar[4].colIdent.String()} + yyLOCAL = &DropKey{Type: PrimaryKeyType} } + yyVAL.union = yyLOCAL case 353: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1958 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1980 { - yyVAL.alterOption = &Force{} + yyLOCAL = &DropKey{Type: ForeignKeyType, Name: yyDollar[4].colIdent} } + yyVAL.union = yyLOCAL case 354: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1962 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1984 { - yyVAL.alterOption = &RenameTable{Table: yyDollar[3].tableName} + yyLOCAL = &Force{} } + yyVAL.union = yyLOCAL case 355: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:1966 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1988 { - yyVAL.alterOption = &RenameIndex{OldName: yyDollar[3].colIdent.String(), NewName: yyDollar[5].colIdent.String()} + yyLOCAL = &RenameTableName{Table: yyDollar[3].tableName} } + yyVAL.union = yyLOCAL case 356: - yyDollar = 
yyS[yypt-1 : yypt+1] -//line sql.y:1972 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:1992 { - yyVAL.alterOptions = []AlterOption{yyDollar[1].alterOption} + yyLOCAL = &RenameIndex{OldName: yyDollar[3].colIdent, NewName: yyDollar[5].colIdent} } + yyVAL.union = yyLOCAL case 357: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1976 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []AlterOption +//line sql.y:1998 { - yyVAL.alterOptions = append(yyDollar[1].alterOptions, yyDollar[3].alterOption) + yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } + yyVAL.union = yyLOCAL case 358: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1982 +//line sql.y:2002 { - yyVAL.alterOption = AlgorithmValue(string(yyDollar[3].bytes)) + yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } case 359: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1986 + var yyLOCAL AlterOption +//line sql.y:2008 { - yyVAL.alterOption = AlgorithmValue(string(yyDollar[3].bytes)) + yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } + yyVAL.union = yyLOCAL case 360: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1990 + var yyLOCAL AlterOption +//line sql.y:2012 { - yyVAL.alterOption = AlgorithmValue(string(yyDollar[3].bytes)) + yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } + yyVAL.union = yyLOCAL case 361: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1994 + var yyLOCAL AlterOption +//line sql.y:2016 { - yyVAL.alterOption = &LockOption{Type: DefaultType} + yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } + yyVAL.union = yyLOCAL case 362: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1998 + var yyLOCAL AlterOption +//line sql.y:2020 { - yyVAL.alterOption = &LockOption{Type: NoneType} + yyLOCAL = &LockOption{Type: DefaultType} } + yyVAL.union = yyLOCAL case 363: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2002 + var yyLOCAL AlterOption +//line sql.y:2024 { - yyVAL.alterOption = &LockOption{Type: SharedType} + 
yyLOCAL = &LockOption{Type: NoneType} } + yyVAL.union = yyLOCAL case 364: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2006 + var yyLOCAL AlterOption +//line sql.y:2028 { - yyVAL.alterOption = &LockOption{Type: ExclusiveType} + yyLOCAL = &LockOption{Type: SharedType} } + yyVAL.union = yyLOCAL case 365: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2010 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL AlterOption +//line sql.y:2032 { - yyVAL.alterOption = &Validation{With: true} + yyLOCAL = &LockOption{Type: ExclusiveType} } + yyVAL.union = yyLOCAL case 366: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2014 + var yyLOCAL AlterOption +//line sql.y:2036 { - yyVAL.alterOption = &Validation{With: false} + yyLOCAL = &Validation{With: true} } + yyVAL.union = yyLOCAL case 367: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2020 + var yyLOCAL AlterOption +//line sql.y:2040 { - yyDollar[1].alterTable.FullyParsed = true - yyDollar[1].alterTable.AlterOptions = yyDollar[2].alterOptions - yyVAL.statement = yyDollar[1].alterTable + yyLOCAL = &Validation{With: false} } + yyVAL.union = yyLOCAL case 368: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2026 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2046 { - yyDollar[1].alterTable.FullyParsed = true - yyDollar[1].alterTable.AlterOptions = yyDollar[2].alterOptions - yyDollar[1].alterTable.PartitionSpec = &PartitionSpec{Action: RemoveAction} - yyVAL.statement = yyDollar[1].alterTable + yyDollar[1].alterTableUnion().FullyParsed = true + yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() + yyLOCAL = yyDollar[1].alterTableUnion() } + yyVAL.union = yyLOCAL case 369: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2033 + var yyLOCAL Statement +//line sql.y:2052 { - yyDollar[1].alterTable.FullyParsed = true - yyDollar[1].alterTable.AlterOptions = yyDollar[2].alterOptions - yyDollar[1].alterTable.PartitionSpec = yyDollar[4].partSpec - yyVAL.statement = yyDollar[1].alterTable + 
yyDollar[1].alterTableUnion().FullyParsed = true + yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() + yyDollar[1].alterTableUnion().PartitionSpec = &PartitionSpec{Action: RemoveAction} + yyLOCAL = yyDollar[1].alterTableUnion() } + yyVAL.union = yyLOCAL case 370: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2040 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2059 { - yyDollar[1].alterTable.FullyParsed = true - yyDollar[1].alterTable.PartitionSpec = yyDollar[2].partSpec - yyVAL.statement = yyDollar[1].alterTable + yyDollar[1].alterTableUnion().FullyParsed = true + yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() + yyDollar[1].alterTableUnion().PartitionSpec = yyDollar[4].partSpecUnion() + yyLOCAL = yyDollar[1].alterTableUnion() } + yyVAL.union = yyLOCAL case 371: - yyDollar = yyS[yypt-10 : yypt+1] -//line sql.y:2046 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2066 { - yyVAL.statement = &AlterView{ViewName: yyDollar[6].tableName.ToViewName(), Algorithm: yyDollar[2].str, Definer: yyDollar[3].str, Security: yyDollar[4].str, Columns: yyDollar[7].columns, Select: yyDollar[9].selStmt, CheckOption: yyDollar[10].str} + yyDollar[1].alterTableUnion().FullyParsed = true + yyDollar[1].alterTableUnion().PartitionSpec = yyDollar[2].partSpecUnion() + yyLOCAL = yyDollar[1].alterTableUnion() } + yyVAL.union = yyLOCAL case 372: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2050 + yyDollar = yyS[yypt-10 : yypt+1] + var yyLOCAL Statement +//line sql.y:2072 { - yyDollar[1].alterDatabase.FullyParsed = true - yyDollar[1].alterDatabase.DBName = yyDollar[2].colIdent.String() - yyDollar[1].alterDatabase.AlterOptions = yyDollar[3].collateAndCharsets - yyVAL.statement = yyDollar[1].alterDatabase + yyLOCAL = &AlterView{ViewName: yyDollar[6].tableName.ToViewName(), Algorithm: yyDollar[2].str, Definer: yyDollar[3].str, Security: yyDollar[4].str, Columns: yyDollar[7].columnsUnion(), 
Select: yyDollar[9].selStmtUnion(), CheckOption: yyDollar[10].str} } + yyVAL.union = yyLOCAL case 373: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2057 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2076 { - yyDollar[1].alterDatabase.FullyParsed = true - yyDollar[1].alterDatabase.DBName = yyDollar[2].colIdent.String() - yyDollar[1].alterDatabase.UpdateDataDirectory = true - yyVAL.statement = yyDollar[1].alterDatabase + yyDollar[1].alterDatabaseUnion().FullyParsed = true + yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].tableIdent + yyDollar[1].alterDatabaseUnion().AlterOptions = yyDollar[3].collateAndCharsetsUnion() + yyLOCAL = yyDollar[1].alterDatabaseUnion() } + yyVAL.union = yyLOCAL case 374: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Statement +//line sql.y:2083 + { + yyDollar[1].alterDatabaseUnion().FullyParsed = true + yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].tableIdent + yyDollar[1].alterDatabaseUnion().UpdateDataDirectory = true + yyLOCAL = yyDollar[1].alterDatabaseUnion() + } + yyVAL.union = yyLOCAL + case 375: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2064 + var yyLOCAL Statement +//line sql.y:2090 { - yyVAL.statement = &AlterVschema{ + yyLOCAL = &AlterVschema{ Action: CreateVindexDDLAction, Table: yyDollar[5].tableName, VindexSpec: &VindexSpec{ Name: NewColIdent(yyDollar[5].tableName.Name.String()), Type: yyDollar[6].colIdent, - Params: yyDollar[7].vindexParams, + Params: yyDollar[7].vindexParamsUnion(), }, } } - case 375: + yyVAL.union = yyLOCAL + case 376: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2076 + var yyLOCAL Statement +//line sql.y:2102 { - yyVAL.statement = &AlterVschema{ + yyLOCAL = &AlterVschema{ Action: DropVindexDDLAction, Table: yyDollar[5].tableName, VindexSpec: &VindexSpec{ @@ -7319,38 +8397,46 @@ yydefault: }, } } - case 376: + yyVAL.union = yyLOCAL + case 377: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2086 + var yyLOCAL Statement +//line sql.y:2112 { - yyVAL.statement = 
&AlterVschema{Action: AddVschemaTableDDLAction, Table: yyDollar[5].tableName} + yyLOCAL = &AlterVschema{Action: AddVschemaTableDDLAction, Table: yyDollar[5].tableName} } - case 377: + yyVAL.union = yyLOCAL + case 378: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2090 + var yyLOCAL Statement +//line sql.y:2116 { - yyVAL.statement = &AlterVschema{Action: DropVschemaTableDDLAction, Table: yyDollar[5].tableName} + yyLOCAL = &AlterVschema{Action: DropVschemaTableDDLAction, Table: yyDollar[5].tableName} } - case 378: + yyVAL.union = yyLOCAL + case 379: yyDollar = yyS[yypt-12 : yypt+1] -//line sql.y:2094 + var yyLOCAL Statement +//line sql.y:2120 { - yyVAL.statement = &AlterVschema{ + yyLOCAL = &AlterVschema{ Action: AddColVindexDDLAction, Table: yyDollar[4].tableName, VindexSpec: &VindexSpec{ Name: yyDollar[7].colIdent, Type: yyDollar[11].colIdent, - Params: yyDollar[12].vindexParams, + Params: yyDollar[12].vindexParamsUnion(), }, - VindexCols: yyDollar[9].columns, + VindexCols: yyDollar[9].columnsUnion(), } } - case 379: + yyVAL.union = yyLOCAL + case 380: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2107 + var yyLOCAL Statement +//line sql.y:2133 { - yyVAL.statement = &AlterVschema{ + yyLOCAL = &AlterVschema{ Action: DropColVindexDDLAction, Table: yyDollar[4].tableName, VindexSpec: &VindexSpec{ @@ -7358,17 +8444,21 @@ yydefault: }, } } - case 380: + yyVAL.union = yyLOCAL + case 381: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2117 + var yyLOCAL Statement +//line sql.y:2143 { - yyVAL.statement = &AlterVschema{Action: AddSequenceDDLAction, Table: yyDollar[5].tableName} + yyLOCAL = &AlterVschema{Action: AddSequenceDDLAction, Table: yyDollar[5].tableName} } - case 381: + yyVAL.union = yyLOCAL + case 382: yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:2121 + var yyLOCAL Statement +//line sql.y:2147 { - yyVAL.statement = &AlterVschema{ + yyLOCAL = &AlterVschema{ Action: AddAutoIncDDLAction, Table: yyDollar[4].tableName, AutoIncSpec: &AutoIncSpec{ @@ -7377,3253 +8467,4304 
@@ yydefault: }, } } - case 382: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2134 - { - yyVAL.partSpec = &PartitionSpec{Action: AddAction, Definitions: []*PartitionDefinition{yyDollar[4].partDef}} - } + yyVAL.union = yyLOCAL case 383: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2138 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2158 { - yyVAL.partSpec = &PartitionSpec{Action: DropAction, Names: yyDollar[3].partitions} + yyLOCAL = &AlterMigration{ + Type: RetryMigrationType, + UUID: string(yyDollar[3].str), + } } + yyVAL.union = yyLOCAL case 384: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2142 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2165 { - yyVAL.partSpec = &PartitionSpec{Action: ReorganizeAction, Names: yyDollar[3].partitions, Definitions: yyDollar[6].partDefs} + yyLOCAL = &AlterMigration{ + Type: CompleteMigrationType, + UUID: string(yyDollar[3].str), + } } + yyVAL.union = yyLOCAL case 385: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2146 + var yyLOCAL Statement +//line sql.y:2172 { - yyVAL.partSpec = &PartitionSpec{Action: DiscardAction, Names: yyDollar[3].partitions} + yyLOCAL = &AlterMigration{ + Type: CancelMigrationType, + UUID: string(yyDollar[3].str), + } } + yyVAL.union = yyLOCAL case 386: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2150 + var yyLOCAL Statement +//line sql.y:2179 { - yyVAL.partSpec = &PartitionSpec{Action: DiscardAction, IsAll: true} + yyLOCAL = &AlterMigration{ + Type: CancelAllMigrationType, + } } + yyVAL.union = yyLOCAL case 387: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2154 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2187 { - yyVAL.partSpec = &PartitionSpec{Action: ImportAction, Names: yyDollar[3].partitions} + yyLOCAL = &PartitionSpec{Action: AddAction, Definitions: []*PartitionDefinition{yyDollar[4].partDefUnion()}} } + yyVAL.union = yyLOCAL case 388: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2158 + yyDollar = 
yyS[yypt-3 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2191 { - yyVAL.partSpec = &PartitionSpec{Action: ImportAction, IsAll: true} + yyLOCAL = &PartitionSpec{Action: DropAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 389: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2162 + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2195 { - yyVAL.partSpec = &PartitionSpec{Action: TruncateAction, Names: yyDollar[3].partitions} + yyLOCAL = &PartitionSpec{Action: ReorganizeAction, Names: yyDollar[3].partitionsUnion(), Definitions: yyDollar[6].partDefsUnion()} } + yyVAL.union = yyLOCAL case 390: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2166 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2199 { - yyVAL.partSpec = &PartitionSpec{Action: TruncateAction, IsAll: true} + yyLOCAL = &PartitionSpec{Action: DiscardAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 391: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2170 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2203 { - yyVAL.partSpec = &PartitionSpec{Action: CoalesceAction, Number: NewIntLiteral(yyDollar[3].bytes)} + yyLOCAL = &PartitionSpec{Action: DiscardAction, IsAll: true} } + yyVAL.union = yyLOCAL case 392: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2174 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2207 { - yyVAL.partSpec = &PartitionSpec{Action: ExchangeAction, Names: Partitions{yyDollar[3].colIdent}, TableName: yyDollar[6].tableName, WithoutValidation: yyDollar[7].boolean} + yyLOCAL = &PartitionSpec{Action: ImportAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 393: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2178 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2211 { - yyVAL.partSpec = &PartitionSpec{Action: AnalyzeAction, Names: yyDollar[3].partitions} + yyLOCAL = 
&PartitionSpec{Action: ImportAction, IsAll: true} } + yyVAL.union = yyLOCAL case 394: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2182 + var yyLOCAL *PartitionSpec +//line sql.y:2215 { - yyVAL.partSpec = &PartitionSpec{Action: AnalyzeAction, IsAll: true} + yyLOCAL = &PartitionSpec{Action: TruncateAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 395: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2186 + var yyLOCAL *PartitionSpec +//line sql.y:2219 { - yyVAL.partSpec = &PartitionSpec{Action: CheckAction, Names: yyDollar[3].partitions} + yyLOCAL = &PartitionSpec{Action: TruncateAction, IsAll: true} } + yyVAL.union = yyLOCAL case 396: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2190 + var yyLOCAL *PartitionSpec +//line sql.y:2223 { - yyVAL.partSpec = &PartitionSpec{Action: CheckAction, IsAll: true} + yyLOCAL = &PartitionSpec{Action: CoalesceAction, Number: NewIntLiteral(yyDollar[3].str)} } + yyVAL.union = yyLOCAL case 397: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2194 + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2227 { - yyVAL.partSpec = &PartitionSpec{Action: OptimizeAction, Names: yyDollar[3].partitions} + yyLOCAL = &PartitionSpec{Action: ExchangeAction, Names: Partitions{yyDollar[3].colIdent}, TableName: yyDollar[6].tableName, WithoutValidation: yyDollar[7].booleanUnion()} } + yyVAL.union = yyLOCAL case 398: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2198 + var yyLOCAL *PartitionSpec +//line sql.y:2231 { - yyVAL.partSpec = &PartitionSpec{Action: OptimizeAction, IsAll: true} + yyLOCAL = &PartitionSpec{Action: AnalyzeAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 399: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2202 + var yyLOCAL *PartitionSpec +//line sql.y:2235 { - yyVAL.partSpec = &PartitionSpec{Action: RebuildAction, Names: yyDollar[3].partitions} + yyLOCAL = &PartitionSpec{Action: AnalyzeAction, IsAll: true} } + yyVAL.union = yyLOCAL case 400: yyDollar = 
yyS[yypt-3 : yypt+1] -//line sql.y:2206 + var yyLOCAL *PartitionSpec +//line sql.y:2239 { - yyVAL.partSpec = &PartitionSpec{Action: RebuildAction, IsAll: true} + yyLOCAL = &PartitionSpec{Action: CheckAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 401: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2210 + var yyLOCAL *PartitionSpec +//line sql.y:2243 { - yyVAL.partSpec = &PartitionSpec{Action: RepairAction, Names: yyDollar[3].partitions} + yyLOCAL = &PartitionSpec{Action: CheckAction, IsAll: true} } + yyVAL.union = yyLOCAL case 402: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2214 + var yyLOCAL *PartitionSpec +//line sql.y:2247 { - yyVAL.partSpec = &PartitionSpec{Action: RepairAction, IsAll: true} + yyLOCAL = &PartitionSpec{Action: OptimizeAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 403: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2218 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2251 { - yyVAL.partSpec = &PartitionSpec{Action: UpgradeAction} + yyLOCAL = &PartitionSpec{Action: OptimizeAction, IsAll: true} } + yyVAL.union = yyLOCAL case 404: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2223 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2255 { - yyVAL.boolean = false + yyLOCAL = &PartitionSpec{Action: RebuildAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 405: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2227 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2259 { - yyVAL.boolean = false + yyLOCAL = &PartitionSpec{Action: RebuildAction, IsAll: true} } + yyVAL.union = yyLOCAL case 406: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2231 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2263 { - yyVAL.boolean = true + yyLOCAL = &PartitionSpec{Action: RepairAction, Names: yyDollar[3].partitionsUnion()} } + yyVAL.union = yyLOCAL case 407: - yyDollar = 
yyS[yypt-1 : yypt+1] -//line sql.y:2238 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2267 { - yyVAL.partDefs = []*PartitionDefinition{yyDollar[1].partDef} + yyLOCAL = &PartitionSpec{Action: RepairAction, IsAll: true} } + yyVAL.union = yyLOCAL case 408: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2242 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *PartitionSpec +//line sql.y:2271 { - yyVAL.partDefs = append(yyDollar[1].partDefs, yyDollar[3].partDef) + yyLOCAL = &PartitionSpec{Action: UpgradeAction} } + yyVAL.union = yyLOCAL case 409: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2248 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL bool +//line sql.y:2276 { - yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Limit: yyDollar[7].expr} + yyLOCAL = false } + yyVAL.union = yyLOCAL case 410: - yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:2252 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL bool +//line sql.y:2280 { - yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Maxvalue: true} + yyLOCAL = false } + yyVAL.union = yyLOCAL case 411: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2258 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL bool +//line sql.y:2284 { - yyVAL.statement = yyDollar[3].ddl + yyLOCAL = true } + yyVAL.union = yyLOCAL case 412: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2264 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL []*PartitionDefinition +//line sql.y:2291 { - yyVAL.ddl = &DDL{Action: RenameDDLAction, FromTables: TableNames{yyDollar[1].tableName}, ToTables: TableNames{yyDollar[3].tableName}} + yyLOCAL = []*PartitionDefinition{yyDollar[1].partDefUnion()} } + yyVAL.union = yyLOCAL case 413: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2268 + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2295 { - yyVAL.ddl = yyDollar[1].ddl - yyVAL.ddl.FromTables = append(yyVAL.ddl.FromTables, yyDollar[3].tableName) - yyVAL.ddl.ToTables = append(yyVAL.ddl.ToTables, yyDollar[5].tableName) + 
yySLICE := (*[]*PartitionDefinition)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].partDefUnion()) } case 414: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2276 + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL *PartitionDefinition +//line sql.y:2301 { - yyVAL.statement = &DropTable{FromTables: yyDollar[4].tableNames, IfExists: yyDollar[3].boolean} + yyLOCAL = &PartitionDefinition{Name: yyDollar[2].colIdent, Limit: yyDollar[7].exprUnion()} } + yyVAL.union = yyLOCAL case 415: - yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:2280 + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL *PartitionDefinition +//line sql.y:2305 { - // Change this to an alter statement - yyVAL.statement = &DDL{Action: AlterDDLAction, Table: yyDollar[5].tableName} + yyLOCAL = &PartitionDefinition{Name: yyDollar[2].colIdent, Maxvalue: true} } + yyVAL.union = yyLOCAL case 416: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2285 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2311 { - yyVAL.statement = &DropView{FromTables: yyDollar[4].tableNames, IfExists: yyDollar[3].boolean} + yyLOCAL = &RenameTable{TablePairs: yyDollar[3].renameTablePairsUnion()} } + yyVAL.union = yyLOCAL case 417: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2289 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL []*RenameTablePair +//line sql.y:2317 { - yyVAL.statement = &DropDatabase{DBName: string(yyDollar[4].colIdent.String()), IfExists: yyDollar[3].boolean} + yyLOCAL = []*RenameTablePair{{FromTable: yyDollar[1].tableName, ToTable: yyDollar[3].tableName}} } + yyVAL.union = yyLOCAL case 418: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2295 + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:2321 { - yyVAL.statement = &DDL{Action: TruncateDDLAction, Table: yyDollar[3].tableName} + yySLICE := (*[]*RenameTablePair)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, &RenameTablePair{FromTable: yyDollar[3].tableName, ToTable: yyDollar[5].tableName}) } case 419: - yyDollar = yyS[yypt-2 : 
yypt+1] -//line sql.y:2299 + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Statement +//line sql.y:2327 { - yyVAL.statement = &DDL{Action: TruncateDDLAction, Table: yyDollar[2].tableName} + yyLOCAL = &DropTable{FromTables: yyDollar[5].tableNamesUnion(), IfExists: yyDollar[4].booleanUnion(), Temp: yyDollar[2].booleanUnion()} } + yyVAL.union = yyLOCAL case 420: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2304 + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Statement +//line sql.y:2331 { - yyVAL.statement = &OtherRead{} + // Change this to an alter statement + if yyDollar[3].colIdent.Lowered() == "primary" { + yyLOCAL = &AlterTable{Table: yyDollar[5].tableName, AlterOptions: append([]AlterOption{&DropKey{Type: PrimaryKeyType}}, yyDollar[6].alterOptionsUnion()...)} + } else { + yyLOCAL = &AlterTable{Table: yyDollar[5].tableName, AlterOptions: append([]AlterOption{&DropKey{Type: NormalKeyType, Name: yyDollar[3].colIdent}}, yyDollar[6].alterOptionsUnion()...)} + } } + yyVAL.union = yyLOCAL case 421: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2310 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2340 { - yyVAL.statement = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilter}} + yyLOCAL = &DropView{FromTables: yyDollar[4].tableNamesUnion(), IfExists: yyDollar[3].booleanUnion()} } + yyVAL.union = yyLOCAL case 422: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2314 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2344 { - yyVAL.statement = &Show{&ShowBasic{Command: Collation, Filter: yyDollar[3].showFilter}} + yyLOCAL = &DropDatabase{Comments: Comments(yyDollar[3].strs), DBName: yyDollar[5].tableIdent, IfExists: yyDollar[4].booleanUnion()} } + yyVAL.union = yyLOCAL case 423: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2318 + var yyLOCAL Statement +//line sql.y:2350 { - yyVAL.statement = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilter}} + yyLOCAL = &TruncateTable{Table: 
yyDollar[3].tableName} } + yyVAL.union = yyLOCAL case 424: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2322 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2354 { - yyVAL.statement = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilter}} + yyLOCAL = &TruncateTable{Table: yyDollar[2].tableName} } + yyVAL.union = yyLOCAL case 425: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2326 + var yyLOCAL Statement +//line sql.y:2359 { - yyVAL.statement = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilter}} + yyLOCAL = &OtherRead{} } + yyVAL.union = yyLOCAL case 426: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2330 + var yyLOCAL Statement +//line sql.y:2365 { - yyVAL.statement = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 427: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2334 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2369 { - yyVAL.statement = &Show{&ShowBasic{Command: Function, Filter: yyDollar[4].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Collation, Filter: yyDollar[3].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 428: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2338 + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL Statement +//line sql.y:2373 { - yyVAL.statement = &Show{&ShowBasic{Command: Privilege}} + yyLOCAL = &Show{&ShowBasic{Full: yyDollar[2].booleanUnion(), Command: Column, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].tableIdent, Filter: yyDollar[7].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 429: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2342 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2377 { - yyVAL.statement = &Show{&ShowBasic{Command: Procedure, Filter: yyDollar[4].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } + 
yyVAL.union = yyLOCAL case 430: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2346 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2381 { - yyVAL.statement = &Show{&ShowBasic{Command: StatusSession, Filter: yyDollar[4].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 431: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2350 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2385 { - yyVAL.statement = &Show{&ShowBasic{Command: StatusGlobal, Filter: yyDollar[4].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 432: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2354 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2389 { - yyVAL.statement = &Show{&ShowBasic{Command: VariableSession, Filter: yyDollar[4].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 433: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2358 + var yyLOCAL Statement +//line sql.y:2393 { - yyVAL.statement = &Show{&ShowBasic{Command: VariableGlobal, Filter: yyDollar[4].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Function, Filter: yyDollar[4].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 434: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2362 + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL Statement +//line sql.y:2397 { - yyVAL.statement = &Show{&ShowTableStatus{DatabaseName: yyDollar[4].str, Filter: yyDollar[5].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: Index, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].tableIdent, Filter: yyDollar[7].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 435: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2366 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2401 { - yyVAL.statement = &Show{&ShowColumns{Full: 
yyDollar[2].str, Table: yyDollar[5].tableName, DbName: yyDollar[6].str, Filter: yyDollar[7].showFilter}} + yyLOCAL = &Show{&ShowBasic{Command: OpenTable, DbName: yyDollar[4].tableIdent, Filter: yyDollar[5].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 436: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2370 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2405 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].colIdent.String()), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: Privilege}} } + yyVAL.union = yyLOCAL case 437: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2374 + var yyLOCAL Statement +//line sql.y:2409 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: Procedure, Filter: yyDollar[4].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 438: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2378 + var yyLOCAL Statement +//line sql.y:2413 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Table: yyDollar[4].tableName, Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: StatusSession, Filter: yyDollar[4].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 439: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2383 + var yyLOCAL Statement +//line sql.y:2417 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].colIdent.String()), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: StatusGlobal, Filter: yyDollar[4].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 440: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2387 + var yyLOCAL Statement +//line sql.y:2421 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: VariableSession, 
Filter: yyDollar[4].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 441: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2391 + var yyLOCAL Statement +//line sql.y:2425 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Table: yyDollar[4].tableName, Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: VariableGlobal, Filter: yyDollar[4].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 442: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2395 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2429 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: TableStatus, DbName: yyDollar[4].tableIdent, Filter: yyDollar[5].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 443: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2399 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2433 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: Table, Full: yyDollar[2].booleanUnion(), DbName: yyDollar[4].tableIdent, Filter: yyDollar[5].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 444: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2403 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2437 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowBasic{Command: Trigger, DbName: yyDollar[3].tableIdent, Filter: yyDollar[4].showFilterUnion()}} } + yyVAL.union = yyLOCAL case 445: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2407 + var yyLOCAL Statement +//line sql.y:2441 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Table: yyDollar[4].tableName, Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowCreate{Command: 
CreateDb, Op: yyDollar[4].tableName}} } + yyVAL.union = yyLOCAL case 446: - yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2411 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2445 { - showTablesOpt := &ShowTablesOpt{DbName: yyDollar[6].str, Filter: yyDollar[7].showFilter} - yyVAL.statement = &Show{&ShowLegacy{Extended: string(yyDollar[2].str), Type: string(yyDollar[3].str), ShowTablesOpt: showTablesOpt, OnTable: yyDollar[5].tableName, Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowCreate{Command: CreateE, Op: yyDollar[4].tableName}} } + yyVAL.union = yyLOCAL case 447: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2416 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2449 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowCreate{Command: CreateF, Op: yyDollar[4].tableName}} } + yyVAL.union = yyLOCAL case 448: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2420 + var yyLOCAL Statement +//line sql.y:2453 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Table: yyDollar[4].tableName, Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowCreate{Command: CreateProc, Op: yyDollar[4].tableName}} } + yyVAL.union = yyLOCAL case 449: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2424 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2457 { - // this is ugly, but I couldn't find a better way for now - if yyDollar[3].str == "processlist" { - yyVAL.statement = &Show{&ShowLegacy{Type: yyDollar[3].str, Scope: ImplicitScope}} - } else { - showTablesOpt := &ShowTablesOpt{Full: yyDollar[2].str, DbName: yyDollar[4].str, Filter: yyDollar[5].showFilter} - yyVAL.statement = &Show{&ShowLegacy{Type: yyDollar[3].str, ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}} - } + yyLOCAL = &Show{&ShowCreate{Command: CreateTbl, Op: yyDollar[4].tableName}} } + yyVAL.union = yyLOCAL case 450: yyDollar = yyS[yypt-4 
: yypt+1] -//line sql.y:2434 + var yyLOCAL Statement +//line sql.y:2461 { - showTablesOpt := &ShowTablesOpt{Filter: yyDollar[4].showFilter} - yyVAL.statement = &Show{&ShowLegacy{Scope: VitessMetadataScope, Type: string(yyDollar[3].bytes), ShowTablesOpt: showTablesOpt}} + yyLOCAL = &Show{&ShowCreate{Command: CreateTr, Op: yyDollar[4].tableName}} } + yyVAL.union = yyLOCAL case 451: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2439 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2465 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowCreate{Command: CreateV, Op: yyDollar[4].tableName}} } + yyVAL.union = yyLOCAL case 452: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2443 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2469 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 453: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2447 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2473 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), OnTable: yyDollar[5].tableName, Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].colIdent.String()), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 454: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2451 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2477 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 
455: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2456 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2481 { - // This should probably be a different type (ShowVitessTopoOpt), but - // just getting the thing working for now - showTablesOpt := &ShowTablesOpt{Filter: yyDollar[3].showFilter} - yyVAL.statement = &Show{&ShowLegacy{Type: yyDollar[2].str, ShowTablesOpt: showTablesOpt}} + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 456: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2470 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2485 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].colIdent.String()), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Table: yyDollar[4].tableName, Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 457: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2474 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2489 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 458: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2478 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2493 { - yyVAL.statement = &Show{&ShowLegacy{Type: string(yyDollar[2].bytes), Scope: ImplicitScope}} + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Table: yyDollar[4].tableName, Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 459: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2484 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2497 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[3].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 460: - 
yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2488 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2501 { - yyVAL.str = string(yyDollar[1].bytes) + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[4].showFilterUnion()} + yyLOCAL = &Show{&ShowLegacy{Scope: VitessMetadataScope, Type: string(yyDollar[3].str), ShowTablesOpt: showTablesOpt}} } + yyVAL.union = yyLOCAL case 461: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2494 + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2506 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = &Show{&ShowBasic{Command: VitessMigrations, Filter: yyDollar[4].showFilterUnion(), DbName: yyDollar[3].tableIdent}} } + yyVAL.union = yyLOCAL case 462: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2498 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2510 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 463: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2504 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2514 { - yyVAL.str = "" + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 464: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2508 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2518 { - yyVAL.str = "extended " + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), OnTable: yyDollar[5].tableName, Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 465: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2514 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2522 { - yyVAL.str = "" + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 466: - yyDollar = yyS[yypt-1 : 
yypt+1] -//line sql.y:2518 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2527 { - yyVAL.str = "full " + // This should probably be a different type (ShowVitessTopoOpt), but + // just getting the thing working for now + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[3].showFilterUnion()} + yyLOCAL = &Show{&ShowLegacy{Type: yyDollar[2].str, ShowTablesOpt: showTablesOpt}} } + yyVAL.union = yyLOCAL case 467: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2524 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2541 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].colIdent.String()), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 468: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2528 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2545 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 469: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2534 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2549 { - yyVAL.str = "" + yyLOCAL = &Show{&ShowLegacy{Type: string(yyDollar[2].str), Scope: ImplicitScope}} } + yyVAL.union = yyLOCAL case 470: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2538 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2555 { - yyVAL.str = yyDollar[2].tableIdent.v + yyVAL.str = string(yyDollar[1].str) } case 471: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2542 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2559 { - yyVAL.str = yyDollar[2].tableIdent.v + yyVAL.str = string(yyDollar[1].str) } case 472: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2548 +//line sql.y:2565 { - yyVAL.showFilter = nil + yyVAL.str = "" } case 473: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2552 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2569 { - yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} + 
yyVAL.str = "extended " } case 474: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2556 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL bool +//line sql.y:2575 { - yyVAL.showFilter = &ShowFilter{Filter: yyDollar[2].expr} + yyLOCAL = false } + yyVAL.union = yyLOCAL case 475: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2562 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:2579 { - yyVAL.showFilter = nil + yyLOCAL = true } + yyVAL.union = yyLOCAL case 476: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2566 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2585 { - yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} + yyVAL.str = string(yyDollar[1].str) } case 477: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2572 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2589 { - yyVAL.empty = struct{}{} + yyVAL.str = string(yyDollar[1].str) } case 478: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2576 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2595 { - yyVAL.empty = struct{}{} + yyVAL.tableIdent = NewTableIdent("") } case 479: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2580 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2599 { - yyVAL.empty = struct{}{} + yyVAL.tableIdent = yyDollar[2].tableIdent } case 480: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2586 +//line sql.y:2603 { - yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} + yyVAL.tableIdent = yyDollar[2].tableIdent } case 481: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2590 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *ShowFilter +//line sql.y:2609 { - yyVAL.statement = &Use{DBName: TableIdent{v: ""}} + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 482: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2596 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ShowFilter +//line sql.y:2613 { - yyVAL.statement = &Begin{} + yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } + yyVAL.union = yyLOCAL case 483: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2600 + 
var yyLOCAL *ShowFilter +//line sql.y:2617 { - yyVAL.statement = &Begin{} + yyLOCAL = &ShowFilter{Filter: yyDollar[2].exprUnion()} } + yyVAL.union = yyLOCAL case 484: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2606 + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *ShowFilter +//line sql.y:2623 { - yyVAL.statement = &Commit{} + yyLOCAL = nil } + yyVAL.union = yyLOCAL case 485: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2612 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ShowFilter +//line sql.y:2627 { - yyVAL.statement = &Rollback{} + yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } + yyVAL.union = yyLOCAL case 486: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2616 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2633 { - yyVAL.statement = &SRollback{Name: yyDollar[5].colIdent} + yyVAL.empty = struct{}{} } case 487: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2621 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2637 { yyVAL.empty = struct{}{} } case 488: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2623 +//line sql.y:2641 { yyVAL.empty = struct{}{} } case 489: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2626 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2647 { - yyVAL.empty = struct{}{} + yyLOCAL = &Use{DBName: yyDollar[2].tableIdent} } + yyVAL.union = yyLOCAL case 490: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2628 + var yyLOCAL Statement +//line sql.y:2651 { - yyVAL.empty = struct{}{} + yyLOCAL = &Use{DBName: TableIdent{v: ""}} } + yyVAL.union = yyLOCAL case 491: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2633 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Statement +//line sql.y:2657 { - yyVAL.statement = &Savepoint{Name: yyDollar[2].colIdent} + yyLOCAL = &Begin{} } + yyVAL.union = yyLOCAL case 492: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2639 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2661 { - yyVAL.statement = &Release{Name: yyDollar[3].colIdent} + yyLOCAL = 
&Begin{} } + yyVAL.union = yyLOCAL case 493: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2644 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Statement +//line sql.y:2667 { - yyVAL.explainType = EmptyType + yyLOCAL = &Commit{} } + yyVAL.union = yyLOCAL case 494: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2648 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Statement +//line sql.y:2673 { - yyVAL.explainType = JSONType + yyLOCAL = &Rollback{} } + yyVAL.union = yyLOCAL case 495: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2652 + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:2677 { - yyVAL.explainType = TreeType + yyLOCAL = &SRollback{Name: yyDollar[5].colIdent} } + yyVAL.union = yyLOCAL case 496: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2656 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2682 { - yyVAL.explainType = VitessType + yyVAL.empty = struct{}{} } case 497: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2660 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2684 { - yyVAL.explainType = TraditionalType + yyVAL.empty = struct{}{} } case 498: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2664 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2687 { - yyVAL.explainType = AnalyzeType + yyVAL.empty = struct{}{} } case 499: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2670 +//line sql.y:2689 { - yyVAL.bytes = yyDollar[1].bytes + yyVAL.empty = struct{}{} } case 500: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2674 + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement +//line sql.y:2694 { - yyVAL.bytes = yyDollar[1].bytes + yyLOCAL = &Savepoint{Name: yyDollar[2].colIdent} } + yyVAL.union = yyLOCAL case 501: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2678 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2700 { - yyVAL.bytes = yyDollar[1].bytes + yyLOCAL = &Release{Name: yyDollar[3].colIdent} } + yyVAL.union = yyLOCAL case 502: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2684 + yyDollar 
= yyS[yypt-0 : yypt+1] + var yyLOCAL ExplainType +//line sql.y:2705 { - yyVAL.statement = yyDollar[1].selStmt + yyLOCAL = EmptyType } + yyVAL.union = yyLOCAL case 503: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2688 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL ExplainType +//line sql.y:2709 { - yyVAL.statement = yyDollar[1].statement + yyLOCAL = JSONType } + yyVAL.union = yyLOCAL case 504: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2692 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL ExplainType +//line sql.y:2713 { - yyVAL.statement = yyDollar[1].statement + yyLOCAL = TreeType } + yyVAL.union = yyLOCAL case 505: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2696 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL ExplainType +//line sql.y:2717 { - yyVAL.statement = yyDollar[1].statement + yyLOCAL = VitessType } + yyVAL.union = yyLOCAL case 506: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2701 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL ExplainType +//line sql.y:2721 { - yyVAL.str = "" + yyLOCAL = TraditionalType } + yyVAL.union = yyLOCAL case 507: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2705 + var yyLOCAL ExplainType +//line sql.y:2725 { - yyVAL.str = "" + yyLOCAL = AnalyzeType } + yyVAL.union = yyLOCAL case 508: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2709 +//line sql.y:2731 { - yyVAL.str = "" + yyVAL.str = yyDollar[1].str } case 509: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2715 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2735 { - yyVAL.statement = &OtherRead{} + yyVAL.str = yyDollar[1].str } case 510: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2719 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2739 { - yyVAL.statement = &Explain{Type: yyDollar[2].explainType, Statement: yyDollar[3].statement} + yyVAL.str = yyDollar[1].str } case 511: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2725 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Statement +//line sql.y:2745 { - yyVAL.statement = &OtherAdmin{} + yyLOCAL = 
yyDollar[1].selStmtUnion() } + yyVAL.union = yyLOCAL case 512: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2729 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Statement +//line sql.y:2749 { - yyVAL.statement = &OtherAdmin{} + yyLOCAL = yyDollar[1].statementUnion() } + yyVAL.union = yyLOCAL case 513: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2735 + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Statement +//line sql.y:2753 { - yyVAL.statement = &LockTables{Tables: yyDollar[3].tableAndLockTypes} + yyLOCAL = yyDollar[1].statementUnion() } + yyVAL.union = yyLOCAL case 514: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2741 + var yyLOCAL Statement +//line sql.y:2757 { - yyVAL.tableAndLockTypes = TableAndLockTypes{yyDollar[1].tableAndLockType} + yyLOCAL = yyDollar[1].statementUnion() } + yyVAL.union = yyLOCAL case 515: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2745 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2762 { - yyVAL.tableAndLockTypes = append(yyDollar[1].tableAndLockTypes, yyDollar[3].tableAndLockType) + yyVAL.str = "" } case 516: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2751 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2766 { - yyVAL.tableAndLockType = &TableAndLockType{Table: yyDollar[1].aliasedTableName, Lock: yyDollar[2].lockType} + yyVAL.str = yyDollar[1].colIdent.val } case 517: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2757 +//line sql.y:2770 { - yyVAL.lockType = Read + yyVAL.str = encodeSQLString(yyDollar[1].str) } case 518: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2761 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2776 { - yyVAL.lockType = ReadLocal + yyLOCAL = &ExplainTab{Table: yyDollar[2].tableName, Wild: yyDollar[3].str} } + yyVAL.union = yyLOCAL case 519: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2765 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2780 { - yyVAL.lockType = Write + yyLOCAL = &ExplainStmt{Type: yyDollar[2].explainTypeUnion(), Statement: 
yyDollar[3].statementUnion()} } + yyVAL.union = yyLOCAL case 520: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2769 + var yyLOCAL Statement +//line sql.y:2786 { - yyVAL.lockType = LowPriorityWrite + yyLOCAL = &OtherAdmin{} } + yyVAL.union = yyLOCAL case 521: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2775 + var yyLOCAL Statement +//line sql.y:2790 { - yyVAL.statement = &UnlockTables{} + yyLOCAL = &OtherAdmin{} } + yyVAL.union = yyLOCAL case 522: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2781 + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2796 { - yyVAL.statement = &DDL{Action: FlushDDLAction} + yyLOCAL = &LockTables{Tables: yyDollar[3].tableAndLockTypesUnion()} } + yyVAL.union = yyLOCAL case 523: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL TableAndLockTypes +//line sql.y:2802 + { + yyLOCAL = TableAndLockTypes{yyDollar[1].tableAndLockTypeUnion()} + } + yyVAL.union = yyLOCAL + case 524: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2806 + { + yySLICE := (*TableAndLockTypes)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].tableAndLockTypeUnion()) + } + case 525: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *TableAndLockType +//line sql.y:2812 + { + yyLOCAL = &TableAndLockType{Table: yyDollar[1].aliasedTableNameUnion(), Lock: yyDollar[2].lockTypeUnion()} + } + yyVAL.union = yyLOCAL + case 526: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL LockType +//line sql.y:2818 + { + yyLOCAL = Read + } + yyVAL.union = yyLOCAL + case 527: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL LockType +//line sql.y:2822 + { + yyLOCAL = ReadLocal + } + yyVAL.union = yyLOCAL + case 528: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL LockType +//line sql.y:2826 + { + yyLOCAL = Write + } + yyVAL.union = yyLOCAL + case 529: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL LockType +//line sql.y:2830 + { + yyLOCAL = LowPriorityWrite + } + yyVAL.union = yyLOCAL + case 530: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Statement 
+//line sql.y:2836 + { + yyLOCAL = &UnlockTables{} + } + yyVAL.union = yyLOCAL + case 531: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2842 + { + yyLOCAL = &RevertMigration{UUID: string(yyDollar[3].str)} + } + yyVAL.union = yyLOCAL + case 532: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2848 + { + yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), FlushOptions: yyDollar[3].strs} + } + yyVAL.union = yyLOCAL + case 533: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:2852 + { + yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion()} + } + yyVAL.union = yyLOCAL + case 534: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Statement +//line sql.y:2856 + { + yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), WithLock: true} + } + yyVAL.union = yyLOCAL + case 535: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Statement +//line sql.y:2860 + { + yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion()} + } + yyVAL.union = yyLOCAL + case 536: + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL Statement +//line sql.y:2864 + { + yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), WithLock: true} + } + yyVAL.union = yyLOCAL + case 537: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Statement +//line sql.y:2868 + { + yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), ForExport: true} + } + yyVAL.union = yyLOCAL + case 538: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2874 + { + yyVAL.strs = []string{yyDollar[1].str} + } + case 539: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2878 + { + yyVAL.strs = append(yyDollar[1].strs, yyDollar[3].str) + } + case 540: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2884 + { + yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + } + case 541: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2888 + { + yyVAL.str 
= string(yyDollar[1].str) + " " + string(yyDollar[2].str) + } + case 542: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2892 + { + yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + } + case 543: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2896 + { + yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + } + case 544: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2900 + { + yyVAL.str = string(yyDollar[1].str) + } + case 545: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2904 + { + yyVAL.str = string(yyDollar[1].str) + } + case 546: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2908 + { + yyVAL.str = string(yyDollar[1].str) + } + case 547: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2912 + { + yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + yyDollar[3].str + } + case 548: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2916 + { + yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + } + case 549: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2920 + { + yyVAL.str = string(yyDollar[1].str) + } + case 550: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2924 + { + yyVAL.str = string(yyDollar[1].str) + } + case 551: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2928 + { + yyVAL.str = string(yyDollar[1].str) + } + case 552: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2785 + var yyLOCAL bool +//line sql.y:2933 + { + yyLOCAL = false + } + yyVAL.union = yyLOCAL + case 553: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:2937 + { + yyLOCAL = true + } + yyVAL.union = yyLOCAL + case 554: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:2941 + { + yyLOCAL = true + } + yyVAL.union = yyLOCAL + case 555: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2946 + { + yyVAL.str = "" + } + case 556: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2950 + { + yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) + " " + 
yyDollar[3].colIdent.String() + } + case 557: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:2955 { setAllowComments(yylex, true) } - case 524: + case 558: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2789 +//line sql.y:2959 { - yyVAL.bytes2 = yyDollar[2].bytes2 + yyVAL.strs = yyDollar[2].strs setAllowComments(yylex, false) } - case 525: + case 559: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2795 +//line sql.y:2965 { - yyVAL.bytes2 = nil + yyVAL.strs = nil } - case 526: + case 560: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2799 +//line sql.y:2969 { - yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) + yyVAL.strs = append(yyDollar[1].strs, yyDollar[2].str) } - case 527: + case 561: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2805 + var yyLOCAL bool +//line sql.y:2975 { - yyVAL.boolean = true + yyLOCAL = true } - case 528: + yyVAL.union = yyLOCAL + case 562: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2809 + var yyLOCAL bool +//line sql.y:2979 { - yyVAL.boolean = false + yyLOCAL = false } - case 529: + yyVAL.union = yyLOCAL + case 563: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2813 + var yyLOCAL bool +//line sql.y:2983 { - yyVAL.boolean = true + yyLOCAL = true } - case 530: + yyVAL.union = yyLOCAL + case 564: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2818 +//line sql.y:2988 { yyVAL.str = "" } - case 531: + case 565: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2822 +//line sql.y:2992 { yyVAL.str = SQLNoCacheStr } - case 532: + case 566: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2826 +//line sql.y:2996 { yyVAL.str = SQLCacheStr } - case 533: + case 567: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2831 + var yyLOCAL bool +//line sql.y:3001 { - yyVAL.boolean = false + yyLOCAL = false } - case 534: + yyVAL.union = yyLOCAL + case 568: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2835 + var yyLOCAL bool +//line sql.y:3005 { - yyVAL.boolean = true + yyLOCAL = true } - case 535: + yyVAL.union = yyLOCAL + case 569: yyDollar = yyS[yypt-1 
: yypt+1] -//line sql.y:2839 + var yyLOCAL bool +//line sql.y:3009 { - yyVAL.boolean = true + yyLOCAL = true } - case 536: + yyVAL.union = yyLOCAL + case 570: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2844 + var yyLOCAL SelectExprs +//line sql.y:3014 { - yyVAL.selectExprs = nil + yyLOCAL = nil } - case 537: + yyVAL.union = yyLOCAL + case 571: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2848 + var yyLOCAL SelectExprs +//line sql.y:3018 { - yyVAL.selectExprs = yyDollar[1].selectExprs + yyLOCAL = yyDollar[1].selectExprsUnion() } - case 538: + yyVAL.union = yyLOCAL + case 572: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2853 +//line sql.y:3023 { yyVAL.strs = nil } - case 539: + case 573: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2857 +//line sql.y:3027 { yyVAL.strs = []string{yyDollar[1].str} } - case 540: + case 574: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2861 +//line sql.y:3031 { // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce' yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str} } - case 541: + case 575: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2865 +//line sql.y:3035 { yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str} } - case 542: + case 576: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:2869 +//line sql.y:3039 { yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str, yyDollar[4].str} } - case 543: + case 577: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2875 +//line sql.y:3045 { yyVAL.str = SQLNoCacheStr } - case 544: + case 578: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2879 +//line sql.y:3049 { yyVAL.str = SQLCacheStr } - case 545: + case 579: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2883 +//line sql.y:3053 { yyVAL.str = DistinctStr } - case 546: + case 580: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2887 +//line sql.y:3057 { yyVAL.str = DistinctStr } - case 547: + case 581: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2891 
+//line sql.y:3061 { yyVAL.str = StraightJoinHint } - case 548: + case 582: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2895 +//line sql.y:3065 { yyVAL.str = SQLCalcFoundRowsStr } - case 549: + case 583: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2901 +//line sql.y:3069 { - yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} + yyVAL.str = AllStr // These are not picked up by NewSelect, and so ALL will be dropped. But this is OK, since it's redundant anyway } - case 550: + case 584: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL SelectExprs +//line sql.y:3075 + { + yyLOCAL = SelectExprs{yyDollar[1].selectExprUnion()} + } + yyVAL.union = yyLOCAL + case 585: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2905 +//line sql.y:3079 { - yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) + yySLICE := (*SelectExprs)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].selectExprUnion()) } - case 551: + case 586: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2911 + var yyLOCAL SelectExpr +//line sql.y:3085 { - yyVAL.selectExpr = &StarExpr{} + yyLOCAL = &StarExpr{} } - case 552: + yyVAL.union = yyLOCAL + case 587: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2915 + var yyLOCAL SelectExpr +//line sql.y:3089 { - yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} + yyLOCAL = &AliasedExpr{Expr: yyDollar[1].exprUnion(), As: yyDollar[2].colIdent} } - case 553: + yyVAL.union = yyLOCAL + case 588: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2919 + var yyLOCAL SelectExpr +//line sql.y:3093 { - yyVAL.selectExpr = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} + yyLOCAL = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} } - case 554: + yyVAL.union = yyLOCAL + case 589: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2923 + var yyLOCAL SelectExpr +//line sql.y:3097 { - yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} + 
yyLOCAL = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} } - case 555: + yyVAL.union = yyLOCAL + case 590: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2928 +//line sql.y:3102 { yyVAL.colIdent = ColIdent{} } - case 556: + case 591: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2932 +//line sql.y:3106 { yyVAL.colIdent = yyDollar[1].colIdent } - case 557: + case 592: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2936 +//line sql.y:3110 { yyVAL.colIdent = yyDollar[2].colIdent } - case 559: + case 594: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2943 +//line sql.y:3117 { - yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + yyVAL.colIdent = NewColIdent(string(yyDollar[1].str)) } - case 560: + case 595: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2948 + var yyLOCAL TableExprs +//line sql.y:3122 { - yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} + yyLOCAL = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} } - case 561: + yyVAL.union = yyLOCAL + case 596: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2952 + var yyLOCAL TableExprs +//line sql.y:3126 { - yyVAL.tableExprs = yyDollar[2].tableExprs + yyLOCAL = yyDollar[2].tableExprsUnion() } - case 562: + yyVAL.union = yyLOCAL + case 597: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2958 + var yyLOCAL TableExprs +//line sql.y:3132 { - yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} + yyLOCAL = TableExprs{yyDollar[1].tableExprUnion()} } - case 563: + yyVAL.union = yyLOCAL + case 598: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2962 +//line sql.y:3136 { - yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) + yySLICE := (*TableExprs)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].tableExprUnion()) } - case 566: + case 601: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2972 + var yyLOCAL TableExpr +//line sql.y:3146 { - yyVAL.tableExpr = 
yyDollar[1].aliasedTableName + yyLOCAL = yyDollar[1].aliasedTableNameUnion() } - case 567: + yyVAL.union = yyLOCAL + case 602: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2976 + var yyLOCAL TableExpr +//line sql.y:3150 { - yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].derivedTable, As: yyDollar[3].tableIdent} + yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].derivedTableUnion(), As: yyDollar[3].tableIdent} } - case 568: + yyVAL.union = yyLOCAL + case 603: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2980 + var yyLOCAL TableExpr +//line sql.y:3154 { - yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} + yyLOCAL = &ParenTableExpr{Exprs: yyDollar[2].tableExprsUnion()} } - case 569: + yyVAL.union = yyLOCAL + case 604: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2986 + var yyLOCAL *DerivedTable +//line sql.y:3160 { - yyVAL.derivedTable = &DerivedTable{yyDollar[2].selStmt} + yyLOCAL = &DerivedTable{yyDollar[2].selStmtUnion()} } - case 570: + yyVAL.union = yyLOCAL + case 605: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2992 + var yyLOCAL *AliasedTableExpr +//line sql.y:3166 { - yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} + yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHintsUnion()} } - case 571: + yyVAL.union = yyLOCAL + case 606: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:2996 + var yyLOCAL *AliasedTableExpr +//line sql.y:3170 { - yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitions, As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHints} + yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitionsUnion(), As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHintsUnion()} } - case 572: + yyVAL.union = yyLOCAL + case 607: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3001 + var yyLOCAL Columns +//line 
sql.y:3175 { - yyVAL.columns = nil + yyLOCAL = nil } - case 573: + yyVAL.union = yyLOCAL + case 608: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3005 + var yyLOCAL Columns +//line sql.y:3179 { - yyVAL.columns = yyDollar[2].columns + yyLOCAL = yyDollar[2].columnsUnion() } - case 574: + yyVAL.union = yyLOCAL + case 609: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3011 + var yyLOCAL Columns +//line sql.y:3185 { - yyVAL.columns = Columns{yyDollar[1].colIdent} + yyLOCAL = Columns{yyDollar[1].colIdent} } - case 575: + yyVAL.union = yyLOCAL + case 610: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3015 +//line sql.y:3189 { - yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + yySLICE := (*Columns)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].colIdent) } - case 576: + case 611: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3021 + var yyLOCAL Columns +//line sql.y:3195 { - yyVAL.columns = Columns{yyDollar[1].colIdent} + yyLOCAL = Columns{yyDollar[1].colIdent} } - case 577: + yyVAL.union = yyLOCAL + case 612: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3025 + var yyLOCAL Columns +//line sql.y:3199 { - yyVAL.columns = Columns{NewColIdent(string(yyDollar[1].bytes))} + yyLOCAL = Columns{NewColIdent(string(yyDollar[1].str))} } - case 578: + yyVAL.union = yyLOCAL + case 613: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3029 +//line sql.y:3203 { - yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + yySLICE := (*Columns)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].colIdent) } - case 579: + case 614: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3033 +//line sql.y:3207 { - yyVAL.columns = append(yyVAL.columns, NewColIdent(string(yyDollar[3].bytes))) + yySLICE := (*Columns)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, NewColIdent(string(yyDollar[3].str))) } - case 580: + case 615: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3039 + var yyLOCAL Partitions +//line sql.y:3213 { - yyVAL.partitions = 
Partitions{yyDollar[1].colIdent} + yyLOCAL = Partitions{yyDollar[1].colIdent} } - case 581: + yyVAL.union = yyLOCAL + case 616: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3043 +//line sql.y:3217 { - yyVAL.partitions = append(yyVAL.partitions, yyDollar[3].colIdent) + yySLICE := (*Partitions)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].colIdent) } - case 582: + case 617: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3056 + var yyLOCAL TableExpr +//line sql.y:3230 { - yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].joinType, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } - case 583: + yyVAL.union = yyLOCAL + case 618: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3060 + var yyLOCAL TableExpr +//line sql.y:3234 { - yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].joinType, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } - case 584: + yyVAL.union = yyLOCAL + case 619: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3064 + var yyLOCAL TableExpr +//line sql.y:3238 { - yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].joinType, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } - case 585: + yyVAL.union = yyLOCAL + case 620: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3068 + var yyLOCAL TableExpr +//line sql.y:3242 { - yyVAL.tableExpr = 
&JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].joinType, RightExpr: yyDollar[3].tableExpr} + yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion()} } - case 586: + yyVAL.union = yyLOCAL + case 621: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3074 +//line sql.y:3248 { - yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} + yyVAL.joinCondition = JoinCondition{On: yyDollar[2].exprUnion()} } - case 587: + case 622: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3076 +//line sql.y:3250 { - yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} + yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columnsUnion()} } - case 588: + case 623: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3080 +//line sql.y:3254 { yyVAL.joinCondition = JoinCondition{} } - case 589: + case 624: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3082 +//line sql.y:3256 { yyVAL.joinCondition = yyDollar[1].joinCondition } - case 590: + case 625: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3086 +//line sql.y:3260 { yyVAL.joinCondition = JoinCondition{} } - case 591: + case 626: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3088 +//line sql.y:3262 { - yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} + yyVAL.joinCondition = JoinCondition{On: yyDollar[2].exprUnion()} } - case 592: + case 627: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3091 +//line sql.y:3265 { yyVAL.empty = struct{}{} } - case 593: + case 628: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3093 +//line sql.y:3267 { yyVAL.empty = struct{}{} } - case 594: + case 629: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3096 +//line sql.y:3270 { yyVAL.tableIdent = NewTableIdent("") } - case 595: + case 630: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3100 +//line sql.y:3274 { yyVAL.tableIdent = yyDollar[1].tableIdent } - case 596: + case 631: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3104 +//line sql.y:3278 
{ yyVAL.tableIdent = yyDollar[2].tableIdent } - case 598: + case 633: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3111 +//line sql.y:3285 { - yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].str)) } - case 599: + case 634: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3117 + var yyLOCAL JoinType +//line sql.y:3291 { - yyVAL.joinType = NormalJoinType + yyLOCAL = NormalJoinType } - case 600: + yyVAL.union = yyLOCAL + case 635: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3121 + var yyLOCAL JoinType +//line sql.y:3295 { - yyVAL.joinType = NormalJoinType + yyLOCAL = NormalJoinType } - case 601: + yyVAL.union = yyLOCAL + case 636: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3125 + var yyLOCAL JoinType +//line sql.y:3299 { - yyVAL.joinType = NormalJoinType + yyLOCAL = NormalJoinType } - case 602: + yyVAL.union = yyLOCAL + case 637: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3131 + var yyLOCAL JoinType +//line sql.y:3305 { - yyVAL.joinType = StraightJoinType + yyLOCAL = StraightJoinType } - case 603: + yyVAL.union = yyLOCAL + case 638: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3137 + var yyLOCAL JoinType +//line sql.y:3311 { - yyVAL.joinType = LeftJoinType + yyLOCAL = LeftJoinType } - case 604: + yyVAL.union = yyLOCAL + case 639: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3141 + var yyLOCAL JoinType +//line sql.y:3315 { - yyVAL.joinType = LeftJoinType + yyLOCAL = LeftJoinType } - case 605: + yyVAL.union = yyLOCAL + case 640: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3145 + var yyLOCAL JoinType +//line sql.y:3319 { - yyVAL.joinType = RightJoinType + yyLOCAL = RightJoinType } - case 606: + yyVAL.union = yyLOCAL + case 641: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3149 + var yyLOCAL JoinType +//line sql.y:3323 { - yyVAL.joinType = RightJoinType + yyLOCAL = RightJoinType } - case 607: + yyVAL.union = yyLOCAL + case 642: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3155 + var yyLOCAL 
JoinType +//line sql.y:3329 { - yyVAL.joinType = NaturalJoinType + yyLOCAL = NaturalJoinType } - case 608: + yyVAL.union = yyLOCAL + case 643: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3159 + var yyLOCAL JoinType +//line sql.y:3333 { - if yyDollar[2].joinType == LeftJoinType { - yyVAL.joinType = NaturalLeftJoinType + if yyDollar[2].joinTypeUnion() == LeftJoinType { + yyLOCAL = NaturalLeftJoinType } else { - yyVAL.joinType = NaturalRightJoinType + yyLOCAL = NaturalRightJoinType } } - case 609: + yyVAL.union = yyLOCAL + case 644: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3169 +//line sql.y:3343 { yyVAL.tableName = yyDollar[2].tableName } - case 610: + case 645: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3173 +//line sql.y:3347 { yyVAL.tableName = yyDollar[1].tableName } - case 611: + case 646: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3179 +//line sql.y:3353 { yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} } - case 612: + case 647: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3183 +//line sql.y:3357 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} } - case 613: + case 648: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3189 +//line sql.y:3363 { yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} } - case 614: + case 649: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3194 + var yyLOCAL *IndexHints +//line sql.y:3368 { - yyVAL.indexHints = nil + yyLOCAL = nil } - case 615: + yyVAL.union = yyLOCAL + case 650: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3198 + var yyLOCAL *IndexHints +//line sql.y:3372 { - yyVAL.indexHints = &IndexHints{Type: UseOp, Indexes: yyDollar[4].columns} + yyLOCAL = &IndexHints{Type: UseOp, Indexes: yyDollar[4].columnsUnion()} } - case 616: + yyVAL.union = yyLOCAL + case 651: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3202 + var yyLOCAL *IndexHints +//line sql.y:3376 { - yyVAL.indexHints = &IndexHints{Type: UseOp} + yyLOCAL = &IndexHints{Type: UseOp} } - case 617: 
+ yyVAL.union = yyLOCAL + case 652: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3206 + var yyLOCAL *IndexHints +//line sql.y:3380 { - yyVAL.indexHints = &IndexHints{Type: IgnoreOp, Indexes: yyDollar[4].columns} + yyLOCAL = &IndexHints{Type: IgnoreOp, Indexes: yyDollar[4].columnsUnion()} } - case 618: + yyVAL.union = yyLOCAL + case 653: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3210 + var yyLOCAL *IndexHints +//line sql.y:3384 { - yyVAL.indexHints = &IndexHints{Type: ForceOp, Indexes: yyDollar[4].columns} + yyLOCAL = &IndexHints{Type: ForceOp, Indexes: yyDollar[4].columnsUnion()} } - case 619: + yyVAL.union = yyLOCAL + case 654: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3215 + var yyLOCAL Expr +//line sql.y:3389 { - yyVAL.expr = nil + yyLOCAL = nil } - case 620: + yyVAL.union = yyLOCAL + case 655: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3219 + var yyLOCAL Expr +//line sql.y:3393 { - yyVAL.expr = yyDollar[2].expr + yyLOCAL = yyDollar[2].exprUnion() } - case 621: + yyVAL.union = yyLOCAL + case 656: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3225 + var yyLOCAL Expr +//line sql.y:3399 { - yyVAL.expr = yyDollar[1].expr + yyLOCAL = yyDollar[1].exprUnion() } - case 622: + yyVAL.union = yyLOCAL + case 657: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3229 + var yyLOCAL Expr +//line sql.y:3403 { - yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + yyLOCAL = &AndExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } - case 623: + yyVAL.union = yyLOCAL + case 658: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3233 + var yyLOCAL Expr +//line sql.y:3407 { - yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + yyLOCAL = &OrExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } - case 624: + yyVAL.union = yyLOCAL + case 659: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3237 + var yyLOCAL Expr +//line sql.y:3411 { - yyVAL.expr = &XorExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + 
yyLOCAL = &XorExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } - case 625: + yyVAL.union = yyLOCAL + case 660: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3241 + var yyLOCAL Expr +//line sql.y:3415 { - yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} + yyLOCAL = &NotExpr{Expr: yyDollar[2].exprUnion()} } - case 626: + yyVAL.union = yyLOCAL + case 661: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3245 + var yyLOCAL Expr +//line sql.y:3419 { - yyVAL.expr = &IsExpr{Operator: yyDollar[3].isExprOperator, Expr: yyDollar[1].expr} + yyLOCAL = &IsExpr{Operator: yyDollar[3].isExprOperatorUnion(), Expr: yyDollar[1].exprUnion()} } - case 627: + yyVAL.union = yyLOCAL + case 662: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3249 + var yyLOCAL Expr +//line sql.y:3423 { - yyVAL.expr = yyDollar[1].expr + yyLOCAL = yyDollar[1].exprUnion() } - case 628: + yyVAL.union = yyLOCAL + case 663: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3253 + var yyLOCAL Expr +//line sql.y:3427 { - yyVAL.expr = &Default{ColName: yyDollar[2].str} + yyLOCAL = &Default{ColName: yyDollar[2].str} } - case 629: + yyVAL.union = yyLOCAL + case 664: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3259 +//line sql.y:3433 { yyVAL.str = "" } - case 630: + case 665: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3263 +//line sql.y:3437 { yyVAL.str = string(yyDollar[2].colIdent.String()) } - case 631: + case 666: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3269 + var yyLOCAL BoolVal +//line sql.y:3443 { - yyVAL.boolVal = BoolVal(true) + yyLOCAL = BoolVal(true) } - case 632: + yyVAL.union = yyLOCAL + case 667: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3273 + var yyLOCAL BoolVal +//line sql.y:3447 { - yyVAL.boolVal = BoolVal(false) + yyLOCAL = BoolVal(false) } - case 633: + yyVAL.union = yyLOCAL + case 668: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3279 + var yyLOCAL Expr +//line sql.y:3453 { - yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: 
yyDollar[2].comparisonExprOperator, Right: yyDollar[3].expr} + yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: yyDollar[2].comparisonExprOperatorUnion(), Right: yyDollar[3].exprUnion()} } - case 634: + yyVAL.union = yyLOCAL + case 669: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3283 + var yyLOCAL Expr +//line sql.y:3457 { - yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InOp, Right: yyDollar[3].colTuple} + yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: InOp, Right: yyDollar[3].colTupleUnion()} } - case 635: + yyVAL.union = yyLOCAL + case 670: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3287 + var yyLOCAL Expr +//line sql.y:3461 { - yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInOp, Right: yyDollar[4].colTuple} + yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotInOp, Right: yyDollar[4].colTupleUnion()} } - case 636: + yyVAL.union = yyLOCAL + case 671: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3291 + var yyLOCAL Expr +//line sql.y:3465 { - yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeOp, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} + yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion(), Escape: yyDollar[4].exprUnion()} } - case 637: + yyVAL.union = yyLOCAL + case 672: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3295 + var yyLOCAL Expr +//line sql.y:3469 { - yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeOp, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} + yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion(), Escape: yyDollar[5].exprUnion()} } - case 638: + yyVAL.union = yyLOCAL + case 673: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3299 + var yyLOCAL Expr +//line sql.y:3473 { - yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpOp, Right: yyDollar[3].expr} + yyLOCAL = 
&ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: RegexpOp, Right: yyDollar[3].exprUnion()} } - case 639: + yyVAL.union = yyLOCAL + case 674: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3303 + var yyLOCAL Expr +//line sql.y:3477 { - yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpOp, Right: yyDollar[4].expr} + yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotRegexpOp, Right: yyDollar[4].exprUnion()} } - case 640: + yyVAL.union = yyLOCAL + case 675: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3307 + var yyLOCAL Expr +//line sql.y:3481 { - yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenOp, From: yyDollar[3].expr, To: yyDollar[5].expr} + yyLOCAL = &RangeCond{Left: yyDollar[1].exprUnion(), Operator: BetweenOp, From: yyDollar[3].exprUnion(), To: yyDollar[5].exprUnion()} } - case 641: + yyVAL.union = yyLOCAL + case 676: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:3311 + var yyLOCAL Expr +//line sql.y:3485 { - yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenOp, From: yyDollar[4].expr, To: yyDollar[6].expr} + yyLOCAL = &RangeCond{Left: yyDollar[1].exprUnion(), Operator: NotBetweenOp, From: yyDollar[4].exprUnion(), To: yyDollar[6].exprUnion()} } - case 642: + yyVAL.union = yyLOCAL + case 677: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3315 + var yyLOCAL Expr +//line sql.y:3489 { - yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} + yyLOCAL = &ExistsExpr{Subquery: yyDollar[2].subqueryUnion()} } - case 643: + yyVAL.union = yyLOCAL + case 678: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3321 + var yyLOCAL IsExprOperator +//line sql.y:3495 { - yyVAL.isExprOperator = IsNullOp + yyLOCAL = IsNullOp } - case 644: + yyVAL.union = yyLOCAL + case 679: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3325 + var yyLOCAL IsExprOperator +//line sql.y:3499 { - yyVAL.isExprOperator = IsNotNullOp + yyLOCAL = IsNotNullOp } - case 645: + yyVAL.union = yyLOCAL + case 680: yyDollar = 
yyS[yypt-1 : yypt+1] -//line sql.y:3329 + var yyLOCAL IsExprOperator +//line sql.y:3503 { - yyVAL.isExprOperator = IsTrueOp + yyLOCAL = IsTrueOp } - case 646: + yyVAL.union = yyLOCAL + case 681: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3333 + var yyLOCAL IsExprOperator +//line sql.y:3507 { - yyVAL.isExprOperator = IsNotTrueOp + yyLOCAL = IsNotTrueOp } - case 647: + yyVAL.union = yyLOCAL + case 682: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3337 + var yyLOCAL IsExprOperator +//line sql.y:3511 { - yyVAL.isExprOperator = IsFalseOp + yyLOCAL = IsFalseOp } - case 648: + yyVAL.union = yyLOCAL + case 683: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3341 + var yyLOCAL IsExprOperator +//line sql.y:3515 { - yyVAL.isExprOperator = IsNotFalseOp + yyLOCAL = IsNotFalseOp } - case 649: + yyVAL.union = yyLOCAL + case 684: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3347 + var yyLOCAL ComparisonExprOperator +//line sql.y:3521 { - yyVAL.comparisonExprOperator = EqualOp + yyLOCAL = EqualOp } - case 650: + yyVAL.union = yyLOCAL + case 685: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3351 + var yyLOCAL ComparisonExprOperator +//line sql.y:3525 { - yyVAL.comparisonExprOperator = LessThanOp + yyLOCAL = LessThanOp } - case 651: + yyVAL.union = yyLOCAL + case 686: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3355 + var yyLOCAL ComparisonExprOperator +//line sql.y:3529 { - yyVAL.comparisonExprOperator = GreaterThanOp + yyLOCAL = GreaterThanOp } - case 652: + yyVAL.union = yyLOCAL + case 687: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3359 + var yyLOCAL ComparisonExprOperator +//line sql.y:3533 { - yyVAL.comparisonExprOperator = LessEqualOp + yyLOCAL = LessEqualOp } - case 653: + yyVAL.union = yyLOCAL + case 688: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3363 + var yyLOCAL ComparisonExprOperator +//line sql.y:3537 { - yyVAL.comparisonExprOperator = GreaterEqualOp + yyLOCAL = GreaterEqualOp } - case 654: + yyVAL.union = yyLOCAL + case 689: yyDollar = yyS[yypt-1 : 
yypt+1] -//line sql.y:3367 + var yyLOCAL ComparisonExprOperator +//line sql.y:3541 { - yyVAL.comparisonExprOperator = NotEqualOp + yyLOCAL = NotEqualOp } - case 655: + yyVAL.union = yyLOCAL + case 690: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3371 + var yyLOCAL ComparisonExprOperator +//line sql.y:3545 { - yyVAL.comparisonExprOperator = NullSafeEqualOp + yyLOCAL = NullSafeEqualOp } - case 656: + yyVAL.union = yyLOCAL + case 691: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3376 + var yyLOCAL Expr +//line sql.y:3550 { - yyVAL.expr = nil + yyLOCAL = nil } - case 657: + yyVAL.union = yyLOCAL + case 692: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3380 + var yyLOCAL Expr +//line sql.y:3554 { - yyVAL.expr = yyDollar[2].expr + yyLOCAL = yyDollar[2].exprUnion() } - case 658: + yyVAL.union = yyLOCAL + case 693: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3386 + var yyLOCAL ColTuple +//line sql.y:3560 { - yyVAL.colTuple = yyDollar[1].valTuple + yyLOCAL = yyDollar[1].valTupleUnion() } - case 659: + yyVAL.union = yyLOCAL + case 694: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3390 + var yyLOCAL ColTuple +//line sql.y:3564 { - yyVAL.colTuple = yyDollar[1].subquery + yyLOCAL = yyDollar[1].subqueryUnion() } - case 660: + yyVAL.union = yyLOCAL + case 695: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3394 + var yyLOCAL ColTuple +//line sql.y:3568 { - yyVAL.colTuple = ListArg(yyDollar[1].bytes) + yyLOCAL = ListArg(yyDollar[1].str) + bindVariable(yylex, yyDollar[1].str[2:]) } - case 661: + yyVAL.union = yyLOCAL + case 696: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3400 + var yyLOCAL *Subquery +//line sql.y:3575 { - yyVAL.subquery = &Subquery{yyDollar[2].selStmt} + yyLOCAL = &Subquery{yyDollar[2].selStmtUnion()} } - case 662: + yyVAL.union = yyLOCAL + case 697: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3406 + var yyLOCAL Exprs +//line sql.y:3581 { - yyVAL.exprs = Exprs{yyDollar[1].expr} + yyLOCAL = Exprs{yyDollar[1].exprUnion()} } - case 663: + yyVAL.union = 
yyLOCAL + case 698: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3410 +//line sql.y:3585 { - yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) + yySLICE := (*Exprs)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].exprUnion()) } - case 664: + case 699: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3416 + var yyLOCAL Expr +//line sql.y:3591 { - yyVAL.expr = yyDollar[1].expr + yyLOCAL = yyDollar[1].exprUnion() } - case 665: + yyVAL.union = yyLOCAL + case 700: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3420 + var yyLOCAL Expr +//line sql.y:3595 { - yyVAL.expr = yyDollar[1].boolVal + yyLOCAL = yyDollar[1].boolValUnion() } - case 666: + yyVAL.union = yyLOCAL + case 701: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3424 + var yyLOCAL Expr +//line sql.y:3599 { - yyVAL.expr = yyDollar[1].colName + yyLOCAL = yyDollar[1].colNameUnion() } - case 667: + yyVAL.union = yyLOCAL + case 702: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3428 + var yyLOCAL Expr +//line sql.y:3603 { - yyVAL.expr = yyDollar[1].expr + yyLOCAL = yyDollar[1].exprUnion() } - case 668: + yyVAL.union = yyLOCAL + case 703: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3432 + var yyLOCAL Expr +//line sql.y:3607 { - yyVAL.expr = yyDollar[1].subquery + yyLOCAL = yyDollar[1].subqueryUnion() } - case 669: + yyVAL.union = yyLOCAL + case 704: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3436 + var yyLOCAL Expr +//line sql.y:3611 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitAndOp, Right: yyDollar[3].exprUnion()} } - case 670: + yyVAL.union = yyLOCAL + case 705: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3440 + var yyLOCAL Expr +//line sql.y:3615 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitOrOp, Right: yyDollar[3].exprUnion()} } - case 671: + 
yyVAL.union = yyLOCAL + case 706: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3444 + var yyLOCAL Expr +//line sql.y:3619 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitXorOp, Right: yyDollar[3].exprUnion()} } - case 672: + yyVAL.union = yyLOCAL + case 707: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3448 + var yyLOCAL Expr +//line sql.y:3623 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: PlusOp, Right: yyDollar[3].exprUnion()} } - case 673: + yyVAL.union = yyLOCAL + case 708: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3452 + var yyLOCAL Expr +//line sql.y:3627 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MinusOp, Right: yyDollar[3].exprUnion()} } - case 674: + yyVAL.union = yyLOCAL + case 709: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3456 + var yyLOCAL Expr +//line sql.y:3631 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MultOp, Right: yyDollar[3].exprUnion()} } - case 675: + yyVAL.union = yyLOCAL + case 710: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3460 + var yyLOCAL Expr +//line sql.y:3635 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: DivOp, Right: yyDollar[3].exprUnion()} } - case 676: + yyVAL.union = yyLOCAL + case 711: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3464 + var yyLOCAL Expr +//line sql.y:3639 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: IntDivOp, 
Right: yyDollar[3].exprUnion()} } - case 677: + yyVAL.union = yyLOCAL + case 712: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3468 + var yyLOCAL Expr +//line sql.y:3643 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } - case 678: + yyVAL.union = yyLOCAL + case 713: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3472 + var yyLOCAL Expr +//line sql.y:3647 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } - case 679: + yyVAL.union = yyLOCAL + case 714: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3476 + var yyLOCAL Expr +//line sql.y:3651 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftLeftOp, Right: yyDollar[3].exprUnion()} } - case 680: + yyVAL.union = yyLOCAL + case 715: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3480 + var yyLOCAL Expr +//line sql.y:3655 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftRightOp, Right: yyDollar[3].exprUnion()} } - case 681: + yyVAL.union = yyLOCAL + case 716: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3484 + var yyLOCAL Expr +//line sql.y:3659 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONExtractOp, Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].colNameUnion(), Operator: JSONExtractOp, Right: yyDollar[3].exprUnion()} } - case 682: + yyVAL.union = yyLOCAL + case 717: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3488 + var yyLOCAL Expr +//line sql.y:3663 { - yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, 
Right: yyDollar[3].expr} + yyLOCAL = &BinaryExpr{Left: yyDollar[1].colNameUnion(), Operator: JSONUnquoteExtractOp, Right: yyDollar[3].exprUnion()} } - case 683: + yyVAL.union = yyLOCAL + case 718: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3492 + var yyLOCAL Expr +//line sql.y:3667 { - yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} + yyLOCAL = &CollateExpr{Expr: yyDollar[1].exprUnion(), Charset: yyDollar[3].str} } - case 684: + yyVAL.union = yyLOCAL + case 719: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3496 + var yyLOCAL Expr +//line sql.y:3671 { - yyVAL.expr = &UnaryExpr{Operator: BinaryOp, Expr: yyDollar[2].expr} + yyLOCAL = &UnaryExpr{Operator: BinaryOp, Expr: yyDollar[2].exprUnion()} } - case 685: + yyVAL.union = yyLOCAL + case 720: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3500 + var yyLOCAL Expr +//line sql.y:3675 { - yyVAL.expr = &UnaryExpr{Operator: UBinaryOp, Expr: yyDollar[2].expr} + yyLOCAL = &UnaryExpr{Operator: UBinaryOp, Expr: yyDollar[2].exprUnion()} } - case 686: + yyVAL.union = yyLOCAL + case 721: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3504 + var yyLOCAL Expr +//line sql.y:3679 { - yyVAL.expr = &UnaryExpr{Operator: Utf8Op, Expr: yyDollar[2].expr} + yyLOCAL = &UnaryExpr{Operator: Utf8Op, Expr: yyDollar[2].exprUnion()} } - case 687: + yyVAL.union = yyLOCAL + case 722: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3508 + var yyLOCAL Expr +//line sql.y:3683 { - yyVAL.expr = &UnaryExpr{Operator: Utf8mb4Op, Expr: yyDollar[2].expr} + yyLOCAL = &UnaryExpr{Operator: Utf8mb4Op, Expr: yyDollar[2].exprUnion()} } - case 688: + yyVAL.union = yyLOCAL + case 723: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3512 + var yyLOCAL Expr +//line sql.y:3687 { - yyVAL.expr = &UnaryExpr{Operator: Latin1Op, Expr: yyDollar[2].expr} + yyLOCAL = &UnaryExpr{Operator: Latin1Op, Expr: yyDollar[2].exprUnion()} } - case 689: + yyVAL.union = yyLOCAL + case 724: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3516 + var yyLOCAL Expr 
+//line sql.y:3691 { - if num, ok := yyDollar[2].expr.(*Literal); ok && num.Type == IntVal { - yyVAL.expr = num - } else { - yyVAL.expr = &UnaryExpr{Operator: UPlusOp, Expr: yyDollar[2].expr} - } + yyLOCAL = yyDollar[2].exprUnion() } - case 690: + yyVAL.union = yyLOCAL + case 725: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3524 + var yyLOCAL Expr +//line sql.y:3695 { - if num, ok := yyDollar[2].expr.(*Literal); ok && num.Type == IntVal { - // Handle double negative - if num.Val[0] == '-' { - num.Val = num.Val[1:] - yyVAL.expr = num - } else { - yyVAL.expr = NewIntLiteral(append([]byte("-"), num.Val...)) - } - } else { - yyVAL.expr = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].expr} - } + yyLOCAL = handleUnaryMinus(yyDollar[2].exprUnion()) } - case 691: + yyVAL.union = yyLOCAL + case 726: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3538 + var yyLOCAL Expr +//line sql.y:3699 { - yyVAL.expr = &UnaryExpr{Operator: TildaOp, Expr: yyDollar[2].expr} + yyLOCAL = &UnaryExpr{Operator: TildaOp, Expr: yyDollar[2].exprUnion()} } - case 692: + yyVAL.union = yyLOCAL + case 727: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3542 + var yyLOCAL Expr +//line sql.y:3703 { - yyVAL.expr = &UnaryExpr{Operator: BangOp, Expr: yyDollar[2].expr} + yyLOCAL = &UnaryExpr{Operator: BangOp, Expr: yyDollar[2].exprUnion()} } - case 693: + yyVAL.union = yyLOCAL + case 728: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3546 + var yyLOCAL Expr +//line sql.y:3707 { // This rule prevents the usage of INTERVAL // as a function. If support is needed for that, // we'll need to revisit this. The solution // will be non-trivial because of grammar conflicts. 
- yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent.String()} + yyLOCAL = &IntervalExpr{Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].colIdent.String()} } - case 698: + yyVAL.union = yyLOCAL + case 733: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3564 + var yyLOCAL Expr +//line sql.y:3725 { - yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprsUnion()} } - case 699: + yyVAL.union = yyLOCAL + case 734: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3568 + var yyLOCAL Expr +//line sql.y:3729 { - yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} + yyLOCAL = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprsUnion()} } - case 700: + yyVAL.union = yyLOCAL + case 735: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3572 + var yyLOCAL Expr +//line sql.y:3733 { - yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} + yyLOCAL = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprsUnion()} } - case 701: + yyVAL.union = yyLOCAL + case 736: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:3576 + var yyLOCAL Expr +//line sql.y:3737 { - yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} + yyLOCAL = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprsUnion()} } - case 702: + yyVAL.union = yyLOCAL + case 737: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3586 + var yyLOCAL Expr +//line sql.y:3747 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprsUnion()} } - case 703: + yyVAL.union = yyLOCAL + case 738: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3590 + var 
yyLOCAL Expr +//line sql.y:3751 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprsUnion()} } - case 704: + yyVAL.union = yyLOCAL + case 739: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:3594 + var yyLOCAL Expr +//line sql.y:3755 { - yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + yyLOCAL = &ConvertExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion()} } - case 705: + yyVAL.union = yyLOCAL + case 740: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:3598 + var yyLOCAL Expr +//line sql.y:3759 { - yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + yyLOCAL = &ConvertExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion()} } - case 706: + yyVAL.union = yyLOCAL + case 741: yyDollar = yyS[yypt-6 : yypt+1] -//line sql.y:3602 + var yyLOCAL Expr +//line sql.y:3763 { - yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} + yyLOCAL = &ConvertUsingExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].str} } - case 707: + yyVAL.union = yyLOCAL + case 742: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:3606 + var yyLOCAL Expr +//line sql.y:3767 { - yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + yyLOCAL = &SubstrExpr{Name: yyDollar[3].colNameUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } - case 708: + yyVAL.union = yyLOCAL + case 743: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:3610 + var yyLOCAL Expr +//line sql.y:3771 { - yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + yyLOCAL = &SubstrExpr{Name: yyDollar[3].colNameUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } - case 709: + yyVAL.union = yyLOCAL + case 744: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:3614 + var yyLOCAL Expr +//line 
sql.y:3775 { - yyVAL.expr = &SubstrExpr{StrVal: NewStrLiteral(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} + yyLOCAL = &SubstrExpr{StrVal: NewStrLiteral(yyDollar[3].str), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } - case 710: + yyVAL.union = yyLOCAL + case 745: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:3618 + var yyLOCAL Expr +//line sql.y:3779 { - yyVAL.expr = &SubstrExpr{StrVal: NewStrLiteral(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} + yyLOCAL = &SubstrExpr{StrVal: NewStrLiteral(yyDollar[3].str), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } - case 711: + yyVAL.union = yyLOCAL + case 746: yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:3622 + var yyLOCAL Expr +//line sql.y:3783 { - yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, Option: yyDollar[8].matchExprOption} + yyLOCAL = &MatchExpr{Columns: yyDollar[3].selectExprsUnion(), Expr: yyDollar[7].exprUnion(), Option: yyDollar[8].matchExprOptionUnion()} } - case 712: + yyVAL.union = yyLOCAL + case 747: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:3626 + var yyLOCAL Expr +//line sql.y:3787 { - yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].boolean, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str, Limit: yyDollar[7].limit} + yyLOCAL = &GroupConcatExpr{Distinct: yyDollar[3].booleanUnion(), Exprs: yyDollar[4].selectExprsUnion(), OrderBy: yyDollar[5].orderByUnion(), Separator: yyDollar[6].str, Limit: yyDollar[7].limitUnion()} } - case 713: + yyVAL.union = yyLOCAL + case 748: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3630 + var yyLOCAL Expr +//line sql.y:3791 { - yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} + yyLOCAL = &CaseExpr{Expr: yyDollar[2].exprUnion(), Whens: yyDollar[3].whensUnion(), Else: yyDollar[4].exprUnion()} } - case 714: + yyVAL.union = yyLOCAL + case 749: yyDollar = yyS[yypt-4 : yypt+1] 
-//line sql.y:3634 + var yyLOCAL Expr +//line sql.y:3795 { - yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colName} + yyLOCAL = &ValuesFuncExpr{Name: yyDollar[3].colNameUnion()} } - case 715: + yyVAL.union = yyLOCAL + case 750: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3638 + var yyLOCAL Expr +//line sql.y:3799 { - yyVAL.expr = &FuncExpr{Name: NewColIdent(string(yyDollar[1].bytes))} + yyLOCAL = &FuncExpr{Name: NewColIdent(yyDollar[1].str)} } - case 716: + yyVAL.union = yyLOCAL + case 751: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3648 + var yyLOCAL Expr +//line sql.y:3809 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} + yyLOCAL = &FuncExpr{Name: NewColIdent("current_timestamp")} } - case 717: + yyVAL.union = yyLOCAL + case 752: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3652 + var yyLOCAL Expr +//line sql.y:3813 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} + yyLOCAL = &FuncExpr{Name: NewColIdent("utc_timestamp")} } - case 718: + yyVAL.union = yyLOCAL + case 753: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3656 + var yyLOCAL Expr +//line sql.y:3817 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} + yyLOCAL = &FuncExpr{Name: NewColIdent("utc_time")} } - case 719: + yyVAL.union = yyLOCAL + case 754: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3661 + var yyLOCAL Expr +//line sql.y:3822 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} + yyLOCAL = &FuncExpr{Name: NewColIdent("utc_date")} } - case 720: + yyVAL.union = yyLOCAL + case 755: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3666 + var yyLOCAL Expr +//line sql.y:3827 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("localtime")} + yyLOCAL = &FuncExpr{Name: NewColIdent("localtime")} } - case 721: + yyVAL.union = yyLOCAL + case 756: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3671 + var yyLOCAL Expr +//line sql.y:3832 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} + yyLOCAL = &FuncExpr{Name: NewColIdent("localtimestamp")} } - case 
722: + yyVAL.union = yyLOCAL + case 757: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3677 + var yyLOCAL Expr +//line sql.y:3838 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("current_date")} + yyLOCAL = &FuncExpr{Name: NewColIdent("current_date")} } - case 723: + yyVAL.union = yyLOCAL + case 758: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3682 + var yyLOCAL Expr +//line sql.y:3843 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} + yyLOCAL = &FuncExpr{Name: NewColIdent("current_time")} } - case 724: + yyVAL.union = yyLOCAL + case 759: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3687 + var yyLOCAL Expr +//line sql.y:3848 { - yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_timestamp"), Fsp: yyDollar[2].expr} + yyLOCAL = &CurTimeFuncExpr{Name: NewColIdent("current_timestamp"), Fsp: yyDollar[2].exprUnion()} } - case 725: + yyVAL.union = yyLOCAL + case 760: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3691 + var yyLOCAL Expr +//line sql.y:3852 { - yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_timestamp"), Fsp: yyDollar[2].expr} + yyLOCAL = &CurTimeFuncExpr{Name: NewColIdent("utc_timestamp"), Fsp: yyDollar[2].exprUnion()} } - case 726: + yyVAL.union = yyLOCAL + case 761: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3695 + var yyLOCAL Expr +//line sql.y:3856 { - yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_time"), Fsp: yyDollar[2].expr} + yyLOCAL = &CurTimeFuncExpr{Name: NewColIdent("utc_time"), Fsp: yyDollar[2].exprUnion()} } - case 727: + yyVAL.union = yyLOCAL + case 762: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3700 + var yyLOCAL Expr +//line sql.y:3861 { - yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("localtime"), Fsp: yyDollar[2].expr} + yyLOCAL = &CurTimeFuncExpr{Name: NewColIdent("localtime"), Fsp: yyDollar[2].exprUnion()} } - case 728: + yyVAL.union = yyLOCAL + case 763: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3705 + var yyLOCAL Expr +//line sql.y:3866 { - yyVAL.expr = &CurTimeFuncExpr{Name: 
NewColIdent("localtimestamp"), Fsp: yyDollar[2].expr} + yyLOCAL = &CurTimeFuncExpr{Name: NewColIdent("localtimestamp"), Fsp: yyDollar[2].exprUnion()} } - case 729: + yyVAL.union = yyLOCAL + case 764: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3710 + var yyLOCAL Expr +//line sql.y:3871 { - yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_time"), Fsp: yyDollar[2].expr} + yyLOCAL = &CurTimeFuncExpr{Name: NewColIdent("current_time"), Fsp: yyDollar[2].exprUnion()} } - case 730: + yyVAL.union = yyLOCAL + case 765: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:3714 + var yyLOCAL Expr +//line sql.y:3875 { - yyVAL.expr = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} + yyLOCAL = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} } - case 731: + yyVAL.union = yyLOCAL + case 766: yyDollar = yyS[yypt-8 : yypt+1] -//line sql.y:3718 + var yyLOCAL Expr +//line sql.y:3879 { - yyVAL.expr = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} + yyLOCAL = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} } - case 734: + yyVAL.union = yyLOCAL + case 769: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3728 + var yyLOCAL Expr +//line sql.y:3889 { - yyVAL.expr = yyDollar[2].expr + yyLOCAL = yyDollar[2].exprUnion() } - case 735: + yyVAL.union = yyLOCAL + case 770: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3738 + var yyLOCAL Expr +//line sql.y:3899 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprsUnion()} } - case 736: + yyVAL.union = yyLOCAL + case 771: yyDollar = yyS[yypt-4 : yypt+1] -//line 
sql.y:3742 + var yyLOCAL Expr +//line sql.y:3903 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprsUnion()} } - case 737: + yyVAL.union = yyLOCAL + case 772: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3746 + var yyLOCAL Expr +//line sql.y:3907 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("schema"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("schema"), Exprs: yyDollar[3].selectExprsUnion()} } - case 738: + yyVAL.union = yyLOCAL + case 773: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3750 + var yyLOCAL Expr +//line sql.y:3911 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprsUnion()} } - case 739: + yyVAL.union = yyLOCAL + case 774: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3754 + var yyLOCAL Expr +//line sql.y:3915 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprsUnion()} } - case 740: + yyVAL.union = yyLOCAL + case 775: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3758 + var yyLOCAL Expr +//line sql.y:3919 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprsUnion()} } - case 741: + yyVAL.union = yyLOCAL + case 776: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3762 + var yyLOCAL Expr +//line sql.y:3923 { - yyVAL.expr = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprs} + yyLOCAL = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprsUnion()} } - case 742: + yyVAL.union = yyLOCAL + case 777: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3768 + var yyLOCAL MatchExprOption +//line sql.y:3929 { - yyVAL.matchExprOption = NoOption 
+ yyLOCAL = NoOption } - case 743: + yyVAL.union = yyLOCAL + case 778: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3772 + var yyLOCAL MatchExprOption +//line sql.y:3933 { - yyVAL.matchExprOption = BooleanModeOpt + yyLOCAL = BooleanModeOpt } - case 744: + yyVAL.union = yyLOCAL + case 779: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3776 + var yyLOCAL MatchExprOption +//line sql.y:3937 { - yyVAL.matchExprOption = NaturalLanguageModeOpt + yyLOCAL = NaturalLanguageModeOpt } - case 745: + yyVAL.union = yyLOCAL + case 780: yyDollar = yyS[yypt-7 : yypt+1] -//line sql.y:3780 + var yyLOCAL MatchExprOption +//line sql.y:3941 { - yyVAL.matchExprOption = NaturalLanguageModeWithQueryExpansionOpt + yyLOCAL = NaturalLanguageModeWithQueryExpansionOpt } - case 746: + yyVAL.union = yyLOCAL + case 781: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3784 + var yyLOCAL MatchExprOption +//line sql.y:3945 { - yyVAL.matchExprOption = QueryExpansionOpt + yyLOCAL = QueryExpansionOpt } - case 747: + yyVAL.union = yyLOCAL + case 782: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3790 +//line sql.y:3951 { yyVAL.str = string(yyDollar[1].colIdent.String()) } - case 748: + case 783: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3794 +//line sql.y:3955 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 749: + case 784: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3798 +//line sql.y:3959 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 750: + case 785: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3804 + var yyLOCAL *ConvertType +//line sql.y:3965 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 751: + yyVAL.union = yyLOCAL + case 786: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3808 + var yyLOCAL *ConvertType +//line sql.y:3969 { - yyVAL.convertType = 
&ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal, Charset: yyDollar[3].str, Operator: CharacterSetOp} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].str, Operator: CharacterSetOp} } - case 752: + yyVAL.union = yyLOCAL + case 787: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3812 + var yyLOCAL *ConvertType +//line sql.y:3973 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal, Charset: string(yyDollar[3].colIdent.String())} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: string(yyDollar[3].colIdent.String())} } - case 753: + yyVAL.union = yyLOCAL + case 788: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3816 + var yyLOCAL *ConvertType +//line sql.y:3977 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } - case 754: + yyVAL.union = yyLOCAL + case 789: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3820 + var yyLOCAL *ConvertType +//line sql.y:3981 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 755: + yyVAL.union = yyLOCAL + case 790: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3824 + var yyLOCAL *ConvertType +//line sql.y:3985 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} - yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} + yyLOCAL.Length = yyDollar[2].LengthScaleOption.Length + yyLOCAL.Scale = yyDollar[2].LengthScaleOption.Scale } - case 756: + yyVAL.union = yyLOCAL + case 791: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3830 + var yyLOCAL *ConvertType +//line sql.y:3991 { - yyVAL.convertType = 
&ConvertType{Type: string(yyDollar[1].bytes)} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } - case 757: + yyVAL.union = yyLOCAL + case 792: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3834 + var yyLOCAL *ConvertType +//line sql.y:3995 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 758: + yyVAL.union = yyLOCAL + case 793: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3838 + var yyLOCAL *ConvertType +//line sql.y:3999 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } - case 759: + yyVAL.union = yyLOCAL + case 794: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3842 + var yyLOCAL *ConvertType +//line sql.y:4003 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } - case 760: + yyVAL.union = yyLOCAL + case 795: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3846 + var yyLOCAL *ConvertType +//line sql.y:4007 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].literal} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 761: + yyVAL.union = yyLOCAL + case 796: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3850 + var yyLOCAL *ConvertType +//line sql.y:4011 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } - case 762: + yyVAL.union = yyLOCAL + case 797: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3854 + var yyLOCAL *ConvertType +//line sql.y:4015 { - yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } - case 763: + yyVAL.union = yyLOCAL + case 798: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3859 + var yyLOCAL Expr +//line 
sql.y:4020 { - yyVAL.expr = nil + yyLOCAL = nil } - case 764: + yyVAL.union = yyLOCAL + case 799: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3863 + var yyLOCAL Expr +//line sql.y:4024 { - yyVAL.expr = yyDollar[1].expr + yyLOCAL = yyDollar[1].exprUnion() } - case 765: + yyVAL.union = yyLOCAL + case 800: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3868 +//line sql.y:4029 { yyVAL.str = string("") } - case 766: + case 801: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3872 +//line sql.y:4033 { - yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" + yyVAL.str = " separator " + encodeSQLString(yyDollar[2].str) } - case 767: + case 802: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3878 + var yyLOCAL []*When +//line sql.y:4039 { - yyVAL.whens = []*When{yyDollar[1].when} + yyLOCAL = []*When{yyDollar[1].whenUnion()} } - case 768: + yyVAL.union = yyLOCAL + case 803: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3882 +//line sql.y:4043 { - yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) + yySLICE := (*[]*When)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[2].whenUnion()) } - case 769: + case 804: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:3888 + var yyLOCAL *When +//line sql.y:4049 { - yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} + yyLOCAL = &When{Cond: yyDollar[2].exprUnion(), Val: yyDollar[4].exprUnion()} } - case 770: + yyVAL.union = yyLOCAL + case 805: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3893 + var yyLOCAL Expr +//line sql.y:4054 { - yyVAL.expr = nil + yyLOCAL = nil } - case 771: + yyVAL.union = yyLOCAL + case 806: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3897 + var yyLOCAL Expr +//line sql.y:4058 { - yyVAL.expr = yyDollar[2].expr + yyLOCAL = yyDollar[2].exprUnion() } - case 772: + yyVAL.union = yyLOCAL + case 807: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3903 + var yyLOCAL *ColName +//line sql.y:4064 { - yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} + yyLOCAL = &ColName{Name: 
yyDollar[1].colIdent} } - case 773: + yyVAL.union = yyLOCAL + case 808: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3907 + var yyLOCAL *ColName +//line sql.y:4068 { - yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} + yyLOCAL = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} } - case 774: + yyVAL.union = yyLOCAL + case 809: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3911 + var yyLOCAL *ColName +//line sql.y:4072 { - yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} + yyLOCAL = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} } - case 775: + yyVAL.union = yyLOCAL + case 810: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3917 + var yyLOCAL Expr +//line sql.y:4078 { - yyVAL.expr = NewStrLiteral(yyDollar[1].bytes) + yyLOCAL = NewStrLiteral(yyDollar[1].str) } - case 776: + yyVAL.union = yyLOCAL + case 811: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3921 + var yyLOCAL Expr +//line sql.y:4082 { - yyVAL.expr = NewHexLiteral(yyDollar[1].bytes) + yyLOCAL = NewHexLiteral(yyDollar[1].str) } - case 777: + yyVAL.union = yyLOCAL + case 812: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3925 + var yyLOCAL Expr +//line sql.y:4086 { - yyVAL.expr = NewBitLiteral(yyDollar[1].bytes) + yyLOCAL = NewBitLiteral(yyDollar[1].str) } - case 778: + yyVAL.union = yyLOCAL + case 813: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3929 + var yyLOCAL Expr +//line sql.y:4090 { - yyVAL.expr = NewIntLiteral(yyDollar[1].bytes) + yyLOCAL = NewIntLiteral(yyDollar[1].str) } - case 779: + yyVAL.union = yyLOCAL + case 814: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3933 + var yyLOCAL Expr +//line sql.y:4094 { - yyVAL.expr = NewFloatLiteral(yyDollar[1].bytes) + yyLOCAL = NewFloatLiteral(yyDollar[1].str) } - case 780: + yyVAL.union = yyLOCAL 
+ case 815: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3937 + var yyLOCAL Expr +//line sql.y:4098 { - yyVAL.expr = NewHexNumLiteral(yyDollar[1].bytes) + yyLOCAL = NewHexNumLiteral(yyDollar[1].str) } - case 781: + yyVAL.union = yyLOCAL + case 816: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3941 + var yyLOCAL Expr +//line sql.y:4102 { - yyVAL.expr = NewArgument(yyDollar[1].bytes) + yyLOCAL = NewArgument(yyDollar[1].str) + bindVariable(yylex, yyDollar[1].str[1:]) } - case 782: + yyVAL.union = yyLOCAL + case 817: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3945 + var yyLOCAL Expr +//line sql.y:4107 { - yyVAL.expr = &NullVal{} + yyLOCAL = &NullVal{} } - case 783: + yyVAL.union = yyLOCAL + case 818: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3951 + var yyLOCAL Expr +//line sql.y:4113 { // TODO(sougou): Deprecate this construct. if yyDollar[1].colIdent.Lowered() != "value" { yylex.Error("expecting value after next") return 1 } - yyVAL.expr = NewIntLiteral([]byte("1")) + yyLOCAL = NewIntLiteral("1") } - case 784: + yyVAL.union = yyLOCAL + case 819: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3960 + var yyLOCAL Expr +//line sql.y:4122 { - yyVAL.expr = NewIntLiteral(yyDollar[1].bytes) + yyLOCAL = NewIntLiteral(yyDollar[1].str) } - case 785: + yyVAL.union = yyLOCAL + case 820: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3964 + var yyLOCAL Expr +//line sql.y:4126 { - yyVAL.expr = NewArgument(yyDollar[1].bytes) + yyLOCAL = NewArgument(yyDollar[1].str) + bindVariable(yylex, yyDollar[1].str[1:]) } - case 786: + yyVAL.union = yyLOCAL + case 821: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3969 + var yyLOCAL Exprs +//line sql.y:4132 { - yyVAL.exprs = nil + yyLOCAL = nil } - case 787: + yyVAL.union = yyLOCAL + case 822: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3973 + var yyLOCAL Exprs +//line sql.y:4136 { - yyVAL.exprs = yyDollar[3].exprs + yyLOCAL = yyDollar[3].exprsUnion() } - case 788: + yyVAL.union = yyLOCAL + case 823: yyDollar = yyS[yypt-0 : yypt+1] 
-//line sql.y:3978 + var yyLOCAL Expr +//line sql.y:4141 { - yyVAL.expr = nil + yyLOCAL = nil } - case 789: + yyVAL.union = yyLOCAL + case 824: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3982 + var yyLOCAL Expr +//line sql.y:4145 { - yyVAL.expr = yyDollar[2].expr + yyLOCAL = yyDollar[2].exprUnion() } - case 790: + yyVAL.union = yyLOCAL + case 825: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:3987 + var yyLOCAL OrderBy +//line sql.y:4150 { - yyVAL.orderBy = nil + yyLOCAL = nil } - case 791: + yyVAL.union = yyLOCAL + case 826: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3991 + var yyLOCAL OrderBy +//line sql.y:4154 { - yyVAL.orderBy = yyDollar[3].orderBy + yyLOCAL = yyDollar[3].orderByUnion() } - case 792: + yyVAL.union = yyLOCAL + case 827: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3997 + var yyLOCAL OrderBy +//line sql.y:4160 { - yyVAL.orderBy = OrderBy{yyDollar[1].order} + yyLOCAL = OrderBy{yyDollar[1].orderUnion()} } - case 793: + yyVAL.union = yyLOCAL + case 828: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4001 +//line sql.y:4164 { - yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) + yySLICE := (*OrderBy)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].orderUnion()) } - case 794: + case 829: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4007 + var yyLOCAL *Order +//line sql.y:4170 { - yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].orderDirection} + yyLOCAL = &Order{Expr: yyDollar[1].exprUnion(), Direction: yyDollar[2].orderDirectionUnion()} } - case 795: + yyVAL.union = yyLOCAL + case 830: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4012 + var yyLOCAL OrderDirection +//line sql.y:4175 { - yyVAL.orderDirection = AscOrder + yyLOCAL = AscOrder } - case 796: + yyVAL.union = yyLOCAL + case 831: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4016 + var yyLOCAL OrderDirection +//line sql.y:4179 { - yyVAL.orderDirection = AscOrder + yyLOCAL = AscOrder } - case 797: + yyVAL.union = yyLOCAL + case 832: 
yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4020 + var yyLOCAL OrderDirection +//line sql.y:4183 { - yyVAL.orderDirection = DescOrder + yyLOCAL = DescOrder } - case 798: + yyVAL.union = yyLOCAL + case 833: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4025 + var yyLOCAL *Limit +//line sql.y:4188 { - yyVAL.limit = nil + yyLOCAL = nil } - case 799: + yyVAL.union = yyLOCAL + case 834: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4029 + var yyLOCAL *Limit +//line sql.y:4192 { - yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr} + yyLOCAL = &Limit{Rowcount: yyDollar[2].exprUnion()} } - case 800: + yyVAL.union = yyLOCAL + case 835: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4033 + var yyLOCAL *Limit +//line sql.y:4196 { - yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} + yyLOCAL = &Limit{Offset: yyDollar[2].exprUnion(), Rowcount: yyDollar[4].exprUnion()} } - case 801: + yyVAL.union = yyLOCAL + case 836: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4037 + var yyLOCAL *Limit +//line sql.y:4200 { - yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} + yyLOCAL = &Limit{Offset: yyDollar[4].exprUnion(), Rowcount: yyDollar[2].exprUnion()} } - case 802: + yyVAL.union = yyLOCAL + case 837: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4042 + var yyLOCAL []AlterOption +//line sql.y:4205 { - yyVAL.indexOptions = nil + yyLOCAL = nil } - case 803: + yyVAL.union = yyLOCAL + case 838: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4046 + var yyLOCAL []AlterOption +//line sql.y:4209 { - yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption, yyDollar[2].indexOption} + yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } - case 804: + yyVAL.union = yyLOCAL + case 839: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4050 + var yyLOCAL []AlterOption +//line sql.y:4213 { - yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption, yyDollar[2].indexOption} + yyLOCAL = 
[]AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } - case 805: + yyVAL.union = yyLOCAL + case 840: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4054 + var yyLOCAL []AlterOption +//line sql.y:4217 { - yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} + yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } - case 806: + yyVAL.union = yyLOCAL + case 841: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4058 + var yyLOCAL []AlterOption +//line sql.y:4221 { - yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} + yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } - case 807: + yyVAL.union = yyLOCAL + case 842: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4065 + var yyLOCAL AlterOption +//line sql.y:4228 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &LockOption{Type: DefaultType} } - case 808: + yyVAL.union = yyLOCAL + case 843: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4069 + var yyLOCAL AlterOption +//line sql.y:4232 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &LockOption{Type: NoneType} } - case 809: + yyVAL.union = yyLOCAL + case 844: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4073 + var yyLOCAL AlterOption +//line sql.y:4236 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &LockOption{Type: SharedType} } - case 810: + yyVAL.union = yyLOCAL + case 845: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4077 + var yyLOCAL AlterOption +//line sql.y:4240 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = &LockOption{Type: ExclusiveType} } - case 811: + yyVAL.union = yyLOCAL + case 846: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4083 + var yyLOCAL AlterOption +//line sql.y:4246 { - yyVAL.indexOption = &IndexOption{Name: 
string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = AlgorithmValue(yyDollar[3].str) } - case 812: + yyVAL.union = yyLOCAL + case 847: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4087 + var yyLOCAL AlterOption +//line sql.y:4250 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = AlgorithmValue(yyDollar[3].str) } - case 813: + yyVAL.union = yyLOCAL + case 848: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4091 + var yyLOCAL AlterOption +//line sql.y:4254 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[3].bytes)} + yyLOCAL = AlgorithmValue(yyDollar[3].str) } - case 814: + yyVAL.union = yyLOCAL + case 849: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4096 +//line sql.y:4259 { yyVAL.str = "" } - case 815: + case 850: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4100 +//line sql.y:4263 { - yyVAL.str = string(yyDollar[3].bytes) + yyVAL.str = string(yyDollar[3].str) } - case 816: + case 851: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4104 +//line sql.y:4267 { - yyVAL.str = string(yyDollar[3].bytes) + yyVAL.str = string(yyDollar[3].str) } - case 817: + case 852: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4108 +//line sql.y:4271 { - yyVAL.str = string(yyDollar[3].bytes) + yyVAL.str = string(yyDollar[3].str) } - case 818: + case 853: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4113 +//line sql.y:4276 { yyVAL.str = "" } - case 819: + case 854: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4117 +//line sql.y:4280 { yyVAL.str = yyDollar[3].str } - case 820: + case 855: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4123 +//line sql.y:4286 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 821: + case 856: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4127 +//line sql.y:4290 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 822: + case 857: yyDollar = yyS[yypt-0 
: yypt+1] -//line sql.y:4132 +//line sql.y:4295 { yyVAL.str = "" } - case 823: + case 858: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4136 +//line sql.y:4299 { yyVAL.str = yyDollar[2].str } - case 824: + case 859: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4141 +//line sql.y:4304 { yyVAL.str = "cascaded" } - case 825: + case 860: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4145 +//line sql.y:4308 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 826: + case 861: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4149 +//line sql.y:4312 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 827: + case 862: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4154 +//line sql.y:4317 { yyVAL.str = "" } - case 828: + case 863: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4158 +//line sql.y:4321 { yyVAL.str = yyDollar[3].str } - case 829: + case 864: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4164 +//line sql.y:4327 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 830: + case 865: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4168 +//line sql.y:4331 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 831: + case 866: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4172 +//line sql.y:4335 { - yyVAL.str = "'" + string(yyDollar[1].bytes) + "'@" + string(yyDollar[2].bytes) + yyVAL.str = encodeSQLString(yyDollar[1].str) + "@" + string(yyDollar[2].str) } - case 832: + case 867: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4176 +//line sql.y:4339 { - yyVAL.str = string(yyDollar[1].bytes) + yyVAL.str = string(yyDollar[1].str) } - case 833: + case 868: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4181 + var yyLOCAL Lock +//line sql.y:4344 { - yyVAL.lock = NoLock + yyLOCAL = NoLock } - case 834: + yyVAL.union = yyLOCAL + case 869: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4185 + var yyLOCAL Lock +//line sql.y:4348 { - 
yyVAL.lock = ForUpdateLock + yyLOCAL = ForUpdateLock } - case 835: + yyVAL.union = yyLOCAL + case 870: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4189 + var yyLOCAL Lock +//line sql.y:4352 { - yyVAL.lock = ShareModeLock + yyLOCAL = ShareModeLock } - case 836: + yyVAL.union = yyLOCAL + case 871: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4194 + var yyLOCAL *SelectInto +//line sql.y:4357 { - yyVAL.selectInto = nil + yyLOCAL = nil } - case 837: + yyVAL.union = yyLOCAL + case 872: yyDollar = yyS[yypt-9 : yypt+1] -//line sql.y:4198 + var yyLOCAL *SelectInto +//line sql.y:4361 { - yyVAL.selectInto = &SelectInto{Type: IntoOutfileS3, FileName: string(yyDollar[4].bytes), Charset: yyDollar[5].str, FormatOption: yyDollar[6].str, ExportOption: yyDollar[7].str, Manifest: yyDollar[8].str, Overwrite: yyDollar[9].str} + yyLOCAL = &SelectInto{Type: IntoOutfileS3, FileName: encodeSQLString(yyDollar[4].str), Charset: yyDollar[5].str, FormatOption: yyDollar[6].str, ExportOption: yyDollar[7].str, Manifest: yyDollar[8].str, Overwrite: yyDollar[9].str} } - case 838: + yyVAL.union = yyLOCAL + case 873: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4202 + var yyLOCAL *SelectInto +//line sql.y:4365 { - yyVAL.selectInto = &SelectInto{Type: IntoDumpfile, FileName: string(yyDollar[3].bytes), Charset: "", FormatOption: "", ExportOption: "", Manifest: "", Overwrite: ""} + yyLOCAL = &SelectInto{Type: IntoDumpfile, FileName: encodeSQLString(yyDollar[3].str), Charset: "", FormatOption: "", ExportOption: "", Manifest: "", Overwrite: ""} } - case 839: + yyVAL.union = yyLOCAL + case 874: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:4206 + var yyLOCAL *SelectInto +//line sql.y:4369 { - yyVAL.selectInto = &SelectInto{Type: IntoOutfile, FileName: string(yyDollar[3].bytes), Charset: yyDollar[4].str, FormatOption: "", ExportOption: yyDollar[5].str, Manifest: "", Overwrite: ""} + yyLOCAL = &SelectInto{Type: IntoOutfile, FileName: encodeSQLString(yyDollar[3].str), Charset: yyDollar[4].str, 
FormatOption: "", ExportOption: yyDollar[5].str, Manifest: "", Overwrite: ""} } - case 840: + yyVAL.union = yyLOCAL + case 875: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4211 +//line sql.y:4374 { yyVAL.str = "" } - case 841: + case 876: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4215 +//line sql.y:4378 { yyVAL.str = " format csv" + yyDollar[3].str } - case 842: + case 877: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4219 +//line sql.y:4382 { yyVAL.str = " format text" + yyDollar[3].str } - case 843: + case 878: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4224 +//line sql.y:4387 { yyVAL.str = "" } - case 844: + case 879: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4228 +//line sql.y:4391 { yyVAL.str = " header" } - case 845: + case 880: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4233 +//line sql.y:4396 { yyVAL.str = "" } - case 846: + case 881: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4237 +//line sql.y:4400 { yyVAL.str = " manifest on" } - case 847: + case 882: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4241 +//line sql.y:4404 { yyVAL.str = " manifest off" } - case 848: + case 883: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4246 +//line sql.y:4409 { yyVAL.str = "" } - case 849: + case 884: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4250 +//line sql.y:4413 { yyVAL.str = " overwrite on" } - case 850: + case 885: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4254 +//line sql.y:4417 { yyVAL.str = " overwrite off" } - case 851: + case 886: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4260 +//line sql.y:4423 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 852: + case 887: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4265 +//line sql.y:4428 { yyVAL.str = "" } - case 853: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4269 + case 888: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:4432 { - yyVAL.str = " lines" + yyDollar[2].str + yyDollar[3].str + yyVAL.str = " lines" + yyDollar[2].str } - case 854: - yyDollar = yyS[yypt-0 : 
yypt+1] -//line sql.y:4274 + case 889: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:4438 { - yyVAL.str = "" + yyVAL.str = yyDollar[1].str } - case 855: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4278 + case 890: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:4442 { - yyVAL.str = " starting by '" + string(yyDollar[3].bytes) + "'" + yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 856: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4283 + case 891: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:4448 { - yyVAL.str = "" + yyVAL.str = " starting by " + encodeSQLString(yyDollar[3].str) } - case 857: + case 892: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4287 +//line sql.y:4452 { - yyVAL.str = " terminated by '" + string(yyDollar[3].bytes) + "'" + yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 858: + case 893: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4292 +//line sql.y:4457 { yyVAL.str = "" } - case 859: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4296 + case 894: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:4461 { - yyVAL.str = " " + yyDollar[1].str + yyDollar[2].str + yyDollar[3].str + yyDollar[4].str + yyVAL.str = " " + yyDollar[1].str + yyDollar[2].str } - case 860: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4301 + case 895: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:4467 { - yyVAL.str = "" + yyVAL.str = yyDollar[1].str } - case 861: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4305 + case 896: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:4471 { - yyVAL.str = " escaped by '" + string(yyDollar[3].bytes) + "'" + yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 862: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4310 + case 897: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:4477 { - yyVAL.str = "" + yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 863: + case 898: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4314 +//line sql.y:4481 { - yyVAL.str = yyDollar[1].str 
+ " enclosed by '" + string(yyDollar[4].bytes) + "'" + yyVAL.str = yyDollar[1].str + " enclosed by " + encodeSQLString(yyDollar[4].str) } - case 864: + case 899: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:4485 + { + yyVAL.str = " escaped by " + encodeSQLString(yyDollar[3].str) + } + case 900: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4319 +//line sql.y:4490 { yyVAL.str = "" } - case 865: + case 901: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4323 +//line sql.y:4494 { yyVAL.str = " optionally" } - case 866: + case 902: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4336 + var yyLOCAL *Insert +//line sql.y:4507 { - yyVAL.ins = &Insert{Rows: yyDollar[2].values} + yyLOCAL = &Insert{Rows: yyDollar[2].valuesUnion()} } - case 867: + yyVAL.union = yyLOCAL + case 903: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4340 + var yyLOCAL *Insert +//line sql.y:4511 { - yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt} + yyLOCAL = &Insert{Rows: yyDollar[1].selStmtUnion()} } - case 868: + yyVAL.union = yyLOCAL + case 904: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:4344 + var yyLOCAL *Insert +//line sql.y:4515 { - yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} + yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[5].valuesUnion()} } - case 869: + yyVAL.union = yyLOCAL + case 905: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4348 + var yyLOCAL *Insert +//line sql.y:4519 { - yyVAL.ins = &Insert{Rows: yyDollar[4].values} + yyLOCAL = &Insert{Rows: yyDollar[4].valuesUnion()} } - case 870: + yyVAL.union = yyLOCAL + case 906: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4352 + var yyLOCAL *Insert +//line sql.y:4523 { - yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} + yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[4].selStmtUnion()} } - case 871: + yyVAL.union = yyLOCAL + case 907: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4358 + var yyLOCAL Columns +//line sql.y:4529 { - 
yyVAL.columns = Columns{yyDollar[1].colIdent} + yyLOCAL = Columns{yyDollar[1].colIdent} } - case 872: + yyVAL.union = yyLOCAL + case 908: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4362 + var yyLOCAL Columns +//line sql.y:4533 { - yyVAL.columns = Columns{yyDollar[3].colIdent} + yyLOCAL = Columns{yyDollar[3].colIdent} } - case 873: + yyVAL.union = yyLOCAL + case 909: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4366 +//line sql.y:4537 { - yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + yySLICE := (*Columns)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].colIdent) } - case 874: + case 910: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:4370 +//line sql.y:4541 { - yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) + yySLICE := (*Columns)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[5].colIdent) } - case 875: + case 911: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4375 + var yyLOCAL UpdateExprs +//line sql.y:4546 { - yyVAL.updateExprs = nil + yyLOCAL = nil } - case 876: + yyVAL.union = yyLOCAL + case 912: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:4379 + var yyLOCAL UpdateExprs +//line sql.y:4550 { - yyVAL.updateExprs = yyDollar[5].updateExprs + yyLOCAL = yyDollar[5].updateExprsUnion() } - case 877: + yyVAL.union = yyLOCAL + case 913: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4385 + var yyLOCAL Values +//line sql.y:4556 { - yyVAL.values = Values{yyDollar[1].valTuple} + yyLOCAL = Values{yyDollar[1].valTupleUnion()} } - case 878: + yyVAL.union = yyLOCAL + case 914: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4389 +//line sql.y:4560 { - yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) + yySLICE := (*Values)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].valTupleUnion()) } - case 879: + case 915: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4395 + var yyLOCAL ValTuple +//line sql.y:4566 { - yyVAL.valTuple = yyDollar[1].valTuple + yyLOCAL = yyDollar[1].valTupleUnion() } - 
case 880: + yyVAL.union = yyLOCAL + case 916: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4399 + var yyLOCAL ValTuple +//line sql.y:4570 { - yyVAL.valTuple = ValTuple{} + yyLOCAL = ValTuple{} } - case 881: + yyVAL.union = yyLOCAL + case 917: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4405 + var yyLOCAL ValTuple +//line sql.y:4576 { - yyVAL.valTuple = ValTuple(yyDollar[2].exprs) + yyLOCAL = ValTuple(yyDollar[2].exprsUnion()) } - case 882: + yyVAL.union = yyLOCAL + case 918: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4411 + var yyLOCAL Expr +//line sql.y:4582 { - if len(yyDollar[1].valTuple) == 1 { - yyVAL.expr = yyDollar[1].valTuple[0] + if len(yyDollar[1].valTupleUnion()) == 1 { + yyLOCAL = yyDollar[1].valTupleUnion()[0] } else { - yyVAL.expr = yyDollar[1].valTuple + yyLOCAL = yyDollar[1].valTupleUnion() } } - case 883: + yyVAL.union = yyLOCAL + case 919: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4421 + var yyLOCAL UpdateExprs +//line sql.y:4592 { - yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} + yyLOCAL = UpdateExprs{yyDollar[1].updateExprUnion()} } - case 884: + yyVAL.union = yyLOCAL + case 920: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4425 +//line sql.y:4596 { - yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr) + yySLICE := (*UpdateExprs)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].updateExprUnion()) } - case 885: + case 921: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4431 + var yyLOCAL *UpdateExpr +//line sql.y:4602 { - yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr} + yyLOCAL = &UpdateExpr{Name: yyDollar[1].colNameUnion(), Expr: yyDollar[3].exprUnion()} } - case 886: + yyVAL.union = yyLOCAL + case 922: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4437 + var yyLOCAL SetExprs +//line sql.y:4608 { - yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} + yyLOCAL = SetExprs{yyDollar[1].setExprUnion()} } - case 887: + yyVAL.union = yyLOCAL + case 923: yyDollar = 
yyS[yypt-3 : yypt+1] -//line sql.y:4441 +//line sql.y:4612 { - yyVAL.setExprs = append(yyDollar[1].setExprs, yyDollar[3].setExpr) + yySLICE := (*SetExprs)(yyIaddr(yyVAL.union)) + *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion()) } - case 888: + case 924: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4447 + var yyLOCAL *SetExpr +//line sql.y:4618 { - yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Scope: ImplicitScope, Expr: NewStrLiteral([]byte("on"))} + yyLOCAL = &SetExpr{Name: yyDollar[1].colIdent, Scope: ImplicitScope, Expr: NewStrLiteral("on")} } - case 889: + yyVAL.union = yyLOCAL + case 925: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4451 + var yyLOCAL *SetExpr +//line sql.y:4622 { - yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Scope: ImplicitScope, Expr: NewStrLiteral([]byte("off"))} + yyLOCAL = &SetExpr{Name: yyDollar[1].colIdent, Scope: ImplicitScope, Expr: NewStrLiteral("off")} } - case 890: + yyVAL.union = yyLOCAL + case 926: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4455 + var yyLOCAL *SetExpr +//line sql.y:4626 { - yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Scope: ImplicitScope, Expr: yyDollar[3].expr} + yyLOCAL = &SetExpr{Name: yyDollar[1].colIdent, Scope: ImplicitScope, Expr: yyDollar[3].exprUnion()} } - case 891: + yyVAL.union = yyLOCAL + case 927: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4459 + var yyLOCAL *SetExpr +//line sql.y:4630 { - yyVAL.setExpr = &SetExpr{Name: NewColIdent(string(yyDollar[1].bytes)), Scope: ImplicitScope, Expr: yyDollar[2].expr} + yyLOCAL = &SetExpr{Name: NewColIdent(string(yyDollar[1].str)), Scope: ImplicitScope, Expr: yyDollar[2].exprUnion()} } - case 892: + yyVAL.union = yyLOCAL + case 928: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4463 + var yyLOCAL *SetExpr +//line sql.y:4634 { - yyDollar[2].setExpr.Scope = yyDollar[1].scope - yyVAL.setExpr = yyDollar[2].setExpr + yyDollar[2].setExprUnion().Scope = yyDollar[1].scopeUnion() + yyLOCAL = yyDollar[2].setExprUnion() } - case 894: 
+ yyVAL.union = yyLOCAL + case 930: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4471 +//line sql.y:4642 { - yyVAL.bytes = []byte("charset") + yyVAL.str = "charset" } - case 897: + case 933: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4481 + var yyLOCAL Expr +//line sql.y:4652 { - yyVAL.expr = NewStrLiteral([]byte(yyDollar[1].colIdent.String())) + yyLOCAL = NewStrLiteral(yyDollar[1].colIdent.String()) } - case 898: + yyVAL.union = yyLOCAL + case 934: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4485 + var yyLOCAL Expr +//line sql.y:4656 { - yyVAL.expr = NewStrLiteral(yyDollar[1].bytes) + yyLOCAL = NewStrLiteral(yyDollar[1].str) } - case 899: + yyVAL.union = yyLOCAL + case 935: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4489 + var yyLOCAL Expr +//line sql.y:4660 { - yyVAL.expr = &Default{} + yyLOCAL = &Default{} } - case 902: + yyVAL.union = yyLOCAL + case 938: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4498 + var yyLOCAL bool +//line sql.y:4669 { - yyVAL.boolean = false + yyLOCAL = false } - case 903: + yyVAL.union = yyLOCAL + case 939: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL bool +//line sql.y:4671 + { + yyLOCAL = true + } + yyVAL.union = yyLOCAL + case 940: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL bool +//line sql.y:4674 + { + yyLOCAL = false + } + yyVAL.union = yyLOCAL + case 941: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4500 + var yyLOCAL bool +//line sql.y:4676 { - yyVAL.boolean = true + yyLOCAL = true } - case 904: + yyVAL.union = yyLOCAL + case 942: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4503 + var yyLOCAL bool +//line sql.y:4679 { - yyVAL.boolean = false + yyLOCAL = false } - case 905: + yyVAL.union = yyLOCAL + case 943: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4505 + var yyLOCAL bool +//line sql.y:4681 { - yyVAL.boolean = true + yyLOCAL = true } - case 906: + yyVAL.union = yyLOCAL + case 944: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4508 + var yyLOCAL Ignore +//line sql.y:4684 { - yyVAL.ignore = false + 
yyLOCAL = false } - case 907: + yyVAL.union = yyLOCAL + case 945: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4510 + var yyLOCAL Ignore +//line sql.y:4686 { - yyVAL.ignore = true + yyLOCAL = true } - case 908: + yyVAL.union = yyLOCAL + case 946: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4513 +//line sql.y:4689 { yyVAL.empty = struct{}{} } - case 909: + case 947: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4515 +//line sql.y:4691 { yyVAL.empty = struct{}{} } - case 910: + case 948: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4517 +//line sql.y:4693 { yyVAL.empty = struct{}{} } - case 911: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4520 - { - yyVAL.str = "" - } - case 912: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4522 + case 949: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:4697 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = &CallProc{Name: yyDollar[2].tableName, Params: yyDollar[4].exprsUnion()} } - case 913: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4524 + yyVAL.union = yyLOCAL + case 950: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL Exprs +//line sql.y:4702 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = nil } - case 914: + yyVAL.union = yyLOCAL + case 951: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4526 + var yyLOCAL Exprs +//line sql.y:4706 { - yyVAL.str = string(yyDollar[1].bytes) + yyLOCAL = yyDollar[1].exprsUnion() } - case 915: + yyVAL.union = yyLOCAL + case 952: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4529 + var yyLOCAL []*IndexOption +//line sql.y:4711 { - yyVAL.indexOptions = nil + yyLOCAL = nil } - case 916: + yyVAL.union = yyLOCAL + case 953: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4531 + var yyLOCAL []*IndexOption +//line sql.y:4713 { - yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} + yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } - case 917: + yyVAL.union = yyLOCAL + case 954: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4535 + var yyLOCAL 
*IndexOption +//line sql.y:4717 { - yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), String: string(yyDollar[2].colIdent.String())} + yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), String: string(yyDollar[2].colIdent.String())} } - case 918: + yyVAL.union = yyLOCAL + case 955: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4541 +//line sql.y:4723 { yyVAL.colIdent = yyDollar[1].colIdent } - case 919: + case 956: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4545 +//line sql.y:4727 { - yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + yyVAL.colIdent = NewColIdent(string(yyDollar[1].str)) } - case 921: + case 958: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4552 +//line sql.y:4734 { - yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + yyVAL.colIdent = NewColIdent(string(yyDollar[1].str)) } - case 922: + case 959: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4558 +//line sql.y:4740 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].colIdent.String())) } - case 923: + case 960: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4562 +//line sql.y:4744 { - yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].str)) } - case 925: + case 961: + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:4749 + { + yyVAL.tableIdent = NewTableIdent("") + } + case 962: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:4753 + { + yyVAL.tableIdent = yyDollar[1].tableIdent + } + case 964: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4569 +//line sql.y:4760 { - yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].str)) } - case 1307: + case 1368: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4975 +//line sql.y:5188 { if incNesting(yylex) { yylex.Error("max nesting level reached") return 1 } } - case 1308: + case 1369: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4984 +//line sql.y:5197 { decNesting(yylex) } - case 1309: + case 1370: 
yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4989 +//line sql.y:5202 { skipToEnd(yylex) } - case 1310: + case 1371: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4994 +//line sql.y:5207 { skipToEnd(yylex) } - case 1311: + case 1372: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4998 +//line sql.y:5211 { skipToEnd(yylex) } - case 1312: + case 1373: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5002 +//line sql.y:5215 { skipToEnd(yylex) } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index 8aaa1351165..38fd54530f5 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -17,19 +17,19 @@ limitations under the License. %{ package sqlparser -func setParseTree(yylex interface{}, stmt Statement) { +func setParseTree(yylex yyLexer, stmt Statement) { yylex.(*Tokenizer).ParseTree = stmt } -func setAllowComments(yylex interface{}, allow bool) { +func setAllowComments(yylex yyLexer, allow bool) { yylex.(*Tokenizer).AllowComments = allow } -func setDDL(yylex interface{}, node Statement) { +func setDDL(yylex yyLexer, node Statement) { yylex.(*Tokenizer).partialDDL = node } -func incNesting(yylex interface{}) bool { +func incNesting(yylex yyLexer) bool { yylex.(*Tokenizer).nesting++ if yylex.(*Tokenizer).nesting == 200 { return true @@ -37,94 +37,114 @@ func incNesting(yylex interface{}) bool { return false } -func decNesting(yylex interface{}) { +func decNesting(yylex yyLexer) { yylex.(*Tokenizer).nesting-- } // skipToEnd forces the lexer to end prematurely. Not all SQL statements // are supported by the Parser, thus calling skipToEnd will make the lexer // return EOF early. 
-func skipToEnd(yylex interface{}) { +func skipToEnd(yylex yyLexer) { yylex.(*Tokenizer).SkipToEnd = true } +func bindVariable(yylex yyLexer, bvar string) { + yylex.(*Tokenizer).BindVars[bvar] = struct{}{} +} + %} -%union { +%struct { empty struct{} + LengthScaleOption LengthScaleOption + tableName TableName + tableIdent TableIdent + str string + strs []string + vindexParam VindexParam + colIdent ColIdent + joinCondition JoinCondition + collateAndCharset CollateAndCharset + columnType ColumnType +} + +%union { statement Statement selStmt SelectStatement - ddl *DDL + tableExpr TableExpr + expr Expr + colTuple ColTuple + optVal Expr + constraintInfo ConstraintInfo + alterOption AlterOption + characteristic Characteristic + ins *Insert - byt byte - bytes []byte - bytes2 [][]byte - str string - strs []string - selectExprs SelectExprs - selectExpr SelectExpr - columns Columns - partitions Partitions colName *ColName - tableExprs TableExprs - tableExpr TableExpr - joinCondition JoinCondition - tableName TableName - tableNames TableNames indexHints *IndexHints - expr Expr - exprs Exprs - boolVal BoolVal - boolean bool literal *Literal - colTuple ColTuple - values Values - valTuple ValTuple subquery *Subquery derivedTable *DerivedTable - whens []*When when *When - orderBy OrderBy order *Order limit *Limit - updateExprs UpdateExprs - setExprs SetExprs updateExpr *UpdateExpr setExpr *SetExpr - characteristic Characteristic - characteristics []Characteristic - colIdent ColIdent - tableIdent TableIdent convertType *ConvertType aliasedTableName *AliasedTableExpr - TableSpec *TableSpec - columnType ColumnType - colKeyOpt ColumnKeyOption - optVal Expr - LengthScaleOption LengthScaleOption + tableSpec *TableSpec columnDefinition *ColumnDefinition - columnDefinitions []*ColumnDefinition indexDefinition *IndexDefinition indexInfo *IndexInfo indexOption *IndexOption - indexOptions []*IndexOption indexColumn *IndexColumn - indexColumns []*IndexColumn - constraintDefinition 
*ConstraintDefinition - constraintInfo ConstraintInfo - ReferenceAction ReferenceAction - partDefs []*PartitionDefinition partDef *PartitionDefinition partSpec *PartitionSpec - partSpecs []*PartitionSpec - vindexParam VindexParam - vindexParams []VindexParam showFilter *ShowFilter optLike *OptLike + selectInto *SelectInto + createDatabase *CreateDatabase + alterDatabase *AlterDatabase + createTable *CreateTable + tableAndLockType *TableAndLockType + alterTable *AlterTable + tableOption *TableOption + columnTypeOptions *ColumnTypeOptions + constraintDefinition *ConstraintDefinition + revertMigration *RevertMigration + alterMigration *AlterMigration + + whens []*When + columnDefinitions []*ColumnDefinition + indexOptions []*IndexOption + indexColumns []*IndexColumn + collateAndCharsets []CollateAndCharset + tableAndLockTypes TableAndLockTypes + renameTablePairs []*RenameTablePair + alterOptions []AlterOption + vindexParams []VindexParam + partDefs []*PartitionDefinition + partSpecs []*PartitionSpec + characteristics []Characteristic + selectExpr SelectExpr + columns Columns + partitions Partitions + tableExprs TableExprs + tableNames TableNames + exprs Exprs + values Values + valTuple ValTuple + orderBy OrderBy + updateExprs UpdateExprs + setExprs SetExprs + selectExprs SelectExprs + tableOptions TableOptions + + colKeyOpt ColumnKeyOption + ReferenceAction ReferenceAction isolationLevel IsolationLevel insertAction InsertAction scope Scope - ignore Ignore lock Lock joinType JoinType comparisonExprOperator ComparisonExprOperator @@ -132,62 +152,52 @@ func skipToEnd(yylex interface{}) { matchExprOption MatchExprOption orderDirection OrderDirection explainType ExplainType - selectInto *SelectInto - createIndex *CreateIndex - createDatabase *CreateDatabase - alterDatabase *AlterDatabase - collateAndCharset CollateAndCharset - collateAndCharsets []CollateAndCharset - createTable *CreateTable - tableAndLockTypes []*TableAndLockType - tableAndLockType *TableAndLockType 
lockType LockType - alterTable *AlterTable - alterOption AlterOption - alterOptions []AlterOption - tableOption *TableOption - tableOptions TableOptions + + boolean bool + boolVal BoolVal + ignore Ignore } %token LEX_ERROR -%left UNION -%token SELECT STREAM VSTREAM INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET FOR -%token ALL DISTINCT AS EXISTS ASC DESC INTO DUPLICATE KEY DEFAULT SET LOCK UNLOCK KEYS DO -%token DISTINCTROW PARSER -%token OUTFILE S3 DATA LOAD LINES TERMINATED ESCAPED ENCLOSED -%token DUMPFILE CSV HEADER MANIFEST OVERWRITE STARTING OPTIONALLY -%token VALUES LAST_INSERT_ID -%token NEXT VALUE SHARE MODE -%token SQL_NO_CACHE SQL_CACHE SQL_CALC_FOUND_ROWS -%left JOIN STRAIGHT_JOIN LEFT RIGHT INNER OUTER CROSS NATURAL USE FORCE -%left ON USING INPLACE COPY ALGORITHM NONE SHARED EXCLUSIVE +%left UNION +%token SELECT STREAM VSTREAM INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET FOR +%token ALL DISTINCT AS EXISTS ASC DESC INTO DUPLICATE KEY DEFAULT SET LOCK UNLOCK KEYS DO CALL +%token DISTINCTROW PARSER +%token OUTFILE S3 DATA LOAD LINES TERMINATED ESCAPED ENCLOSED +%token DUMPFILE CSV HEADER MANIFEST OVERWRITE STARTING OPTIONALLY +%token VALUES LAST_INSERT_ID +%token NEXT VALUE SHARE MODE +%token SQL_NO_CACHE SQL_CACHE SQL_CALC_FOUND_ROWS +%left JOIN STRAIGHT_JOIN LEFT RIGHT INNER OUTER CROSS NATURAL USE FORCE +%left ON USING INPLACE COPY ALGORITHM NONE SHARED EXCLUSIVE %token '(' ',' ')' -%token ID AT_ID AT_AT_ID HEX STRING INTEGRAL FLOAT HEXNUM VALUE_ARG LIST_ARG COMMENT COMMENT_KEYWORD BIT_LITERAL COMPRESSION -%token NULL TRUE FALSE OFF -%token DISCARD IMPORT ENABLE DISABLE TABLESPACE +%token ID AT_ID AT_AT_ID HEX STRING INTEGRAL FLOAT HEXNUM VALUE_ARG LIST_ARG COMMENT COMMENT_KEYWORD BIT_LITERAL COMPRESSION +%token NULL TRUE FALSE OFF +%token DISCARD IMPORT ENABLE DISABLE TABLESPACE // Precedence dictated by mysql. But the vitess grammar is simplified. // Some of these operators don't conflict in our situation. 
Nevertheless, // it's better to have these listed in the correct order. Also, we don't // support all operators yet. // * NOTE: If you change anything here, update precedence.go as well * -%left OR -%left XOR -%left AND -%right NOT '!' -%left BETWEEN CASE WHEN THEN ELSE END -%left '=' '<' '>' LE GE NE NULL_SAFE_EQUAL IS LIKE REGEXP IN -%left '|' -%left '&' -%left SHIFT_LEFT SHIFT_RIGHT -%left '+' '-' -%left '*' '/' DIV '%' MOD -%left '^' -%right '~' UNARY -%left COLLATE -%right BINARY UNDERSCORE_BINARY UNDERSCORE_UTF8MB4 UNDERSCORE_UTF8 UNDERSCORE_LATIN1 -%right INTERVAL -%nonassoc '.' +%left OR +%left XOR +%left AND +%right NOT '!' +%left BETWEEN CASE WHEN THEN ELSE END +%left '=' '<' '>' LE GE NE NULL_SAFE_EQUAL IS LIKE REGEXP IN +%left '|' +%left '&' +%left SHIFT_LEFT SHIFT_RIGHT +%left '+' '-' +%left '*' '/' DIV '%' MOD +%left '^' +%right '~' UNARY +%left COLLATE +%right BINARY UNDERSCORE_BINARY UNDERSCORE_UTF8MB4 UNDERSCORE_UTF8 UNDERSCORE_LATIN1 +%right INTERVAL +%nonassoc '.' // There is no need to define precedence for the JSON // operators because the syntax is restricted enough that @@ -195,80 +205,87 @@ func skipToEnd(yylex interface{}) { %token JSON_EXTRACT_OP JSON_UNQUOTE_EXTRACT_OP // DDL Tokens -%token CREATE ALTER DROP RENAME ANALYZE ADD FLUSH CHANGE MODIFY -%token SCHEMA TABLE INDEX VIEW TO IGNORE IF UNIQUE PRIMARY COLUMN SPATIAL FULLTEXT KEY_BLOCK_SIZE CHECK INDEXES -%token ACTION CASCADE CONSTRAINT FOREIGN NO REFERENCES RESTRICT -%token SHOW DESCRIBE EXPLAIN DATE ESCAPE REPAIR OPTIMIZE TRUNCATE COALESCE EXCHANGE REBUILD PARTITIONING REMOVE -%token MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER -%token VINDEX VINDEXES DIRECTORY NAME UPGRADE -%token STATUS VARIABLES WARNINGS CASCADED DEFINER OPTION SQL UNDEFINED -%token SEQUENCE MERGE TEMPTABLE INVOKER SECURITY FIRST AFTER LAST +%token CREATE ALTER DROP RENAME ANALYZE ADD FLUSH CHANGE MODIFY +%token REVERT +%token SCHEMA TABLE INDEX VIEW TO IGNORE IF UNIQUE PRIMARY COLUMN SPATIAL 
FULLTEXT KEY_BLOCK_SIZE CHECK INDEXES +%token ACTION CASCADE CONSTRAINT FOREIGN NO REFERENCES RESTRICT +%token SHOW DESCRIBE EXPLAIN DATE ESCAPE REPAIR OPTIMIZE TRUNCATE COALESCE EXCHANGE REBUILD PARTITIONING REMOVE +%token MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER +%token VINDEX VINDEXES DIRECTORY NAME UPGRADE +%token STATUS VARIABLES WARNINGS CASCADED DEFINER OPTION SQL UNDEFINED +%token SEQUENCE MERGE TEMPORARY TEMPTABLE INVOKER SECURITY FIRST AFTER LAST + +// Migration tokens +%token VITESS_MIGRATION CANCEL RETRY COMPLETE // Transaction Tokens -%token BEGIN START TRANSACTION COMMIT ROLLBACK SAVEPOINT RELEASE WORK +%token BEGIN START TRANSACTION COMMIT ROLLBACK SAVEPOINT RELEASE WORK // Type Tokens -%token BIT TINYINT SMALLINT MEDIUMINT INT INTEGER BIGINT INTNUM -%token REAL DOUBLE FLOAT_TYPE DECIMAL NUMERIC -%token TIME TIMESTAMP DATETIME YEAR -%token CHAR VARCHAR BOOL CHARACTER VARBINARY NCHAR -%token TEXT TINYTEXT MEDIUMTEXT LONGTEXT -%token BLOB TINYBLOB MEDIUMBLOB LONGBLOB JSON ENUM -%token GEOMETRY POINT LINESTRING POLYGON GEOMETRYCOLLECTION MULTIPOINT MULTILINESTRING MULTIPOLYGON +%token BIT TINYINT SMALLINT MEDIUMINT INT INTEGER BIGINT INTNUM +%token REAL DOUBLE FLOAT_TYPE DECIMAL NUMERIC +%token TIME TIMESTAMP DATETIME YEAR +%token CHAR VARCHAR BOOL CHARACTER VARBINARY NCHAR +%token TEXT TINYTEXT MEDIUMTEXT LONGTEXT +%token BLOB TINYBLOB MEDIUMBLOB LONGBLOB JSON ENUM +%token GEOMETRY POINT LINESTRING POLYGON GEOMETRYCOLLECTION MULTIPOINT MULTILINESTRING MULTIPOLYGON // Type Modifiers -%token NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL +%token NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL // SHOW tokens -%token COLLATION DATABASES SCHEMAS TABLES VITESS_METADATA VSCHEMA FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS EXTENDED -%token KEYSPACES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS CODE PRIVILEGES FUNCTION +%token COLLATION DATABASES SCHEMAS TABLES VITESS_METADATA VSCHEMA FULL PROCESSLIST COLUMNS FIELDS ENGINES 
PLUGINS EXTENDED +%token KEYSPACES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS VITESS_MIGRATIONS CODE PRIVILEGES FUNCTION OPEN TRIGGERS EVENT USER // SET tokens -%token NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE +%token NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE // Functions -%token CURRENT_TIMESTAMP DATABASE CURRENT_DATE -%token CURRENT_TIME LOCALTIME LOCALTIMESTAMP CURRENT_USER -%token UTC_DATE UTC_TIME UTC_TIMESTAMP -%token REPLACE -%token CONVERT CAST -%token SUBSTR SUBSTRING -%token GROUP_CONCAT SEPARATOR -%token TIMESTAMPADD TIMESTAMPDIFF +%token CURRENT_TIMESTAMP DATABASE CURRENT_DATE +%token CURRENT_TIME LOCALTIME LOCALTIMESTAMP CURRENT_USER +%token UTC_DATE UTC_TIME UTC_TIMESTAMP +%token REPLACE +%token CONVERT CAST +%token SUBSTR SUBSTRING +%token GROUP_CONCAT SEPARATOR +%token TIMESTAMPADD TIMESTAMPDIFF // Match -%token MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION WITHOUT VALIDATION +%token MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION WITHOUT VALIDATION // MySQL reserved words that are unused by this grammar will map to this token. 
-%token UNUSED ARRAY CUME_DIST DESCRIPTION DENSE_RANK EMPTY EXCEPT FIRST_VALUE GROUPING GROUPS JSON_TABLE LAG LAST_VALUE LATERAL LEAD MEMBER -%token NTH_VALUE NTILE OF OVER PERCENT_RANK RANK RECURSIVE ROW_NUMBER SYSTEM WINDOW -%token ACTIVE ADMIN BUCKETS CLONE COMPONENT DEFINITION ENFORCED EXCLUDE FOLLOWING GEOMCOLLECTION GET_MASTER_PUBLIC_KEY HISTOGRAM HISTORY -%token INACTIVE INVISIBLE LOCKED MASTER_COMPRESSION_ALGORITHMS MASTER_PUBLIC_KEY_PATH MASTER_TLS_CIPHERSUITES MASTER_ZSTD_COMPRESSION_LEVEL -%token NESTED NETWORK_NAMESPACE NOWAIT NULLS OJ OLD OPTIONAL ORDINALITY ORGANIZATION OTHERS PATH PERSIST PERSIST_ONLY PRECEDING PRIVILEGE_CHECKS_USER PROCESS -%token RANDOM REFERENCE REQUIRE_ROW_FORMAT RESOURCE RESPECT RESTART RETAIN REUSE ROLE SECONDARY SECONDARY_ENGINE SECONDARY_LOAD SECONDARY_UNLOAD SKIP SRID -%token THREAD_PRIORITY TIES UNBOUNDED VCPU VISIBLE +%token UNUSED ARRAY CUME_DIST DESCRIPTION DENSE_RANK EMPTY EXCEPT FIRST_VALUE GROUPING GROUPS JSON_TABLE LAG LAST_VALUE LATERAL LEAD MEMBER +%token NTH_VALUE NTILE OF OVER PERCENT_RANK RANK RECURSIVE ROW_NUMBER SYSTEM WINDOW +%token ACTIVE ADMIN BUCKETS CLONE COMPONENT DEFINITION ENFORCED EXCLUDE FOLLOWING GEOMCOLLECTION GET_MASTER_PUBLIC_KEY HISTOGRAM HISTORY +%token INACTIVE INVISIBLE LOCKED MASTER_COMPRESSION_ALGORITHMS MASTER_PUBLIC_KEY_PATH MASTER_TLS_CIPHERSUITES MASTER_ZSTD_COMPRESSION_LEVEL +%token NESTED NETWORK_NAMESPACE NOWAIT NULLS OJ OLD OPTIONAL ORDINALITY ORGANIZATION OTHERS PATH PERSIST PERSIST_ONLY PRECEDING PRIVILEGE_CHECKS_USER PROCESS +%token RANDOM REFERENCE REQUIRE_ROW_FORMAT RESOURCE RESPECT RESTART RETAIN REUSE ROLE SECONDARY SECONDARY_ENGINE SECONDARY_LOAD SECONDARY_UNLOAD SKIP SRID +%token THREAD_PRIORITY TIES UNBOUNDED VCPU VISIBLE // Explain tokens -%token FORMAT TREE VITESS TRADITIONAL +%token FORMAT TREE VITESS TRADITIONAL // Lock type tokens -%token LOCAL LOW_PRIORITY +%token LOCAL LOW_PRIORITY + +// Flush tokens +%token NO_WRITE_TO_BINLOG LOGS ERROR GENERAL HOSTS 
OPTIMIZER_COSTS USER_RESOURCES SLOW CHANNEL RELAY EXPORT // TableOptions tokens -%token AVG_ROW_LENGTH CONNECTION CHECKSUM DELAY_KEY_WRITE ENCRYPTION ENGINE INSERT_METHOD MAX_ROWS MIN_ROWS PACK_KEYS PASSWORD -%token FIXED DYNAMIC COMPRESSED REDUNDANT COMPACT ROW_FORMAT STATS_AUTO_RECALC STATS_PERSISTENT STATS_SAMPLE_PAGES STORAGE MEMORY DISK +%token AVG_ROW_LENGTH CONNECTION CHECKSUM DELAY_KEY_WRITE ENCRYPTION ENGINE INSERT_METHOD MAX_ROWS MIN_ROWS PACK_KEYS PASSWORD +%token FIXED DYNAMIC COMPRESSED REDUNDANT COMPACT ROW_FORMAT STATS_AUTO_RECALC STATS_PERSISTENT STATS_SAMPLE_PAGES STORAGE MEMORY DISK %type command %type simple_select select_statement base_select union_rhs %type explain_statement explainable_statement %type stream_statement vstream_statement insert_statement update_statement delete_statement set_statement set_transaction_statement %type create_statement alter_statement rename_statement drop_statement truncate_statement flush_statement do_statement -%type rename_list +%type rename_list %type create_table_prefix %type alter_table_prefix -%type alter_option alter_commands_modifier -%type alter_options alter_commands_list alter_commands_modifier_list -%type create_index_prefix +%type alter_option alter_commands_modifier lock_index algorithm_index +%type alter_options alter_commands_list alter_commands_modifier_list algorithm_lock_opt +%type create_index_prefix %type create_database_prefix %type alter_database_prefix %type collate character_set @@ -276,19 +293,20 @@ func skipToEnd(yylex interface{}) { %type default_optional %type analyze_statement show_statement use_statement other_statement %type begin_statement commit_statement rollback_statement savepoint_statement release_statement load_statement -%type lock_statement unlock_statement -%type comment_opt comment_list +%type lock_statement unlock_statement call_statement +%type revert_statement +%type comment_opt comment_list %type wild_opt check_option_opt cascade_or_local_opt restrict_or_cascade_opt 
%type explain_format_opt %type insert_or_replace -%type explain_synonyms -%type cache_opt separator_opt +%type explain_synonyms +%type cache_opt separator_opt flush_option for_channel_opt %type match_option -%type distinct_opt union_op replace_opt +%type distinct_opt union_op replace_opt local_opt %type like_escape_opt %type select_expression_list select_expression_list_opt %type select_expression -%type select_options +%type select_options flush_option_list %type select_option algorithm_view security_view security_view_opt %type definer_opt user %type expression @@ -309,7 +327,7 @@ func skipToEnd(yylex interface{}) { %type function_call_keyword function_call_nonkeyword function_call_generic function_call_conflict func_datetime_precision %type is_suffix %type col_tuple -%type expression_list +%type expression_list expression_list_opt %type tuple_list %type row_tuple tuple_or_empty %type tuple_expression @@ -326,31 +344,32 @@ func skipToEnd(yylex interface{}) { %type asc_desc_opt %type limit_opt %type into_option +%type column_type_options %type header_opt export_options manifest_opt overwrite_opt format_opt optionally_opt -%type fields_opt lines_opt terminated_by_opt starting_by_opt enclosed_by_opt escaped_by_opt constraint_opt +%type fields_opts fields_opt_list fields_opt lines_opts lines_opt lines_opt_list %type lock_opt %type ins_column_list column_list column_list_opt index_list %type opt_partition_clause partition_list %type on_dup_opt %type update_list %type set_list -%type charset_or_character_set charset_or_character_set_or_names +%type charset_or_character_set charset_or_character_set_or_names %type update_expression %type set_expression %type transaction_char %type transaction_chars %type isolation_level -%type for_from +%type for_from %type default_opt %type ignore_opt -%type full_opt from_database_opt tables_or_processlist columns_or_fields extended_opt storage_opt +%type columns_or_fields extended_opt storage_opt %type like_or_where_opt like_opt -%type 
exists_opt not_exists_opt null_opt enforced_opt +%type exists_opt not_exists_opt enforced_opt temp_opt full_opt %type to_opt -%type reserved_keyword non_reserved_keyword +%type reserved_keyword non_reserved_keyword %type sql_id reserved_sql_id col_alias as_ci_opt %type charset_value -%type table_id reserved_table_id table_alias as_opt_id +%type table_id reserved_table_id table_alias as_opt_id table_id_opt from_database_opt %type as_opt work_opt savepoint_opt %type skip_to_end ddl_skip_to_end %type charset @@ -358,12 +377,10 @@ func skipToEnd(yylex interface{}) { %type convert_type %type column_type %type int_type decimal_type numeric_type time_type char_type spatial_type -%type length_opt column_comment_opt -%type column_default_opt on_update_opt +%type length_opt %type charset_opt collate_opt %type float_length_opt decimal_length_opt -%type auto_increment_opt unsigned_opt zero_fill_opt without_valid_opt -%type column_key_opt +%type unsigned_opt zero_fill_opt without_valid_opt %type enum_values %type column_definition %type column_definition_list @@ -372,7 +389,7 @@ func skipToEnd(yylex interface{}) { %type index_or_key index_symbols from_or_in index_or_key_opt %type name_opt constraint_name_opt %type equal_opt -%type table_spec table_column_list +%type table_spec table_column_list %type create_like %type table_opt_value %type table_option @@ -380,8 +397,8 @@ func skipToEnd(yylex interface{}) { %type index_info %type index_column %type index_column_list -%type index_option lock_index algorithm_index using_index_type -%type index_option_list index_option_list_opt algorithm_lock_opt using_opt +%type index_option using_index_type +%type index_option_list index_option_list_opt using_opt %type constraint_info check_constraint_info %type partition_definitions %type partition_definition @@ -389,7 +406,7 @@ func skipToEnd(yylex interface{}) { %type vindex_param %type vindex_param_list vindex_params_opt %type id_or_var vindex_type vindex_type_opt id_or_var_opt -%type 
database_or_schema column_opt insert_method_options row_format_options +%type database_or_schema column_opt insert_method_options row_format_options %type fk_reference_action fk_on_delete fk_on_update %type vitess_topo %type lock_table_list @@ -444,6 +461,8 @@ command: | load_statement | lock_statement | unlock_statement +| call_statement +| revert_statement | /*empty*/ { setParseTree(yylex, nil) @@ -504,7 +523,7 @@ select_statement: } | SELECT comment_opt cache_opt NEXT num_val for_from table_name { - $$ = NewSelect(Comments($2), SelectExprs{Nextval{Expr: $5}}, []string{$3}/*options*/, TableExprs{&AliasedTableExpr{Expr: $7}}, nil/*where*/, nil/*groupBy*/, nil/*having*/) + $$ = NewSelect(Comments($2), SelectExprs{&Nextval{Expr: $5}}, []string{$3}/*options*/, TableExprs{&AliasedTableExpr{Expr: $7}}, nil/*where*/, nil/*groupBy*/, nil/*having*/) } // simple_select is an unparenthesized select used for subquery. @@ -754,9 +773,10 @@ create_statement: } | create_index_prefix '(' index_column_list ')' index_option_list_opt algorithm_lock_opt { - $1.Columns = $3 - $1.Options = append($1.Options,$5...) - $1.Options = append($1.Options,$6...) + indexDef := $1.AlterOptions[0].(*AddIndexDefinition).IndexDefinition + indexDef.Columns = $3 + indexDef.Options = append(indexDef.Options,$5...) + $1.AlterOptions = append($1.AlterOptions,$6...) 
$1.FullyParsed = true $$ = $1 } @@ -823,9 +843,9 @@ vindex_param: } create_table_prefix: - CREATE TABLE not_exists_opt table_name + CREATE temp_opt TABLE not_exists_opt table_name { - $$ = &CreateTable{Table: $4, IfNotExists: $3} + $$ = &CreateTable{Table: $5, IfNotExists: $4, Temp: $2} setDDL(yylex, $$) } @@ -837,16 +857,31 @@ alter_table_prefix: } create_index_prefix: - CREATE constraint_opt INDEX id_or_var using_opt ON table_name + CREATE INDEX id_or_var using_opt ON table_name + { + $$ = &AlterTable{Table: $6, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$3, Type:string($2)}, Options:$4}}}} + setDDL(yylex, $$) + } +| CREATE FULLTEXT INDEX id_or_var using_opt ON table_name { - $$ = &CreateIndex{Constraint: $2, Name: $4, Options: $5, Table: $7} + $$ = &AlterTable{Table: $7, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$4, Type:string($2)+" "+string($3), Fulltext:true}, Options:$5}}}} + setDDL(yylex, $$) + } +| CREATE SPATIAL INDEX id_or_var using_opt ON table_name + { + $$ = &AlterTable{Table: $7, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$4, Type:string($2)+" "+string($3), Spatial:true}, Options:$5}}}} + setDDL(yylex, $$) + } +| CREATE UNIQUE INDEX id_or_var using_opt ON table_name + { + $$ = &AlterTable{Table: $7, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$4, Type:string($2)+" "+string($3), Unique:true}, Options:$5}}}} setDDL(yylex, $$) } create_database_prefix: - CREATE database_or_schema not_exists_opt id_or_var + CREATE database_or_schema comment_opt not_exists_opt table_id { - $$ = &CreateDatabase{DBName: string($4.String()), IfNotExists: $3} + $$ = &CreateDatabase{Comments: Comments($3), DBName: $5, IfNotExists: $4} setDDL(yylex,$$) } @@ -911,7 +946,7 @@ character_set: } | default_optional charset_or_character_set 
equal_opt STRING { - $$ = CollateAndCharset{Type:CharacterSetType, Value:("'" + string($4) + "'"), IsDefault:$1} + $$ = CollateAndCharset{Type:CharacterSetType, Value:(encodeSQLString($4)), IsDefault:$1} } collate: @@ -921,7 +956,7 @@ collate: } | default_optional COLLATE equal_opt STRING { - $$ = CollateAndCharset{Type:CollateType, Value:("'" + string($4) + "'"), IsDefault:$1} + $$ = CollateAndCharset{Type:CollateType, Value:(encodeSQLString($4)), IsDefault:$1} } @@ -979,16 +1014,73 @@ table_column_list: } column_definition: - sql_id column_type null_opt column_default_opt on_update_opt auto_increment_opt column_key_opt column_comment_opt - { - $2.NotNull = $3 - $2.Default = $4 - $2.OnUpdate = $5 - $2.Autoincrement = $6 - $2.KeyOpt = $7 - $2.Comment = $8 + sql_id column_type column_type_options + { + $2.Options = $3 $$ = &ColumnDefinition{Name: $1, Type: $2} } + +// There is a shift reduce conflict that arises here because UNIQUE and KEY are column_type_option and so is UNIQUE KEY. +// So in the state "column_type_options UNIQUE. KEY" there is a shift-reduce conflict. +// This has been added to emulate what MySQL does. The previous architecture was such that the order of the column options +// was specific (as stated in the MySQL guide) and did not accept arbitrary order options. 
For example NOT NULL DEFAULT 1 and not DEFAULT 1 NOT NULL +column_type_options: + { + $$ = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: colKeyNone, Comment: nil} + } +| column_type_options NULL + { + val := true + $1.Null = &val + $$ = $1 + } +| column_type_options NOT NULL + { + val := false + $1.Null = &val + $$ = $1 + } +| column_type_options DEFAULT value_expression + { + $1.Default = $3 + $$ = $1 + } +| column_type_options ON UPDATE function_call_nonkeyword + { + $1.OnUpdate = $4 + $$ = $1 + } +| column_type_options AUTO_INCREMENT + { + $1.Autoincrement = true + $$ = $1 + } +| column_type_options COMMENT_KEYWORD STRING + { + $1.Comment = NewStrLiteral($3) + $$ = $1 + } +| column_type_options PRIMARY KEY + { + $1.KeyOpt = colKeyPrimary + $$ = $1 + } +| column_type_options KEY + { + $1.KeyOpt = colKey + $$ = $1 + } +| column_type_options UNIQUE KEY + { + $1.KeyOpt = colKeyUniqueKey + $$ = $1 + } +| column_type_options UNIQUE + { + $1.KeyOpt = colKeyUnique + $$ = $1 + } + column_type: numeric_type unsigned_opt zero_fill_opt { @@ -1204,11 +1296,11 @@ enum_values: STRING { $$ = make([]string, 0, 4) - $$ = append($$, "'" + string($1) + "'") + $$ = append($$, encodeSQLString($1)) } | enum_values ',' STRING { - $$ = append($1, "'" + string($3) + "'") + $$ = append($1, encodeSQLString($3)) } length_opt: @@ -1268,47 +1360,6 @@ zero_fill_opt: $$ = true } -// Null opt returns false to mean NULL (i.e. 
the default) and true for NOT NULL -null_opt: - { - $$ = false - } -| NULL - { - $$ = false - } -| NOT NULL - { - $$ = true - } - -column_default_opt: - { - $$ = nil - } -| DEFAULT value_expression - { - $$ = $2 - } - -on_update_opt: - { - $$ = nil - } -| ON UPDATE function_call_nonkeyword -{ - $$ = $3 -} - -auto_increment_opt: - { - $$ = false - } -| AUTO_INCREMENT - { - $$ = true - } - charset_opt: { $$ = "" @@ -1317,6 +1368,10 @@ charset_opt: { $$ = string($2.String()) } +| charset_or_character_set STRING + { + $$ = encodeSQLString($2) + } | charset_or_character_set BINARY { $$ = string($2) @@ -1332,38 +1387,9 @@ collate_opt: } | COLLATE STRING { - $$ = string($2) - } - -column_key_opt: - { - $$ = colKeyNone - } -| PRIMARY KEY - { - $$ = colKeyPrimary - } -| KEY - { - $$ = colKey - } -| UNIQUE KEY - { - $$ = colKeyUniqueKey - } -| UNIQUE - { - $$ = colKeyUnique + $$ = encodeSQLString($2) } -column_comment_opt: - { - $$ = nil - } -| COMMENT_KEYWORD STRING - { - $$ = NewStrLiteral($2) - } index_definition: index_info '(' index_column_list ')' index_option_list_opt @@ -1522,7 +1548,7 @@ index_column: constraint_definition: CONSTRAINT id_or_var_opt constraint_info { - $$ = &ConstraintDefinition{Name: string($2.String()), Details: $3} + $$ = &ConstraintDefinition{Name: $2, Details: $3} } | constraint_info { @@ -1532,7 +1558,7 @@ constraint_definition: check_constraint_definition: CONSTRAINT id_or_var_opt check_constraint_info { - $$ = &ConstraintDefinition{Name: string($2.String()), Details: $3} + $$ = &ConstraintDefinition{Name: $2, Details: $3} } | check_constraint_info { @@ -1807,7 +1833,7 @@ table_opt_value: } | STRING { - $$ = "'" + string($1) + "'" + $$ = encodeSQLString($1) } | INTEGRAL { @@ -1816,7 +1842,7 @@ table_opt_value: column_opt: { - $$ = []byte("") + $$ = "" } | COLUMN @@ -1944,7 +1970,7 @@ alter_option: } | DROP index_or_key id_or_var { - $$ = &DropKey{Type:NormalKeyType, Name:$3.String()} + $$ = &DropKey{Type:NormalKeyType, Name:$3} } | DROP 
PRIMARY KEY { @@ -1952,7 +1978,7 @@ alter_option: } | DROP FOREIGN KEY id_or_var { - $$ = &DropKey{Type:ForeignKeyType, Name:$4.String()} + $$ = &DropKey{Type:ForeignKeyType, Name:$4} } | FORCE { @@ -1960,11 +1986,11 @@ alter_option: } | RENAME to_opt table_name { - $$ = &RenameTable{Table:$3} + $$ = &RenameTableName{Table:$3} } | RENAME index_or_key id_or_var TO id_or_var { - $$ = &RenameIndex{OldName:$3.String(), NewName:$5.String()} + $$ = &RenameIndex{OldName:$3, NewName:$5} } alter_commands_modifier_list: @@ -2046,17 +2072,17 @@ alter_statement: { $$ = &AlterView{ViewName: $6.ToViewName(), Algorithm:$2, Definer: $3 ,Security:$4, Columns:$7, Select: $9, CheckOption: $10 } } -| alter_database_prefix id_or_var_opt create_options +| alter_database_prefix table_id_opt create_options { $1.FullyParsed = true - $1.DBName = $2.String() + $1.DBName = $2 $1.AlterOptions = $3 $$ = $1 } -| alter_database_prefix id_or_var UPGRADE DATA DIRECTORY NAME +| alter_database_prefix table_id UPGRADE DATA DIRECTORY NAME { $1.FullyParsed = true - $1.DBName = $2.String() + $1.DBName = $2 $1.UpdateDataDirectory = true $$ = $1 } @@ -2128,6 +2154,33 @@ alter_statement: }, } } +| ALTER VITESS_MIGRATION STRING RETRY + { + $$ = &AlterMigration{ + Type: RetryMigrationType, + UUID: string($3), + } + } +| ALTER VITESS_MIGRATION STRING COMPLETE + { + $$ = &AlterMigration{ + Type: CompleteMigrationType, + UUID: string($3), + } + } +| ALTER VITESS_MIGRATION STRING CANCEL + { + $$ = &AlterMigration{ + Type: CancelMigrationType, + UUID: string($3), + } + } +| ALTER VITESS_MIGRATION CANCEL ALL + { + $$ = &AlterMigration{ + Type: CancelAllMigrationType, + } + } partition_operation: ADD PARTITION '(' partition_definition ')' @@ -2256,48 +2309,50 @@ partition_definition: rename_statement: RENAME TABLE rename_list { - $$ = $3 + $$ = &RenameTable{TablePairs: $3} } rename_list: table_name TO table_name { - $$ = &DDL{Action: RenameDDLAction, FromTables: TableNames{$1}, ToTables: TableNames{$3}} + $$ = 
[]*RenameTablePair{{FromTable: $1, ToTable: $3}} } | rename_list ',' table_name TO table_name { - $$ = $1 - $$.FromTables = append($$.FromTables, $3) - $$.ToTables = append($$.ToTables, $5) + $$ = append($1, &RenameTablePair{FromTable: $3, ToTable: $5}) } drop_statement: - DROP TABLE exists_opt table_name_list restrict_or_cascade_opt + DROP temp_opt TABLE exists_opt table_name_list restrict_or_cascade_opt { - $$ = &DropTable{FromTables: $4, IfExists: $3} + $$ = &DropTable{FromTables: $5, IfExists: $4, Temp: $2} } -| DROP INDEX id_or_var ON table_name ddl_skip_to_end +| DROP INDEX id_or_var ON table_name algorithm_lock_opt { // Change this to an alter statement - $$ = &DDL{Action: AlterDDLAction, Table: $5} + if $3.Lowered() == "primary" { + $$ = &AlterTable{Table: $5,AlterOptions: append([]AlterOption{&DropKey{Type:PrimaryKeyType}},$6...)} + } else { + $$ = &AlterTable{Table: $5,AlterOptions: append([]AlterOption{&DropKey{Type:NormalKeyType, Name:$3}},$6...)} + } } | DROP VIEW exists_opt view_name_list restrict_or_cascade_opt { $$ = &DropView{FromTables: $4, IfExists: $3} } -| DROP database_or_schema exists_opt id_or_var +| DROP database_or_schema comment_opt exists_opt table_id { - $$ = &DropDatabase{DBName: string($4.String()), IfExists: $3} + $$ = &DropDatabase{Comments: Comments($3), DBName: $5, IfExists: $4} } truncate_statement: TRUNCATE TABLE table_name { - $$ = &DDL{Action: TruncateDDLAction, Table: $3} + $$ = &TruncateTable{Table: $3} } | TRUNCATE table_name { - $$ = &DDL{Action: TruncateDDLAction, Table: $2} + $$ = &TruncateTable{Table: $2} } analyze_statement: ANALYZE TABLE table_name @@ -2314,6 +2369,10 @@ show_statement: { $$ = &Show{&ShowBasic{Command: Collation, Filter: $3}} } +| SHOW full_opt columns_or_fields from_or_in table_name from_database_opt like_or_where_opt + { + $$ = &Show{&ShowBasic{Full: $2, Command: Column, Tbl: $5, DbName: $6, Filter: $7}} + } | SHOW DATABASES like_or_where_opt { $$ = &Show{&ShowBasic{Command: Database, Filter: $3}} 
@@ -2324,16 +2383,24 @@ show_statement: } | SHOW KEYSPACES like_or_where_opt { - $$ = &Show{&ShowBasic{Command: Database, Filter: $3}} + $$ = &Show{&ShowBasic{Command: Keyspace, Filter: $3}} } | SHOW VITESS_KEYSPACES like_or_where_opt { - $$ = &Show{&ShowBasic{Command: Database, Filter: $3}} + $$ = &Show{&ShowBasic{Command: Keyspace, Filter: $3}} } | SHOW FUNCTION STATUS like_or_where_opt { $$ = &Show{&ShowBasic{Command: Function, Filter: $4}} } +| SHOW extended_opt index_symbols from_or_in table_name from_database_opt like_or_where_opt + { + $$ = &Show{&ShowBasic{Command: Index, Tbl: $5, DbName: $6, Filter: $7}} + } +| SHOW OPEN TABLES from_database_opt like_or_where_opt + { + $$ = &Show{&ShowBasic{Command: OpenTable, DbName:$4, Filter: $5}} + } | SHOW PRIVILEGES { $$ = &Show{&ShowBasic{Command: Privilege}} @@ -2360,42 +2427,53 @@ show_statement: } | SHOW TABLE STATUS from_database_opt like_or_where_opt { - $$ = &Show{&ShowTableStatus{DatabaseName:$4, Filter:$5}} + $$ = &Show{&ShowBasic{Command: TableStatus, DbName:$4, Filter: $5}} } -| SHOW full_opt columns_or_fields from_or_in table_name from_database_opt like_or_where_opt +| SHOW full_opt TABLES from_database_opt like_or_where_opt { - $$ = &Show{&ShowColumns{Full: $2, Table: $5, DbName: $6, Filter: $7}} + $$ = &Show{&ShowBasic{Command: Table, Full: $2, DbName:$4, Filter: $5}} } -| SHOW BINARY id_or_var ddl_skip_to_end /* SHOW BINARY LOGS */ +| SHOW TRIGGERS from_database_opt like_or_where_opt { - $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3.String()), Scope: ImplicitScope}} + $$ = &Show{&ShowBasic{Command: Trigger, DbName:$3, Filter: $4}} } -| SHOW CREATE DATABASE ddl_skip_to_end +| SHOW CREATE DATABASE table_name { - $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Scope: ImplicitScope}} + $$ = &Show{&ShowCreate{Command: CreateDb, Op: $4}} } -| SHOW CREATE FUNCTION table_name +| SHOW CREATE EVENT table_name { - $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Table: $4, 
Scope: ImplicitScope}} + $$ = &Show{&ShowCreate{Command: CreateE, Op: $4}} } -/* Rule to handle SHOW CREATE EVENT, SHOW CREATE FUNCTION, etc. */ -| SHOW CREATE id_or_var ddl_skip_to_end +| SHOW CREATE FUNCTION table_name { - $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3.String()), Scope: ImplicitScope}} + $$ = &Show{&ShowCreate{Command: CreateF, Op: $4}} } -| SHOW CREATE PROCEDURE ddl_skip_to_end +| SHOW CREATE PROCEDURE table_name { - $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Scope: ImplicitScope}} + $$ = &Show{&ShowCreate{Command: CreateProc, Op: $4}} } | SHOW CREATE TABLE table_name { - $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Table: $4, Scope: ImplicitScope}} + $$ = &Show{&ShowCreate{Command: CreateTbl, Op: $4}} + } +| SHOW CREATE TRIGGER table_name + { + $$ = &Show{&ShowCreate{Command: CreateTr, Op: $4}} } -| SHOW CREATE TRIGGER ddl_skip_to_end +| SHOW CREATE VIEW table_name + { + $$ = &Show{&ShowCreate{Command: CreateV, Op: $4}} + } +| SHOW CREATE USER ddl_skip_to_end { $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Scope: ImplicitScope}} + } +| SHOW BINARY id_or_var ddl_skip_to_end /* SHOW BINARY ... 
*/ + { + $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3.String()), Scope: ImplicitScope}} } -| SHOW CREATE VIEW ddl_skip_to_end +| SHOW BINARY LOGS ddl_skip_to_end /* SHOW BINARY LOGS */ { $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Scope: ImplicitScope}} } @@ -2407,11 +2485,6 @@ show_statement: { $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Table: $4, Scope: ImplicitScope}} } -| SHOW extended_opt index_symbols from_or_in table_name from_database_opt like_or_where_opt - { - showTablesOpt := &ShowTablesOpt{DbName:$6, Filter:$7} - $$ = &Show{&ShowLegacy{Extended: string($2), Type: string($3), ShowTablesOpt: showTablesOpt, OnTable: $5, Scope: ImplicitScope}} - } | SHOW PLUGINS { $$ = &Show{&ShowLegacy{Type: string($2), Scope: ImplicitScope}} @@ -2420,21 +2493,19 @@ show_statement: { $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Table: $4, Scope: ImplicitScope}} } -| SHOW full_opt tables_or_processlist from_database_opt like_or_where_opt +| SHOW full_opt PROCESSLIST from_database_opt like_or_where_opt { - // this is ugly, but I couldn't find a better way for now - if $3 == "processlist" { - $$ = &Show{&ShowLegacy{Type: $3, Scope: ImplicitScope}} - } else { - showTablesOpt := &ShowTablesOpt{Full:$2, DbName:$4, Filter:$5} - $$ = &Show{&ShowLegacy{Type: $3, ShowTablesOpt: showTablesOpt, Scope: ImplicitScope}} - } + $$ = &Show{&ShowLegacy{Type: string($3), Scope: ImplicitScope}} } | SHOW VITESS_METADATA VARIABLES like_opt { showTablesOpt := &ShowTablesOpt{Filter: $4} $$ = &Show{&ShowLegacy{Scope: VitessMetadataScope, Type: string($3), ShowTablesOpt: showTablesOpt}} } +| SHOW VITESS_MIGRATIONS from_database_opt like_or_where_opt + { + $$ = &Show{&ShowBasic{Command: VitessMigrations, Filter: $4, DbName: $3}} + } | SHOW VSCHEMA TABLES { $$ = &Show{&ShowLegacy{Type: string($2) + " " + string($3), Scope: ImplicitScope}} @@ -2479,16 +2550,6 @@ show_statement: $$ = &Show{&ShowLegacy{Type: string($2), Scope: ImplicitScope}} } 
-tables_or_processlist: - TABLES - { - $$ = string($1) - } -| PROCESSLIST - { - $$ = string($1) - } - vitess_topo: VITESS_TABLETS { @@ -2512,11 +2573,11 @@ extended_opt: full_opt: /* empty */ { - $$ = "" + $$ = false } | FULL { - $$ = "full " + $$ = true } columns_or_fields: @@ -2532,15 +2593,15 @@ columns_or_fields: from_database_opt: /* empty */ { - $$ = "" + $$ = NewTableIdent("") } | FROM table_id { - $$ = $2.v + $$ = $2 } | IN table_id { - $$ = $2.v + $$ = $2 } like_or_where_opt: @@ -2672,11 +2733,11 @@ explain_synonyms: } | DESCRIBE { - $$ = $1 + $$ = $1 } | DESC { - $$ = $1 + $$ = $1 } explainable_statement: @@ -2684,15 +2745,15 @@ explainable_statement: { $$ = $1 } -| update_statement +| update_statement { $$ = $1 } -| insert_statement +| insert_statement { $$ = $1 } -| delete_statement +| delete_statement { $$ = $1 } @@ -2703,21 +2764,21 @@ wild_opt: } | sql_id { - $$ = "" + $$ = $1.val } | STRING { - $$ = "" + $$ = encodeSQLString($1) } - + explain_statement: explain_synonyms table_name wild_opt { - $$ = &OtherRead{} + $$ = &ExplainTab{Table: $2, Wild: $3} } | explain_synonyms explain_format_opt explainable_statement { - $$ = &Explain{Type: $2, Statement: $3} + $$ = &ExplainStmt{Type: $2, Statement: $3} } other_statement: @@ -2776,11 +2837,120 @@ unlock_statement: $$ = &UnlockTables{} } +revert_statement: + REVERT VITESS_MIGRATION STRING + { + $$ = &RevertMigration{UUID: string($3)} + } + flush_statement: - FLUSH skip_to_end + FLUSH local_opt flush_option_list + { + $$ = &Flush{IsLocal: $2, FlushOptions:$3} + } +| FLUSH local_opt TABLES { - $$ = &DDL{Action: FlushDDLAction} + $$ = &Flush{IsLocal: $2} + } +| FLUSH local_opt TABLES WITH READ LOCK + { + $$ = &Flush{IsLocal: $2, WithLock:true} + } +| FLUSH local_opt TABLES table_name_list + { + $$ = &Flush{IsLocal: $2, TableNames:$4} + } +| FLUSH local_opt TABLES table_name_list WITH READ LOCK + { + $$ = &Flush{IsLocal: $2, TableNames:$4, WithLock:true} + } +| FLUSH local_opt TABLES table_name_list FOR EXPORT 
+ { + $$ = &Flush{IsLocal: $2, TableNames:$4, ForExport:true} + } + +flush_option_list: + flush_option + { + $$ = []string{$1} + } +| flush_option_list ',' flush_option + { + $$ = append($1,$3) } + +flush_option: + BINARY LOGS + { + $$ = string($1) + " " + string($2) + } +| ENGINE LOGS + { + $$ = string($1) + " " + string($2) + } +| ERROR LOGS + { + $$ = string($1) + " " + string($2) + } +| GENERAL LOGS + { + $$ = string($1) + " " + string($2) + } +| HOSTS + { + $$ = string($1) + } +| LOGS + { + $$ = string($1) + } +| PRIVILEGES + { + $$ = string($1) + } +| RELAY LOGS for_channel_opt + { + $$ = string($1) + " " + string($2) + $3 + } +| SLOW LOGS + { + $$ = string($1) + " " + string($2) + } +| OPTIMIZER_COSTS + { + $$ = string($1) + } +| STATUS + { + $$ = string($1) + } +| USER_RESOURCES + { + $$ = string($1) + } + +local_opt: + { + $$ = false + } +| LOCAL + { + $$ = true + } +| NO_WRITE_TO_BINLOG + { + $$ = true + } + +for_channel_opt: + { + $$ = "" + } +| FOR CHANNEL id_or_var + { + $$ = " " + string($1) + " " + string($2) + " " + $3.String() + } + comment_opt: { setAllowComments(yylex, true) @@ -2857,15 +3027,15 @@ select_options: { $$ = []string{$1} } -| select_option select_option // TODO: figure out a way to do this recursively instead. +| select_option select_option // TODO: figure out a way to do this recursively instead. { // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce' $$ = []string{$1, $2} } -| select_option select_option select_option +| select_option select_option select_option { $$ = []string{$1, $2, $3} } -| select_option select_option select_option select_option +| select_option select_option select_option select_option { $$ = []string{$1, $2, $3, $4} } @@ -2895,6 +3065,10 @@ select_option: { $$ = SQLCalcFoundRowsStr } +| ALL + { + $$ = AllStr // These are not picked up by NewSelect, and so ALL will be dropped. 
But this is OK, since it's redundant anyway + } select_expression_list: select_expression @@ -3393,6 +3567,7 @@ col_tuple: | LIST_ARG { $$ = ListArg($1) + bindVariable(yylex, $1[2:]) } subquery: @@ -3514,25 +3689,11 @@ value_expression: } | '+' value_expression %prec UNARY { - if num, ok := $2.(*Literal); ok && num.Type == IntVal { - $$ = num - } else { - $$ = &UnaryExpr{Operator: UPlusOp, Expr: $2} - } + $$ = $2 } | '-' value_expression %prec UNARY { - if num, ok := $2.(*Literal); ok && num.Type == IntVal { - // Handle double negative - if num.Val[0] == '-' { - num.Val = num.Val[1:] - $$ = num - } else { - $$ = NewIntLiteral(append([]byte("-"), num.Val...)) - } - } else { - $$ = &UnaryExpr{Operator: UMinusOp, Expr: $2} - } + $$ = handleUnaryMinus($2) } | '~' value_expression { @@ -3571,7 +3732,7 @@ function_call_generic: | sql_id openb DISTINCTROW select_expression_list closeb { $$ = &FuncExpr{Name: $1, Distinct: true, Exprs: $4} - } + } | table_id '.' reserved_sql_id openb select_expression_list_opt closeb { $$ = &FuncExpr{Qualifier: $1, Name: $3, Exprs: $5} @@ -3636,7 +3797,7 @@ function_call_keyword: } | CURRENT_USER func_paren_opt { - $$ = &FuncExpr{Name: NewColIdent(string($1))} + $$ = &FuncExpr{Name: NewColIdent($1)} } /* @@ -3870,7 +4031,7 @@ separator_opt: } | SEPARATOR STRING { - $$ = " separator '"+string($2)+"'" + $$ = " separator "+encodeSQLString($2) } when_expression_list: @@ -3940,6 +4101,7 @@ value: | VALUE_ARG { $$ = NewArgument($1) + bindVariable(yylex, $1[1:]) } | NULL { @@ -3954,7 +4116,7 @@ num_val: yylex.Error("expecting value after next") return 1 } - $$ = NewIntLiteral([]byte("1")) + $$ = NewIntLiteral("1") } | INTEGRAL VALUES { @@ -3963,6 +4125,7 @@ num_val: | VALUE_ARG VALUES { $$ = NewArgument($1) + bindVariable(yylex, $1[1:]) } group_by_opt: @@ -4044,52 +4207,52 @@ algorithm_lock_opt: } | lock_index algorithm_index { - $$ = []*IndexOption{$1,$2} + $$ = []AlterOption{$1,$2} } | algorithm_index lock_index { - $$ = []*IndexOption{$1,$2} + 
$$ = []AlterOption{$1,$2} } | algorithm_index { - $$ = []*IndexOption{$1} + $$ = []AlterOption{$1} } | lock_index { - $$ = []*IndexOption{$1} + $$ = []AlterOption{$1} } lock_index: LOCK equal_opt DEFAULT { - $$ = &IndexOption{Name: string($1), String: string($3)} + $$ = &LockOption{Type:DefaultType} } | LOCK equal_opt NONE { - $$ = &IndexOption{Name: string($1), String: string($3)} + $$ = &LockOption{Type:NoneType} } | LOCK equal_opt SHARED { - $$ = &IndexOption{Name: string($1), String: string($3)} + $$ = &LockOption{Type:SharedType} } | LOCK equal_opt EXCLUSIVE { - $$ = &IndexOption{Name: string($1), String: string($3)} + $$ = &LockOption{Type:ExclusiveType} } algorithm_index: ALGORITHM equal_opt DEFAULT { - $$ = &IndexOption{Name: string($1), String: string($3)} + $$ = AlgorithmValue($3) } | ALGORITHM equal_opt INPLACE { - $$ = &IndexOption{Name: string($1), String: string($3)} + $$ = AlgorithmValue($3) } | ALGORITHM equal_opt COPY { - $$ = &IndexOption{Name: string($1), String: string($3)} + $$ = AlgorithmValue($3) } algorithm_view: @@ -4170,7 +4333,7 @@ CURRENT_USER } | STRING AT_ID { - $$ = "'" + string($1) + "'@" + string($2) + $$ = encodeSQLString($1) + "@" + string($2) } | ID { @@ -4196,15 +4359,15 @@ into_option: } | INTO OUTFILE S3 STRING charset_opt format_opt export_options manifest_opt overwrite_opt { - $$ = &SelectInto{Type:IntoOutfileS3, FileName:string($4), Charset:$5, FormatOption:$6, ExportOption:$7, Manifest:$8, Overwrite:$9} + $$ = &SelectInto{Type:IntoOutfileS3, FileName:encodeSQLString($4), Charset:$5, FormatOption:$6, ExportOption:$7, Manifest:$8, Overwrite:$9} } | INTO DUMPFILE STRING { - $$ = &SelectInto{Type:IntoDumpfile, FileName:string($3), Charset:"", FormatOption:"", ExportOption:"", Manifest:"", Overwrite:""} + $$ = &SelectInto{Type:IntoDumpfile, FileName:encodeSQLString($3), Charset:"", FormatOption:"", ExportOption:"", Manifest:"", Overwrite:""} } | INTO OUTFILE STRING charset_opt export_options { - $$ = 
&SelectInto{Type:IntoOutfile, FileName:string($3), Charset:$4, FormatOption:"", ExportOption:$5, Manifest:"", Overwrite:""} + $$ = &SelectInto{Type:IntoOutfile, FileName:encodeSQLString($3), Charset:$4, FormatOption:"", ExportOption:$5, Manifest:"", Overwrite:""} } format_opt: @@ -4256,63 +4419,71 @@ overwrite_opt: } export_options: - fields_opt lines_opt + fields_opts lines_opts { $$ = $1 + $2 } -lines_opt: +lines_opts: { $$ = "" } -| LINES starting_by_opt terminated_by_opt +| LINES lines_opt_list { - $$ = " lines" + $2 + $3 + $$ = " lines" + $2 } -starting_by_opt: +lines_opt_list: + lines_opt { - $$ = "" + $$ = $1 } -| STARTING BY STRING +| lines_opt_list lines_opt { - $$ = " starting by '" + string($3) + "'" + $$ = $1 + $2 } -terminated_by_opt: +lines_opt: + STARTING BY STRING { - $$ = "" + $$ = " starting by " + encodeSQLString($3) } | TERMINATED BY STRING { - $$ = " terminated by '" + string($3) + "'" + $$ = " terminated by " + encodeSQLString($3) } -fields_opt: +fields_opts: { $$ = "" } -| columns_or_fields terminated_by_opt enclosed_by_opt escaped_by_opt +| columns_or_fields fields_opt_list { - $$ = " " + $1 + $2 + $3 + $4 + $$ = " " + $1 + $2 } -escaped_by_opt: +fields_opt_list: + fields_opt { - $$ = "" + $$ = $1 } -| ESCAPED BY STRING +| fields_opt_list fields_opt { - $$ = " escaped by '" + string($3) + "'" + $$ = $1 + $2 } -enclosed_by_opt: +fields_opt: + TERMINATED BY STRING { - $$ = "" + $$ = " terminated by " + encodeSQLString($3) } | optionally_opt ENCLOSED BY STRING { - $$ = $1 + " enclosed by '" + string($4) + "'" + $$ = $1 + " enclosed by " + encodeSQLString($4) + } +| ESCAPED BY STRING + { + $$ = " escaped by " + encodeSQLString($3) } optionally_opt: @@ -4445,11 +4616,11 @@ set_list: set_expression: reserved_sql_id '=' ON { - $$ = &SetExpr{Name: $1, Scope: ImplicitScope, Expr: NewStrLiteral([]byte("on"))} + $$ = &SetExpr{Name: $1, Scope: ImplicitScope, Expr: NewStrLiteral("on")} } | reserved_sql_id '=' OFF { - $$ = &SetExpr{Name: $1, Scope: 
ImplicitScope, Expr: NewStrLiteral([]byte("off"))} + $$ = &SetExpr{Name: $1, Scope: ImplicitScope, Expr: NewStrLiteral("off")} } | reserved_sql_id '=' expression { @@ -4469,7 +4640,7 @@ charset_or_character_set: CHARSET | CHARACTER SET { - $$ = []byte("charset") + $$ = "charset" } charset_or_character_set_or_names: @@ -4479,7 +4650,7 @@ charset_or_character_set_or_names: charset_value: sql_id { - $$ = NewStrLiteral([]byte($1.String())) + $$ = NewStrLiteral($1.String()) } | STRING { @@ -4494,6 +4665,11 @@ for_from: FOR | FROM +temp_opt: + { $$ = false } +| TEMPORARY + { $$ = true } + exists_opt: { $$ = false } | IF EXISTS @@ -4516,14 +4692,20 @@ to_opt: | AS { $$ = struct{}{} } -constraint_opt: - { $$ = "" } -| UNIQUE - { $$ = string($1) } -| SPATIAL - { $$ = string($1) } -| FULLTEXT - { $$ = string($1) } +call_statement: + CALL table_name openb expression_list_opt closeb + { + $$ = &CallProc{Name: $2, Params: $4} + } + +expression_list_opt: + { + $$ = nil + } +| expression_list + { + $$ = $1 + } using_opt: { $$ = nil } @@ -4563,6 +4745,15 @@ table_id: $$ = NewTableIdent(string($1)) } +table_id_opt: + { + $$ = NewTableIdent("") + } +| table_id + { + $$ = $1 + } + reserved_table_id: table_id | reserved_keyword @@ -4580,7 +4771,7 @@ reserved_table_id: */ reserved_keyword: ADD -| ARRAY +| ARRAY | AND | AS | ASC @@ -4588,7 +4779,9 @@ reserved_keyword: | BINARY | BY | CASE +| CALL | CHANGE +| CHARACTER | CHECK | COLLATE | COLUMN @@ -4658,6 +4851,7 @@ reserved_keyword: | MOD | NATURAL | NEXT // next should be doable as non-reserved, but is not due to the special `select next num_val` query that vitess supports +| NO_WRITE_TO_BINLOG | NOT | NTH_VALUE | NTILE @@ -4665,6 +4859,7 @@ reserved_keyword: | OF | OFF | ON +| OPTIMIZER_COSTS | OR | ORDER | OUTER @@ -4736,10 +4931,11 @@ non_reserved_keyword: | BOOL | BOOLEAN | BUCKETS +| CANCEL | CASCADE | CASCADED +| CHANNEL | CHAR -| CHARACTER | CHARSET | CHECKSUM | CLONE @@ -4751,6 +4947,7 @@ non_reserved_keyword: | COMMIT | 
COMMITTED | COMPACT +| COMPLETE | COMPONENT | COMPRESSED | COMPRESSION @@ -4780,11 +4977,14 @@ non_reserved_keyword: | ENGINE | ENGINES | ENUM +| ERROR | ESCAPED +| EVENT | EXCHANGE | EXCLUDE | EXCLUSIVE | EXPANSION +| EXPORT | EXTENDED | FLOAT_TYPE | FIELDS @@ -4794,6 +4994,7 @@ non_reserved_keyword: | FOLLOWING | FORMAT | FUNCTION +| GENERAL | GEOMCOLLECTION | GEOMETRY | GEOMETRYCOLLECTION @@ -4802,6 +5003,7 @@ non_reserved_keyword: | HEADER | HISTOGRAM | HISTORY +| HOSTS | IMPORT | INACTIVE | INPLACE @@ -4826,6 +5028,7 @@ non_reserved_keyword: | LOAD | LOCAL | LOCKED +| LOGS | LONGBLOB | LONGTEXT | MANIFEST @@ -4858,6 +5061,7 @@ non_reserved_keyword: | OFFSET | OJ | OLD +| OPEN | OPTION | OPTIONAL | OPTIONALLY @@ -4890,6 +5094,7 @@ non_reserved_keyword: | REDUNDANT | REFERENCE | REFERENCES +| RELAY | REMOVE | REORGANIZE | REPAIR @@ -4900,6 +5105,7 @@ non_reserved_keyword: | RESPECT | RESTART | RETAIN +| RETRY | REUSE | ROLE | ROLLBACK @@ -4917,6 +5123,7 @@ non_reserved_keyword: | SHARED | SIGNED | SKIP +| SLOW | SMALLINT | SQL | SRID @@ -4929,6 +5136,7 @@ non_reserved_keyword: | STORAGE | TABLES | TABLESPACE +| TEMPORARY | TEMPTABLE | TERMINATED | TEXT @@ -4943,6 +5151,7 @@ non_reserved_keyword: | TRANSACTION | TREE | TRIGGER +| TRIGGERS | TRUNCATE | UNBOUNDED | UNCOMMITTED @@ -4950,6 +5159,8 @@ non_reserved_keyword: | UNSIGNED | UNUSED | UPGRADE +| USER +| USER_RESOURCES | VALIDATION | VARBINARY | VARCHAR @@ -4964,6 +5175,8 @@ non_reserved_keyword: | VITESS_METADATA | VITESS_SHARDS | VITESS_TABLETS +| VITESS_MIGRATION +| VITESS_MIGRATIONS | VSCHEMA | WARNINGS | WITHOUT diff --git a/go/vt/sqlparser/test_queries/django_queries.txt b/go/vt/sqlparser/testdata/django_queries.txt similarity index 100% rename from go/vt/sqlparser/test_queries/django_queries.txt rename to go/vt/sqlparser/testdata/django_queries.txt diff --git a/go/vt/sqlparser/testdata/lobsters.sql.gz b/go/vt/sqlparser/testdata/lobsters.sql.gz new file mode 100644 index 00000000000..ca7487cd8c9 Binary 
files /dev/null and b/go/vt/sqlparser/testdata/lobsters.sql.gz differ diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index 470acac4348..82ab10e3256 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -17,499 +17,45 @@ limitations under the License. package sqlparser import ( - "bytes" "fmt" - "io" + "strconv" "strings" - "vitess.io/vitess/go/bytes2" "vitess.io/vitess/go/sqltypes" ) const ( - defaultBufSize = 4096 - eofChar = 0x100 + eofChar = 0x100 ) // Tokenizer is the struct used to generate SQL // tokens for the parser. type Tokenizer struct { - InStream io.Reader AllowComments bool SkipSpecialComments bool SkipToEnd bool - lastChar uint16 - Position int - lastToken []byte LastError error - posVarIndex int ParseTree Statement - partialDDL Statement - nesting int - multi bool - specialComment *Tokenizer - - buf []byte - bufPos int - bufSize int + BindVars map[string]struct{} + + lastToken string + posVarIndex int + partialDDL Statement + nesting int + multi bool + specialComment *Tokenizer + + Pos int + buf string } // NewStringTokenizer creates a new Tokenizer for the // sql string. func NewStringTokenizer(sql string) *Tokenizer { - buf := []byte(sql) return &Tokenizer{ - buf: buf, - bufSize: len(buf), - } -} - -// NewTokenizer creates a new Tokenizer reading a sql -// string from the io.Reader. -func NewTokenizer(r io.Reader) *Tokenizer { - return &Tokenizer{ - InStream: r, - buf: make([]byte, defaultBufSize), - } -} - -// keywords is a map of mysql keywords that fall into two categories: -// 1) keywords considered reserved by MySQL -// 2) keywords for us to handle specially in sql.y -// -// Those marked as UNUSED are likely reserved keywords. 
We add them here so that -// when rewriting queries we can properly backtick quote them so they don't cause issues -// -// NOTE: If you add new keywords, add them also to the reserved_keywords or -// non_reserved_keywords grammar in sql.y -- this will allow the keyword to be used -// in identifiers. See the docs for each grammar to determine which one to put it into. -var keywords = map[string]int{ - "accessible": UNUSED, - "action": ACTION, - "add": ADD, - "after": AFTER, - "against": AGAINST, - "algorithm": ALGORITHM, - "all": ALL, - "alter": ALTER, - "analyze": ANALYZE, - "and": AND, - "as": AS, - "asc": ASC, - "asensitive": UNUSED, - "auto_increment": AUTO_INCREMENT, - "avg_row_length": AVG_ROW_LENGTH, - "before": UNUSED, - "begin": BEGIN, - "between": BETWEEN, - "bigint": BIGINT, - "binary": BINARY, - "_binary": UNDERSCORE_BINARY, - "_utf8mb4": UNDERSCORE_UTF8MB4, - "_utf8": UNDERSCORE_UTF8, - "_latin1": UNDERSCORE_LATIN1, - "bit": BIT, - "blob": BLOB, - "bool": BOOL, - "boolean": BOOLEAN, - "both": UNUSED, - "by": BY, - "call": UNUSED, - "cascade": CASCADE, - "cascaded": CASCADED, - "case": CASE, - "cast": CAST, - "change": CHANGE, - "char": CHAR, - "character": CHARACTER, - "charset": CHARSET, - "check": CHECK, - "checksum": CHECKSUM, - "coalesce": COALESCE, - "code": CODE, - "collate": COLLATE, - "collation": COLLATION, - "column": COLUMN, - "columns": COLUMNS, - "comment": COMMENT_KEYWORD, - "committed": COMMITTED, - "commit": COMMIT, - "compact": COMPACT, - "compressed": COMPRESSED, - "compression": COMPRESSION, - "condition": UNUSED, - "connection": CONNECTION, - "constraint": CONSTRAINT, - "continue": UNUSED, - "convert": CONVERT, - "copy": COPY, - "substr": SUBSTR, - "substring": SUBSTRING, - "create": CREATE, - "cross": CROSS, - "csv": CSV, - "current_date": CURRENT_DATE, - "current_time": CURRENT_TIME, - "current_timestamp": CURRENT_TIMESTAMP, - "current_user": CURRENT_USER, - "cursor": UNUSED, - "data": DATA, - "database": DATABASE, - "databases": 
DATABASES, - "day_hour": UNUSED, - "day_microsecond": UNUSED, - "day_minute": UNUSED, - "day_second": UNUSED, - "date": DATE, - "datetime": DATETIME, - "dec": UNUSED, - "decimal": DECIMAL, - "declare": UNUSED, - "default": DEFAULT, - "definer": DEFINER, - "delay_key_write": DELAY_KEY_WRITE, - "delayed": UNUSED, - "delete": DELETE, - "desc": DESC, - "describe": DESCRIBE, - "deterministic": UNUSED, - "directory": DIRECTORY, - "disable": DISABLE, - "discard": DISCARD, - "disk": DISK, - "distinct": DISTINCT, - "distinctrow": DISTINCTROW, - "div": DIV, - "double": DOUBLE, - "do": DO, - "drop": DROP, - "dumpfile": DUMPFILE, - "duplicate": DUPLICATE, - "dynamic": DYNAMIC, - "each": UNUSED, - "else": ELSE, - "elseif": UNUSED, - "enable": ENABLE, - "enclosed": ENCLOSED, - "encryption": ENCRYPTION, - "end": END, - "enforced": ENFORCED, - "engine": ENGINE, - "engines": ENGINES, - "enum": ENUM, - "escape": ESCAPE, - "escaped": ESCAPED, - "exchange": EXCHANGE, - "exclusive": EXCLUSIVE, - "exists": EXISTS, - "exit": UNUSED, - "explain": EXPLAIN, - "expansion": EXPANSION, - "extended": EXTENDED, - "false": FALSE, - "fetch": UNUSED, - "fields": FIELDS, - "first": FIRST, - "fixed": FIXED, - "float": FLOAT_TYPE, - "float4": UNUSED, - "float8": UNUSED, - "flush": FLUSH, - "for": FOR, - "force": FORCE, - "foreign": FOREIGN, - "format": FORMAT, - "from": FROM, - "full": FULL, - "fulltext": FULLTEXT, - "function": FUNCTION, - "generated": UNUSED, - "geometry": GEOMETRY, - "geometrycollection": GEOMETRYCOLLECTION, - "get": UNUSED, - "global": GLOBAL, - "grant": UNUSED, - "group": GROUP, - "group_concat": GROUP_CONCAT, - "having": HAVING, - "header": HEADER, - "high_priority": UNUSED, - "hour_microsecond": UNUSED, - "hour_minute": UNUSED, - "hour_second": UNUSED, - "if": IF, - "ignore": IGNORE, - "import": IMPORT, - "in": IN, - "index": INDEX, - "indexes": INDEXES, - "infile": UNUSED, - "inout": UNUSED, - "inner": INNER, - "inplace": INPLACE, - "insensitive": UNUSED, - "insert": INSERT, - 
"insert_method": INSERT_METHOD, - "int": INT, - "int1": UNUSED, - "int2": UNUSED, - "int3": UNUSED, - "int4": UNUSED, - "int8": UNUSED, - "integer": INTEGER, - "interval": INTERVAL, - "into": INTO, - "io_after_gtids": UNUSED, - "is": IS, - "isolation": ISOLATION, - "iterate": UNUSED, - "invoker": INVOKER, - "join": JOIN, - "json": JSON, - "key": KEY, - "keys": KEYS, - "keyspaces": KEYSPACES, - "key_block_size": KEY_BLOCK_SIZE, - "kill": UNUSED, - "last": LAST, - "language": LANGUAGE, - "last_insert_id": LAST_INSERT_ID, - "leading": UNUSED, - "leave": UNUSED, - "left": LEFT, - "less": LESS, - "level": LEVEL, - "like": LIKE, - "limit": LIMIT, - "linear": UNUSED, - "lines": LINES, - "linestring": LINESTRING, - "load": LOAD, - "local": LOCAL, - "localtime": LOCALTIME, - "localtimestamp": LOCALTIMESTAMP, - "lock": LOCK, - "long": UNUSED, - "longblob": LONGBLOB, - "longtext": LONGTEXT, - "loop": UNUSED, - "low_priority": LOW_PRIORITY, - "manifest": MANIFEST, - "master_bind": UNUSED, - "match": MATCH, - "max_rows": MAX_ROWS, - "maxvalue": MAXVALUE, - "mediumblob": MEDIUMBLOB, - "mediumint": MEDIUMINT, - "mediumtext": MEDIUMTEXT, - "memory": MEMORY, - "merge": MERGE, - "middleint": UNUSED, - "min_rows": MIN_ROWS, - "minute_microsecond": UNUSED, - "minute_second": UNUSED, - "mod": MOD, - "mode": MODE, - "modify": MODIFY, - "modifies": UNUSED, - "multilinestring": MULTILINESTRING, - "multipoint": MULTIPOINT, - "multipolygon": MULTIPOLYGON, - "name": NAME, - "names": NAMES, - "natural": NATURAL, - "nchar": NCHAR, - "next": NEXT, - "no": NO, - "none": NONE, - "not": NOT, - "no_write_to_binlog": UNUSED, - "null": NULL, - "numeric": NUMERIC, - "off": OFF, - "offset": OFFSET, - "on": ON, - "only": ONLY, - "optimize": OPTIMIZE, - "optimizer_costs": UNUSED, - "option": OPTION, - "optionally": OPTIONALLY, - "or": OR, - "order": ORDER, - "out": UNUSED, - "outer": OUTER, - "outfile": OUTFILE, - "overwrite": OVERWRITE, - "pack_keys": PACK_KEYS, - "parser": PARSER, - "partition": 
PARTITION, - "partitioning": PARTITIONING, - "password": PASSWORD, - "plugins": PLUGINS, - "point": POINT, - "polygon": POLYGON, - "precision": UNUSED, - "primary": PRIMARY, - "privileges": PRIVILEGES, - "processlist": PROCESSLIST, - "procedure": PROCEDURE, - "query": QUERY, - "range": UNUSED, - "read": READ, - "reads": UNUSED, - "read_write": UNUSED, - "real": REAL, - "rebuild": REBUILD, - "redundant": REDUNDANT, - "references": REFERENCES, - "regexp": REGEXP, - "release": RELEASE, - "remove": REMOVE, - "rename": RENAME, - "reorganize": REORGANIZE, - "repair": REPAIR, - "repeat": UNUSED, - "repeatable": REPEATABLE, - "replace": REPLACE, - "require": UNUSED, - "resignal": UNUSED, - "restrict": RESTRICT, - "return": UNUSED, - "revoke": UNUSED, - "right": RIGHT, - "rlike": REGEXP, - "rollback": ROLLBACK, - "row_format": ROW_FORMAT, - "s3": S3, - "savepoint": SAVEPOINT, - "schema": SCHEMA, - "schemas": SCHEMAS, - "second_microsecond": UNUSED, - "security": SECURITY, - "select": SELECT, - "sensitive": UNUSED, - "separator": SEPARATOR, - "sequence": SEQUENCE, - "serializable": SERIALIZABLE, - "session": SESSION, - "set": SET, - "share": SHARE, - "shared": SHARED, - "show": SHOW, - "signal": UNUSED, - "signed": SIGNED, - "smallint": SMALLINT, - "spatial": SPATIAL, - "specific": UNUSED, - "sql": SQL, - "sqlexception": UNUSED, - "sqlstate": UNUSED, - "sqlwarning": UNUSED, - "sql_big_result": UNUSED, - "sql_cache": SQL_CACHE, - "sql_calc_found_rows": SQL_CALC_FOUND_ROWS, - "sql_no_cache": SQL_NO_CACHE, - "sql_small_result": UNUSED, - "ssl": UNUSED, - "start": START, - "starting": STARTING, - "stats_auto_recalc": STATS_AUTO_RECALC, - "stats_persistent": STATS_PERSISTENT, - "stats_sample_pages": STATS_SAMPLE_PAGES, - "status": STATUS, - "storage": STORAGE, - "stored": UNUSED, - "straight_join": STRAIGHT_JOIN, - "stream": STREAM, - "vstream": VSTREAM, - "table": TABLE, - "tables": TABLES, - "tablespace": TABLESPACE, - "temptable": TEMPTABLE, - "terminated": TERMINATED, - 
"text": TEXT, - "than": THAN, - "then": THEN, - "time": TIME, - "timestamp": TIMESTAMP, - "timestampadd": TIMESTAMPADD, - "timestampdiff": TIMESTAMPDIFF, - "tinyblob": TINYBLOB, - "tinyint": TINYINT, - "tinytext": TINYTEXT, - "to": TO, - "trailing": UNUSED, - "transaction": TRANSACTION, - "tree": TREE, - "traditional": TRADITIONAL, - "trigger": TRIGGER, - "true": TRUE, - "truncate": TRUNCATE, - "uncommitted": UNCOMMITTED, - "undefined": UNDEFINED, - "undo": UNUSED, - "union": UNION, - "unique": UNIQUE, - "unlock": UNLOCK, - "unsigned": UNSIGNED, - "update": UPDATE, - "upgrade": UPGRADE, - "usage": UNUSED, - "use": USE, - "using": USING, - "utc_date": UTC_DATE, - "utc_time": UTC_TIME, - "utc_timestamp": UTC_TIMESTAMP, - "validation": VALIDATION, - "values": VALUES, - "variables": VARIABLES, - "varbinary": VARBINARY, - "varchar": VARCHAR, - "varcharacter": UNUSED, - "varying": UNUSED, - "virtual": UNUSED, - "vindex": VINDEX, - "vindexes": VINDEXES, - "view": VIEW, - "vitess": VITESS, - "vitess_keyspaces": VITESS_KEYSPACES, - "vitess_metadata": VITESS_METADATA, - "vitess_shards": VITESS_SHARDS, - "vitess_tablets": VITESS_TABLETS, - "vschema": VSCHEMA, - "warnings": WARNINGS, - "when": WHEN, - "where": WHERE, - "while": UNUSED, - "with": WITH, - "without": WITHOUT, - "work": WORK, - "write": WRITE, - "xor": XOR, - "year": YEAR, - "year_month": UNUSED, - "zerofill": ZEROFILL, -} - -// keywordStrings contains the reverse mapping of token to keyword strings -var keywordStrings = map[int]string{} - -func init() { - for str, id := range keywords { - if id == UNUSED { - continue - } - keywordStrings[id] = strings.ToLower(str) - } -} - -// KeywordString returns the string corresponding to the given keyword -func KeywordString(id int) string { - str, ok := keywordStrings[id] - if !ok { - return "" + buf: sql, + BindVars: make(map[string]struct{}), } - return str } // Lex returns the next token form the Tokenizer. 
@@ -533,7 +79,7 @@ func (tkn *Tokenizer) Lex(lval *yySymType) int { // Parse function to see how this is handled. tkn.partialDDL = nil } - lval.bytes = val + lval.str = val tkn.lastToken = val return typ } @@ -542,11 +88,11 @@ func (tkn *Tokenizer) Lex(lval *yySymType) int { type PositionedErr struct { Err string Pos int - Near []byte + Near string } func (p PositionedErr) Error() string { - if p.Near != nil { + if p.Near != "" { return fmt.Sprintf("%s at position %v near '%s'", p.Err, p.Pos, p.Near) } return fmt.Sprintf("%s at position %v", p.Err, p.Pos) @@ -554,7 +100,7 @@ func (p PositionedErr) Error() string { // Error is called by go yacc if there's a parsing error. func (tkn *Tokenizer) Error(err string) { - tkn.LastError = PositionedErr{Err: err, Pos: tkn.Position, Near: tkn.lastToken} + tkn.LastError = PositionedErr{Err: err, Pos: tkn.Pos + 1, Near: tkn.lastToken} // Try and re-sync to the next statement tkn.skipStatement() @@ -562,7 +108,7 @@ func (tkn *Tokenizer) Error(err string) { // Scan scans the tokenizer for the next token and returns // the token type and an optional value. -func (tkn *Tokenizer) Scan() (int, []byte) { +func (tkn *Tokenizer) Scan() (int, string) { if tkn.specialComment != nil { // Enter specialComment scan mode. // for scanning such kind of comment: /*! MySQL-specific code */ @@ -575,49 +121,44 @@ func (tkn *Tokenizer) Scan() (int, []byte) { // leave specialComment scan mode after all stream consumed. 
tkn.specialComment = nil } - if tkn.lastChar == 0 { - tkn.next() - } tkn.skipBlank() - switch ch := tkn.lastChar; { + switch ch := tkn.cur(); { case ch == '@': tokenID := AT_ID - tkn.next() - if tkn.lastChar == '@' { + tkn.skip(1) + if tkn.cur() == '@' { tokenID = AT_AT_ID - tkn.next() + tkn.skip(1) } var tID int - var tBytes []byte - ch = tkn.lastChar - tkn.next() - if ch == '`' { + var tBytes string + if tkn.cur() == '`' { + tkn.skip(1) tID, tBytes = tkn.scanLiteralIdentifier() } else { - tID, tBytes = tkn.scanIdentifier(byte(ch), true) + tID, tBytes = tkn.scanIdentifier(true) } if tID == LEX_ERROR { - return tID, nil + return tID, "" } return tokenID, tBytes case isLetter(ch): - tkn.next() if ch == 'X' || ch == 'x' { - if tkn.lastChar == '\'' { - tkn.next() + if tkn.peek(1) == '\'' { + tkn.skip(2) return tkn.scanHex() } } if ch == 'B' || ch == 'b' { - if tkn.lastChar == '\'' { - tkn.next() + if tkn.peek(1) == '\'' { + tkn.skip(2) return tkn.scanBitLiteral() } } - return tkn.scanIdentifier(byte(ch), false) + return tkn.scanIdentifier(false) case isDigit(ch): - return tkn.scanNumber(false) + return tkn.scanNumber() case ch == ':': return tkn.scanBindVar() case ch == ';': @@ -625,112 +166,118 @@ func (tkn *Tokenizer) Scan() (int, []byte) { // In multi mode, ';' is treated as EOF. So, we don't advance. // Repeated calls to Scan will keep returning 0 until ParseNext // forces the advance. - return 0, nil + return 0, "" } - tkn.next() - return ';', nil + tkn.skip(1) + return ';', "" case ch == eofChar: - return 0, nil + return 0, "" default: - tkn.next() + if ch == '.' 
&& isDigit(tkn.peek(1)) { + return tkn.scanNumber() + } + + tkn.skip(1) switch ch { case '=', ',', '(', ')', '+', '*', '%', '^', '~': - return int(ch), nil + return int(ch), "" case '&': - if tkn.lastChar == '&' { - tkn.next() - return AND, nil + if tkn.cur() == '&' { + tkn.skip(1) + return AND, "" } - return int(ch), nil + return int(ch), "" case '|': - if tkn.lastChar == '|' { - tkn.next() - return OR, nil + if tkn.cur() == '|' { + tkn.skip(1) + return OR, "" } - return int(ch), nil + return int(ch), "" case '?': tkn.posVarIndex++ - buf := new(bytes2.Buffer) - fmt.Fprintf(buf, ":v%d", tkn.posVarIndex) - return VALUE_ARG, buf.Bytes() + buf := make([]byte, 0, 8) + buf = append(buf, ":v"...) + buf = strconv.AppendInt(buf, int64(tkn.posVarIndex), 10) + return VALUE_ARG, string(buf) case '.': - if isDigit(tkn.lastChar) { - return tkn.scanNumber(true) - } - return int(ch), nil + return int(ch), "" case '/': - switch tkn.lastChar { + switch tkn.cur() { case '/': - tkn.next() - return tkn.scanCommentType1("//") + tkn.skip(1) + return tkn.scanCommentType1(2) case '*': - tkn.next() - if tkn.lastChar == '!' && !tkn.SkipSpecialComments { + tkn.skip(1) + if tkn.cur() == '!' 
&& !tkn.SkipSpecialComments { + tkn.skip(1) return tkn.scanMySQLSpecificComment() } return tkn.scanCommentType2() default: - return int(ch), nil + return int(ch), "" } case '#': - return tkn.scanCommentType1("#") + return tkn.scanCommentType1(1) case '-': - switch tkn.lastChar { + switch tkn.cur() { case '-': - tkn.next() - return tkn.scanCommentType1("--") + nextChar := tkn.peek(1) + if nextChar == ' ' || nextChar == '\n' || nextChar == '\t' || nextChar == '\r' || nextChar == eofChar { + tkn.skip(1) + return tkn.scanCommentType1(2) + } case '>': - tkn.next() - if tkn.lastChar == '>' { - tkn.next() - return JSON_UNQUOTE_EXTRACT_OP, nil + tkn.skip(1) + if tkn.cur() == '>' { + tkn.skip(1) + return JSON_UNQUOTE_EXTRACT_OP, "" } - return JSON_EXTRACT_OP, nil + return JSON_EXTRACT_OP, "" } - return int(ch), nil + return int(ch), "" case '<': - switch tkn.lastChar { + switch tkn.cur() { case '>': - tkn.next() - return NE, nil + tkn.skip(1) + return NE, "" case '<': - tkn.next() - return SHIFT_LEFT, nil + tkn.skip(1) + return SHIFT_LEFT, "" case '=': - tkn.next() - switch tkn.lastChar { + tkn.skip(1) + switch tkn.cur() { case '>': - tkn.next() - return NULL_SAFE_EQUAL, nil + tkn.skip(1) + return NULL_SAFE_EQUAL, "" default: - return LE, nil + return LE, "" } default: - return int(ch), nil + return int(ch), "" } case '>': - switch tkn.lastChar { + switch tkn.cur() { case '=': - tkn.next() - return GE, nil + tkn.skip(1) + return GE, "" case '>': - tkn.next() - return SHIFT_RIGHT, nil + tkn.skip(1) + return SHIFT_RIGHT, "" default: - return int(ch), nil + return int(ch), "" } case '!': - if tkn.lastChar == '=' { - tkn.next() - return NE, nil + if tkn.cur() == '=' { + tkn.skip(1) + return NE, "" } - return int(ch), nil + return int(ch), "" case '\'', '"': return tkn.scanString(ch, STRING) case '`': return tkn.scanLiteralIdentifier() default: - return LEX_ERROR, []byte{byte(ch)} + return LEX_ERROR, string(byte(ch)) } } } @@ -746,311 +293,370 @@ func (tkn *Tokenizer) 
skipStatement() int { } } +// skipBlank skips the cursor while it finds whitespace func (tkn *Tokenizer) skipBlank() { - ch := tkn.lastChar + ch := tkn.cur() for ch == ' ' || ch == '\n' || ch == '\r' || ch == '\t' { - tkn.next() - ch = tkn.lastChar + tkn.skip(1) + ch = tkn.cur() } } -func (tkn *Tokenizer) scanIdentifier(firstByte byte, isVariable bool) (int, []byte) { - buffer := &bytes2.Buffer{} - buffer.WriteByte(firstByte) - for isLetter(tkn.lastChar) || - isDigit(tkn.lastChar) || - tkn.lastChar == '@' || - (isVariable && isCarat(tkn.lastChar)) { - if tkn.lastChar == '@' { +// scanIdentifier scans a language keyword or @-encased variable +func (tkn *Tokenizer) scanIdentifier(isVariable bool) (int, string) { + start := tkn.Pos + tkn.skip(1) + + for { + ch := tkn.cur() + if !isLetter(ch) && !isDigit(ch) && ch != '@' && !(isVariable && isCarat(ch)) { + break + } + if ch == '@' { isVariable = true } - buffer.WriteByte(byte(tkn.lastChar)) - tkn.next() + tkn.skip(1) } - lowered := bytes.ToLower(buffer.Bytes()) - loweredStr := string(lowered) - if keywordID, found := keywords[loweredStr]; found { - return keywordID, buffer.Bytes() + keywordName := tkn.buf[start:tkn.Pos] + if keywordID, found := keywordLookupTable.LookupString(keywordName); found { + return keywordID, keywordName } // dual must always be case-insensitive - if loweredStr == "dual" { - return ID, lowered + if keywordASCIIMatch(keywordName, "dual") { + return ID, "dual" } - return ID, buffer.Bytes() + return ID, keywordName } -func (tkn *Tokenizer) scanHex() (int, []byte) { - buffer := &bytes2.Buffer{} - tkn.scanMantissa(16, buffer) - if tkn.lastChar != '\'' { - return LEX_ERROR, buffer.Bytes() +// scanHex scans a hex numeral; assumes x' or X' has already been scanned +func (tkn *Tokenizer) scanHex() (int, string) { + start := tkn.Pos + tkn.scanMantissa(16) + hex := tkn.buf[start:tkn.Pos] + if tkn.cur() != '\'' { + return LEX_ERROR, hex } - tkn.next() - if buffer.Len()%2 != 0 { - return LEX_ERROR, 
buffer.Bytes() + tkn.skip(1) + if len(hex)%2 != 0 { + return LEX_ERROR, hex } - return HEX, buffer.Bytes() + return HEX, hex } -func (tkn *Tokenizer) scanBitLiteral() (int, []byte) { - buffer := &bytes2.Buffer{} - tkn.scanMantissa(2, buffer) - if tkn.lastChar != '\'' { - return LEX_ERROR, buffer.Bytes() +// scanBitLiteral scans a binary numeric literal; assumes b' or B' has already been scanned +func (tkn *Tokenizer) scanBitLiteral() (int, string) { + start := tkn.Pos + tkn.scanMantissa(2) + bit := tkn.buf[start:tkn.Pos] + if tkn.cur() != '\'' { + return LEX_ERROR, bit } - tkn.next() - return BIT_LITERAL, buffer.Bytes() + tkn.skip(1) + return BIT_LITERAL, bit } -func (tkn *Tokenizer) scanLiteralIdentifier() (int, []byte) { - buffer := &bytes2.Buffer{} - backTickSeen := false +// scanLiteralIdentifierSlow scans an identifier surrounded by backticks which may +// contain escape sequences instead of it. This method is only called from +// scanLiteralIdentifier once the first escape sequence is found in the identifier. +// The provided `buf` contains the contents of the identifier that have been scanned +// so far. +func (tkn *Tokenizer) scanLiteralIdentifierSlow(buf *strings.Builder) (int, string) { + backTickSeen := true for { if backTickSeen { - if tkn.lastChar != '`' { + if tkn.cur() != '`' { break } backTickSeen = false - buffer.WriteByte('`') - tkn.next() + buf.WriteByte('`') + tkn.skip(1) continue } // The previous char was not a backtick. - switch tkn.lastChar { + switch tkn.cur() { case '`': backTickSeen = true case eofChar: // Premature EOF. - return LEX_ERROR, buffer.Bytes() + return LEX_ERROR, buf.String() default: - buffer.WriteByte(byte(tkn.lastChar)) + buf.WriteByte(byte(tkn.cur())) + // keep scanning } - tkn.next() + tkn.skip(1) } - if buffer.Len() == 0 { - return LEX_ERROR, buffer.Bytes() + return ID, buf.String() +} + +// scanLiteralIdentifier scans an identifier enclosed by backticks. 
If the identifier +// is a simple literal, it'll be returned as a slice of the input buffer. If the identifier +// contains escape sequences, this function will fall back to scanLiteralIdentifierSlow +func (tkn *Tokenizer) scanLiteralIdentifier() (int, string) { + start := tkn.Pos + for { + switch tkn.cur() { + case '`': + if tkn.peek(1) != '`' { + if tkn.Pos == start { + return LEX_ERROR, "" + } + tkn.skip(1) + return ID, tkn.buf[start : tkn.Pos-1] + } + + var buf strings.Builder + buf.WriteString(tkn.buf[start:tkn.Pos]) + tkn.skip(1) + return tkn.scanLiteralIdentifierSlow(&buf) + case eofChar: + // Premature EOF. + return LEX_ERROR, tkn.buf[start:tkn.Pos] + default: + tkn.skip(1) + } } - return ID, buffer.Bytes() } -func (tkn *Tokenizer) scanBindVar() (int, []byte) { - buffer := &bytes2.Buffer{} - buffer.WriteByte(byte(tkn.lastChar)) +// scanBindVar scans a bind variable; assumes a ':' has been scanned right before +func (tkn *Tokenizer) scanBindVar() (int, string) { + start := tkn.Pos token := VALUE_ARG - tkn.next() - if tkn.lastChar == ':' { + + tkn.skip(1) + if tkn.cur() == ':' { token = LIST_ARG - buffer.WriteByte(byte(tkn.lastChar)) - tkn.next() + tkn.skip(1) } - if !isLetter(tkn.lastChar) { - return LEX_ERROR, buffer.Bytes() + if !isLetter(tkn.cur()) { + return LEX_ERROR, tkn.buf[start:tkn.Pos] } - for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || tkn.lastChar == '.' { - buffer.WriteByte(byte(tkn.lastChar)) - tkn.next() + for { + ch := tkn.cur() + if !isLetter(ch) && !isDigit(ch) && ch != '.' { + break + } + tkn.skip(1) } - return token, buffer.Bytes() + return token, tkn.buf[start:tkn.Pos] } -func (tkn *Tokenizer) scanMantissa(base int, buffer *bytes2.Buffer) { - for digitVal(tkn.lastChar) < base { - tkn.consumeNext(buffer) +// scanMantissa scans a sequence of numeric characters with the same base. 
+// This is a helper function only called from the numeric scanners +func (tkn *Tokenizer) scanMantissa(base int) { + for digitVal(tkn.cur()) < base { + tkn.skip(1) } } -func (tkn *Tokenizer) scanNumber(seenDecimalPoint bool) (int, []byte) { +// scanNumber scans any SQL numeric literal, either floating point or integer +func (tkn *Tokenizer) scanNumber() (int, string) { + start := tkn.Pos token := INTEGRAL - buffer := &bytes2.Buffer{} - if seenDecimalPoint { + + if tkn.cur() == '.' { token = FLOAT - buffer.WriteByte('.') - tkn.scanMantissa(10, buffer) + tkn.skip(1) + tkn.scanMantissa(10) goto exponent } // 0x construct. - if tkn.lastChar == '0' { - tkn.consumeNext(buffer) - if tkn.lastChar == 'x' || tkn.lastChar == 'X' { + if tkn.cur() == '0' { + tkn.skip(1) + if tkn.cur() == 'x' || tkn.cur() == 'X' { token = HEXNUM - tkn.consumeNext(buffer) - tkn.scanMantissa(16, buffer) + tkn.skip(1) + tkn.scanMantissa(16) goto exit } } - tkn.scanMantissa(10, buffer) + tkn.scanMantissa(10) - if tkn.lastChar == '.' { + if tkn.cur() == '.' { token = FLOAT - tkn.consumeNext(buffer) - tkn.scanMantissa(10, buffer) + tkn.skip(1) + tkn.scanMantissa(10) } exponent: - if tkn.lastChar == 'e' || tkn.lastChar == 'E' { + if tkn.cur() == 'e' || tkn.cur() == 'E' { token = FLOAT - tkn.consumeNext(buffer) - if tkn.lastChar == '+' || tkn.lastChar == '-' { - tkn.consumeNext(buffer) + tkn.skip(1) + if tkn.cur() == '+' || tkn.cur() == '-' { + tkn.skip(1) } - tkn.scanMantissa(10, buffer) + tkn.scanMantissa(10) } exit: // A letter cannot immediately follow a number. - if isLetter(tkn.lastChar) { - return LEX_ERROR, buffer.Bytes() + if isLetter(tkn.cur()) { + return LEX_ERROR, tkn.buf[start:tkn.Pos] } - return token, buffer.Bytes() + return token, tkn.buf[start:tkn.Pos] } -func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) { - var buffer bytes2.Buffer +// scanString scans a string surrounded by the given `delim`, which can be +// either single or double quotes. 
Assumes that the given delimiter has just +// been scanned. If the skin contains any escape sequences, this function +// will fall back to scanStringSlow +func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, string) { + start := tkn.Pos + for { - ch := tkn.lastChar + switch tkn.cur() { + case delim: + if tkn.peek(1) != delim { + tkn.skip(1) + return typ, tkn.buf[start : tkn.Pos-1] + } + fallthrough + + case '\\': + var buffer strings.Builder + buffer.WriteString(tkn.buf[start:tkn.Pos]) + return tkn.scanStringSlow(&buffer, delim, typ) + + case eofChar: + return LEX_ERROR, tkn.buf[start:tkn.Pos] + } + + tkn.skip(1) + } +} + +// scanString scans a string surrounded by the given `delim` and containing escape +// sequencse. The given `buffer` contains the contents of the string that have +// been scanned so far. +func (tkn *Tokenizer) scanStringSlow(buffer *strings.Builder, delim uint16, typ int) (int, string) { + for { + ch := tkn.cur() if ch == eofChar { // Unterminated string. - return LEX_ERROR, buffer.Bytes() + return LEX_ERROR, buffer.String() } if ch != delim && ch != '\\' { - buffer.WriteByte(byte(ch)) - // Scan ahead to the next interesting character. - start := tkn.bufPos - for ; tkn.bufPos < tkn.bufSize; tkn.bufPos++ { - ch = uint16(tkn.buf[tkn.bufPos]) + start := tkn.Pos + for ; tkn.Pos < len(tkn.buf); tkn.Pos++ { + ch = uint16(tkn.buf[tkn.Pos]) if ch == delim || ch == '\\' { break } } - buffer.Write(tkn.buf[start:tkn.bufPos]) - tkn.Position += (tkn.bufPos - start) - - if tkn.bufPos >= tkn.bufSize { + buffer.WriteString(tkn.buf[start:tkn.Pos]) + if tkn.Pos >= len(tkn.buf) { // Reached the end of the buffer without finding a delim or // escape character. - tkn.next() + tkn.skip(1) continue } - - tkn.bufPos++ - tkn.Position++ } - tkn.next() // Read one past the delim or escape character. + tkn.skip(1) // Read one past the delim or escape character. 
if ch == '\\' { - if tkn.lastChar == eofChar { + if tkn.cur() == eofChar { // String terminates mid escape character. - return LEX_ERROR, buffer.Bytes() + return LEX_ERROR, buffer.String() } - if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.lastChar)]; decodedChar == sqltypes.DontEscape { - ch = tkn.lastChar + if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.cur())]; decodedChar == sqltypes.DontEscape { + ch = tkn.cur() } else { ch = uint16(decodedChar) } - - } else if ch == delim && tkn.lastChar != delim { + } else if ch == delim && tkn.cur() != delim { // Correctly terminated string, which is not a double delim. break } buffer.WriteByte(byte(ch)) - tkn.next() + tkn.skip(1) } - return typ, buffer.Bytes() + return typ, buffer.String() } -func (tkn *Tokenizer) scanCommentType1(prefix string) (int, []byte) { - buffer := &bytes2.Buffer{} - buffer.WriteString(prefix) - for tkn.lastChar != eofChar { - if tkn.lastChar == '\n' { - tkn.consumeNext(buffer) +// scanCommentType1 scans a SQL line-comment, which is applied until the end +// of the line. The given prefix length varies based on whether the comment +// is started with '//', '--' or '#'. 
+func (tkn *Tokenizer) scanCommentType1(prefixLen int) (int, string) { + start := tkn.Pos - prefixLen + for tkn.cur() != eofChar { + if tkn.cur() == '\n' { + tkn.skip(1) break } - tkn.consumeNext(buffer) + tkn.skip(1) } - return COMMENT, buffer.Bytes() + return COMMENT, tkn.buf[start:tkn.Pos] } -func (tkn *Tokenizer) scanCommentType2() (int, []byte) { - buffer := &bytes2.Buffer{} - buffer.WriteString("/*") +// scanCommentType2 scans a '/*' delimited comment; assumes the opening +// prefix has already been scanned +func (tkn *Tokenizer) scanCommentType2() (int, string) { + start := tkn.Pos - 2 for { - if tkn.lastChar == '*' { - tkn.consumeNext(buffer) - if tkn.lastChar == '/' { - tkn.consumeNext(buffer) + if tkn.cur() == '*' { + tkn.skip(1) + if tkn.cur() == '/' { + tkn.skip(1) break } continue } - if tkn.lastChar == eofChar { - return LEX_ERROR, buffer.Bytes() + if tkn.cur() == eofChar { + return LEX_ERROR, tkn.buf[start:tkn.Pos] } - tkn.consumeNext(buffer) + tkn.skip(1) } - return COMMENT, buffer.Bytes() + return COMMENT, tkn.buf[start:tkn.Pos] } -func (tkn *Tokenizer) scanMySQLSpecificComment() (int, []byte) { - buffer := &bytes2.Buffer{} - buffer.WriteString("/*!") - tkn.next() +// scanMySQLSpecificComment scans a MySQL comment pragma, which always starts with '//*` +func (tkn *Tokenizer) scanMySQLSpecificComment() (int, string) { + start := tkn.Pos - 3 for { - if tkn.lastChar == '*' { - tkn.consumeNext(buffer) - if tkn.lastChar == '/' { - tkn.consumeNext(buffer) + if tkn.cur() == '*' { + tkn.skip(1) + if tkn.cur() == '/' { + tkn.skip(1) break } continue } - if tkn.lastChar == eofChar { - return LEX_ERROR, buffer.Bytes() + if tkn.cur() == eofChar { + return LEX_ERROR, tkn.buf[start:tkn.Pos] } - tkn.consumeNext(buffer) + tkn.skip(1) + } + + commentVersion, sql := ExtractMysqlComment(tkn.buf[start:tkn.Pos]) + + if MySQLVersion >= commentVersion { + // Only add the special comment to the tokenizer if the version of MySQL is higher or equal to the comment version + 
tkn.specialComment = NewStringTokenizer(sql) } - _, sql := ExtractMysqlComment(buffer.String()) - tkn.specialComment = NewStringTokenizer(sql) + return tkn.Scan() } -func (tkn *Tokenizer) consumeNext(buffer *bytes2.Buffer) { - if tkn.lastChar == eofChar { - // This should never happen. - panic("unexpected EOF") - } - buffer.WriteByte(byte(tkn.lastChar)) - tkn.next() +func (tkn *Tokenizer) cur() uint16 { + return tkn.peek(0) } -func (tkn *Tokenizer) next() { - if tkn.bufPos >= tkn.bufSize && tkn.InStream != nil { - // Try and refill the buffer - var err error - tkn.bufPos = 0 - if tkn.bufSize, err = tkn.InStream.Read(tkn.buf); err != io.EOF && err != nil { - tkn.LastError = err - } - } +func (tkn *Tokenizer) skip(dist int) { + tkn.Pos += dist +} - if tkn.bufPos >= tkn.bufSize { - if tkn.lastChar != eofChar { - tkn.Position++ - tkn.lastChar = eofChar - } - } else { - tkn.Position++ - tkn.lastChar = uint16(tkn.buf[tkn.bufPos]) - tkn.bufPos++ +func (tkn *Tokenizer) peek(dist int) uint16 { + if tkn.Pos+dist >= len(tkn.buf) { + return eofChar } + return uint16(tkn.buf[tkn.Pos+dist]) } // reset clears any internal state. 
diff --git a/go/vt/sqlparser/token_test.go b/go/vt/sqlparser/token_test.go index cac25328e00..d393066de6c 100644 --- a/go/vt/sqlparser/token_test.go +++ b/go/vt/sqlparser/token_test.go @@ -19,6 +19,8 @@ package sqlparser import ( "fmt" "testing" + + "github.com/stretchr/testify/require" ) func TestLiteralID(t *testing.T) { @@ -73,11 +75,12 @@ func TestLiteralID(t *testing.T) { }} for _, tcase := range testcases { - tkn := NewStringTokenizer(tcase.in) - id, out := tkn.Scan() - if tcase.id != id || string(out) != tcase.out { - t.Errorf("Scan(%s): %d, %s, want %d, %s", tcase.in, id, out, tcase.id, tcase.out) - } + t.Run(tcase.in, func(t *testing.T) { + tkn := NewStringTokenizer(tcase.in) + id, out := tkn.Scan() + require.Equal(t, tcase.id, id) + require.Equal(t, tcase.out, string(out)) + }) } } @@ -146,10 +149,11 @@ func TestString(t *testing.T) { }} for _, tcase := range testcases { - id, got := NewStringTokenizer(tcase.in).Scan() - if tcase.id != id || string(got) != tcase.want { - t.Errorf("Scan(%q) = (%s, %q), want (%s, %q)", tcase.in, tokenName(id), got, tokenName(tcase.id), tcase.want) - } + t.Run(tcase.in, func(t *testing.T) { + id, got := NewStringTokenizer(tcase.in).Scan() + require.Equal(t, tcase.id, id, "Scan(%q) = (%s), want (%s)", tcase.in, tokenName(id), tokenName(tcase.id)) + require.Equal(t, tcase.want, string(got)) + }) } } @@ -190,18 +194,78 @@ func TestSplitStatement(t *testing.T) { }} for _, tcase := range testcases { - sql, rem, err := SplitStatement(tcase.in) - if err != nil { - t.Errorf("EndOfStatementPosition(%s): ERROR: %v", tcase.in, err) - continue - } - - if tcase.sql != sql { - t.Errorf("EndOfStatementPosition(%s) got sql \"%s\" want \"%s\"", tcase.in, sql, tcase.sql) - } - - if tcase.rem != rem { - t.Errorf("EndOfStatementPosition(%s) got remainder \"%s\" want \"%s\"", tcase.in, rem, tcase.rem) - } + t.Run(tcase.in, func(t *testing.T) { + sql, rem, err := SplitStatement(tcase.in) + if err != nil { + 
t.Errorf("EndOfStatementPosition(%s): ERROR: %v", tcase.in, err) + return + } + + if tcase.sql != sql { + t.Errorf("EndOfStatementPosition(%s) got sql \"%s\" want \"%s\"", tcase.in, sql, tcase.sql) + } + + if tcase.rem != rem { + t.Errorf("EndOfStatementPosition(%s) got remainder \"%s\" want \"%s\"", tcase.in, rem, tcase.rem) + } + }) + } +} + +func TestVersion(t *testing.T) { + testcases := []struct { + version string + in string + id []int + }{{ + version: "50709", + in: "/*!80102 SELECT*/ FROM IN EXISTS", + id: []int{FROM, IN, EXISTS, 0}, + }, { + version: "80101", + in: "/*!80102 SELECT*/ FROM IN EXISTS", + id: []int{FROM, IN, EXISTS, 0}, + }, { + version: "80201", + in: "/*!80102 SELECT*/ FROM IN EXISTS", + id: []int{SELECT, FROM, IN, EXISTS, 0}, + }, { + version: "80102", + in: "/*!80102 SELECT*/ FROM IN EXISTS", + id: []int{SELECT, FROM, IN, EXISTS, 0}, + }} + + for _, tcase := range testcases { + t.Run(tcase.version+"_"+tcase.in, func(t *testing.T) { + MySQLVersion = tcase.version + tok := NewStringTokenizer(tcase.in) + for _, expectedID := range tcase.id { + id, _ := tok.Scan() + require.Equal(t, expectedID, id) + } + }) + } +} + +func TestExtractMySQLComment(t *testing.T) { + testcases := []struct { + comment string + version string + }{{ + comment: "/*!50108 SELECT * FROM */", + version: "50108", + }, { + comment: "/*!5018 SELECT * FROM */", + version: "", + }, { + comment: "/*!SELECT * FROM */", + version: "", + }} + + for _, tcase := range testcases { + t.Run(tcase.version, func(t *testing.T) { + output, _ := ExtractMysqlComment(tcase.comment) + require.Equal(t, tcase.version, output) + }) } } diff --git a/go/vt/sqlparser/tracked_buffer.go b/go/vt/sqlparser/tracked_buffer.go index a8230360139..8a4f596db00 100644 --- a/go/vt/sqlparser/tracked_buffer.go +++ b/go/vt/sqlparser/tracked_buffer.go @@ -67,6 +67,21 @@ func (buf *TrackedBuffer) Myprintf(format string, values ...interface{}) { buf.astPrintf(nil, format, values...) 
} +func (buf *TrackedBuffer) printExpr(currentExpr Expr, expr Expr, left bool) { + if precedenceFor(currentExpr) == Syntactic { + expr.formatFast(buf) + } else { + needParens := needParens(currentExpr, expr, left) + if needParens { + buf.WriteByte('(') + } + expr.formatFast(buf) + if needParens { + buf.WriteByte(')') + } + } +} + // astPrintf is for internal use by the ast structs func (buf *TrackedBuffer) astPrintf(currentNode SQLNode, format string, values ...interface{}) { currentExpr, checkParens := currentNode.(Expr) @@ -118,9 +133,13 @@ func (buf *TrackedBuffer) astPrintf(currentNode SQLNode, format string, values . buf.formatter(value.(SQLNode)) } else { needParens := needParens(currentExpr, expr, left) - buf.printIf(needParens, "(") + if needParens { + buf.WriteByte('(') + } buf.formatter(expr) - buf.printIf(needParens, ")") + if needParens { + buf.WriteByte(')') + } } case 'a': buf.WriteArg(values[fieldnum].(string)) @@ -142,15 +161,9 @@ func getExpressionForParensEval(checkParens bool, value interface{}) Expr { return nil } -func (buf *TrackedBuffer) printIf(condition bool, text string) { - if condition { - buf.WriteString(text) - } -} - func (buf *TrackedBuffer) formatter(node SQLNode) { if buf.nodeFormatter == nil { - node.Format(buf) + node.formatFast(buf) } else { buf.nodeFormatter(buf, node) } diff --git a/go/vt/sqlparser/utils.go b/go/vt/sqlparser/utils.go index 1de7833a58e..4c2ee02ef6f 100644 --- a/go/vt/sqlparser/utils.go +++ b/go/vt/sqlparser/utils.go @@ -36,11 +36,14 @@ func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, e if err != nil { return "", err } - stmt, err := Parse(q) + stmt, reservedVars, err := Parse2(q) + if err != nil { + return "", err + } + err = Normalize(stmt, reservedVars, bv, "") if err != nil { return "", err } - Normalize(stmt, bv, "") normalized := String(stmt) return normalized, nil } diff --git a/go/vt/sqlparser/visitorgen/ast_walker.go b/go/vt/sqlparser/visitorgen/ast_walker.go deleted file 
mode 100644 index 822fb6c4c5e..00000000000 --- a/go/vt/sqlparser/visitorgen/ast_walker.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package visitorgen - -import ( - "go/ast" - "reflect" -) - -var _ ast.Visitor = (*walker)(nil) - -type walker struct { - result SourceFile -} - -// Walk walks the given AST and translates it to the simplified AST used by the next steps -func Walk(node ast.Node) *SourceFile { - var w walker - ast.Walk(&w, node) - return &w.result -} - -// Visit implements the ast.Visitor interface -func (w *walker) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.TypeSpec: - switch t2 := n.Type.(type) { - case *ast.InterfaceType: - w.append(&InterfaceDeclaration{ - name: n.Name.Name, - block: "", - }) - case *ast.StructType: - var fields []*Field - for _, f := range t2.Fields.List { - for _, name := range f.Names { - fields = append(fields, &Field{ - name: name.Name, - typ: sastType(f.Type), - }) - } - - } - w.append(&StructDeclaration{ - name: n.Name.Name, - fields: fields, - }) - case *ast.ArrayType: - w.append(&TypeAlias{ - name: n.Name.Name, - typ: &Array{inner: sastType(t2.Elt)}, - }) - case *ast.Ident: - w.append(&TypeAlias{ - name: n.Name.Name, - typ: &TypeString{t2.Name}, - }) - - default: - panic(reflect.TypeOf(t2)) - } - case *ast.FuncDecl: - if len(n.Recv.List) > 1 || len(n.Recv.List[0].Names) > 1 { - panic("don't know what to do!") - } - var f *Field - if 
len(n.Recv.List) == 1 { - r := n.Recv.List[0] - t := sastType(r.Type) - if len(r.Names) > 1 { - panic("don't know what to do!") - } - if len(r.Names) == 1 { - f = &Field{ - name: r.Names[0].Name, - typ: t, - } - } else { - f = &Field{ - name: "", - typ: t, - } - } - } - - w.append(&FuncDeclaration{ - receiver: f, - name: n.Name.Name, - block: "", - arguments: nil, - }) - } - - return w -} - -func (w *walker) append(line Sast) { - w.result.lines = append(w.result.lines, line) -} - -func sastType(e ast.Expr) Type { - switch n := e.(type) { - case *ast.StarExpr: - return &Ref{sastType(n.X)} - case *ast.Ident: - return &TypeString{n.Name} - case *ast.ArrayType: - return &Array{inner: sastType(n.Elt)} - case *ast.InterfaceType: - return &TypeString{"interface{}"} - case *ast.StructType: - return &TypeString{"struct{}"} - } - - panic(reflect.TypeOf(e)) -} diff --git a/go/vt/sqlparser/visitorgen/ast_walker_test.go b/go/vt/sqlparser/visitorgen/ast_walker_test.go deleted file mode 100644 index a4b01f70835..00000000000 --- a/go/vt/sqlparser/visitorgen/ast_walker_test.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "go/parser" - "go/token" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/stretchr/testify/require" -) - -func TestSingleInterface(t *testing.T) { - input := ` -package sqlparser - -type Nodeiface interface { - iNode() -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&InterfaceDeclaration{ - name: "Nodeiface", - block: "", - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestEmptyStruct(t *testing.T) { - input := ` -package sqlparser - -type Empty struct {} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Empty", - fields: []*Field{}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithStringField(t *testing.T) { - input := ` -package sqlparser - -type Struct struct { - field string -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Struct", - fields: []*Field{{ - name: "field", - typ: &TypeString{typName: "string"}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithDifferentTypes(t *testing.T) { - input := ` -package sqlparser - -type Struct struct { - field string - reference *string - array []string - arrayOfRef []*string -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Struct", - fields: []*Field{{ - name: "field", - typ: &TypeString{typName: "string"}, - }, { - 
name: "reference", - typ: &Ref{&TypeString{typName: "string"}}, - }, { - name: "array", - typ: &Array{&TypeString{typName: "string"}}, - }, { - name: "arrayOfRef", - typ: &Array{&Ref{&TypeString{typName: "string"}}}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithTwoStringFieldInOneLine(t *testing.T) { - input := ` -package sqlparser - -type Struct struct { - left, right string -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Struct", - fields: []*Field{{ - name: "left", - typ: &TypeString{typName: "string"}, - }, { - name: "right", - typ: &TypeString{typName: "string"}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithSingleMethod(t *testing.T) { - input := ` -package sqlparser - -type Empty struct {} - -func (*Empty) method() {} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{ - &StructDeclaration{ - name: "Empty", - fields: []*Field{}}, - &FuncDeclaration{ - receiver: &Field{ - name: "", - typ: &Ref{&TypeString{"Empty"}}, - }, - name: "method", - block: "", - arguments: []*Field{}, - }, - }, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestSingleArrayType(t *testing.T) { - input := ` -package sqlparser - -type Strings []string -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&TypeAlias{ - name: "Strings", - typ: &Array{&TypeString{"string"}}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestSingleTypeAlias(t *testing.T) { - input := ` -package sqlparser - -type String string -` - - fset := 
token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&TypeAlias{ - name: "String", - typ: &TypeString{"string"}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} diff --git a/go/vt/sqlparser/visitorgen/main/main.go b/go/vt/sqlparser/visitorgen/main/main.go deleted file mode 100644 index 0d940ea060f..00000000000 --- a/go/vt/sqlparser/visitorgen/main/main.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/parser" - "go/token" - "io/ioutil" - "os" - - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/sqlparser/visitorgen" -) - -var ( - inputFile = flag.String("input", "", "input file to use") - outputFile = flag.String("output", "", "output file") - compare = flag.Bool("compareOnly", false, "instead of writing to the output file, compare if the generated visitor is still valid for this ast.go") -) - -const usage = `Usage of visitorgen: - -go run /path/to/visitorgen/main -input=/path/to/ast.go -output=/path/to/rewriter.go -` - -func main() { - defer exit.Recover() - flag.Usage = printUsage - flag.Parse() - - if *inputFile == "" || *outputFile == "" { - printUsage() - exit.Return(1) - } - - fs := token.NewFileSet() - file, err := parser.ParseFile(fs, *inputFile, nil, parser.DeclarationErrors) - if err != nil { - log.Error(err) - exit.Return(1) - } - - astWalkResult := visitorgen.Walk(file) - vp := visitorgen.Transform(astWalkResult) - vd := visitorgen.ToVisitorPlan(vp) - - replacementMethods := visitorgen.EmitReplacementMethods(vd) - typeSwitch := visitorgen.EmitTypeSwitches(vd) - - b := &bytes.Buffer{} - fmt.Fprint(b, fileHeader) - fmt.Fprintln(b) - fmt.Fprintln(b, replacementMethods) - fmt.Fprint(b, applyHeader) - fmt.Fprintln(b, typeSwitch) - fmt.Fprintln(b, fileFooter) - - if *compare { - currentFile, err := ioutil.ReadFile(*outputFile) - if err != nil { - log.Error(err) - exit.Return(1) - } - if !bytes.Equal(b.Bytes(), currentFile) { - fmt.Println("rewriter needs to be re-generated: go generate " + *outputFile) - exit.Return(1) - } - } else { - err = ioutil.WriteFile(*outputFile, b.Bytes(), 0644) - if err != nil { - log.Error(err) - exit.Return(1) - } - } - -} - -func printUsage() { - os.Stderr.WriteString(usage) - os.Stderr.WriteString("\nOptions:\n") - flag.PrintDefaults() -} - -const fileHeader = `// Code generated by visitorgen/main/main.go. DO NOT EDIT. 
- -package sqlparser - -//go:generate go run ./visitorgen/main -input=ast.go -output=rewriter.go - -import ( - "reflect" -) - -type replacerFunc func(newNode, parent SQLNode) - -// application carries all the shared data so we can pass it around cheaply. -type application struct { - pre, post ApplyFunc - cursor Cursor -} -` - -const applyHeader = ` -// apply is where the visiting happens. Here is where we keep the big switch-case that will be used -// to do the actual visiting of SQLNodes -func (a *application) apply(parent, node SQLNode, replacer replacerFunc) { - if node == nil || isNilValue(node) { - return - } - - // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead - saved := a.cursor - a.cursor.replacer = replacer - a.cursor.node = node - a.cursor.parent = parent - - if a.pre != nil && !a.pre(&a.cursor) { - a.cursor = saved - return - } - - // walk children - // (the order of the cases is alphabetical) - switch n := node.(type) { - case nil: - ` - -const fileFooter = ` - default: - panic("unknown ast type " + reflect.TypeOf(node).String()) - } - - if a.post != nil && !a.post(&a.cursor) { - panic(abort) - } - - a.cursor = saved -} - -func isNilValue(i interface{}) bool { - valueOf := reflect.ValueOf(i) - kind := valueOf.Kind() - isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice - return isNullable && valueOf.IsNil() -}` diff --git a/go/vt/sqlparser/visitorgen/sast.go b/go/vt/sqlparser/visitorgen/sast.go deleted file mode 100644 index e46485e8f5d..00000000000 --- a/go/vt/sqlparser/visitorgen/sast.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package visitorgen - -// simplified ast - when reading the golang ast of the ast.go file, we translate the golang ast objects -// to this much simpler format, that contains only the necessary information and no more -type ( - // SourceFile contains all important lines from an ast.go file - SourceFile struct { - lines []Sast - } - - // Sast or simplified AST, is a representation of the ast.go lines we are interested in - Sast interface { - toSastString() string - } - - // InterfaceDeclaration represents a declaration of an interface. This is used to keep track of which types - // need to be handled by the visitor framework - InterfaceDeclaration struct { - name, block string - } - - // TypeAlias is used whenever we see a `type XXX YYY` - XXX is the new name for YYY. - // Note that YYY could be an array or a reference - TypeAlias struct { - name string - typ Type - } - - // FuncDeclaration represents a function declaration. These are tracked to know which types implement interfaces. - FuncDeclaration struct { - receiver *Field - name, block string - arguments []*Field - } - - // StructDeclaration represents a struct. It contains the fields and their types - StructDeclaration struct { - name string - fields []*Field - } - - // Field is a field in a struct - a name with a type tuple - Field struct { - name string - typ Type - } - - // Type represents a type in the golang type system. Used to keep track of type we need to handle, - // and the types of fields. 
- Type interface { - toTypString() string - rawTypeName() string - } - - // TypeString is a raw type name, such as `string` - TypeString struct { - typName string - } - - // Ref is a reference to something, such as `*string` - Ref struct { - inner Type - } - - // Array is an array of things, such as `[]string` - Array struct { - inner Type - } -) - -var _ Sast = (*InterfaceDeclaration)(nil) -var _ Sast = (*StructDeclaration)(nil) -var _ Sast = (*FuncDeclaration)(nil) -var _ Sast = (*TypeAlias)(nil) - -var _ Type = (*TypeString)(nil) -var _ Type = (*Ref)(nil) -var _ Type = (*Array)(nil) - -// String returns a textual representation of the SourceFile. This is for testing purposed -func (t *SourceFile) String() string { - var result string - for _, l := range t.lines { - result += l.toSastString() - result += "\n" - } - - return result -} - -func (t *Ref) toTypString() string { - return "*" + t.inner.toTypString() -} - -func (t *Array) toTypString() string { - return "[]" + t.inner.toTypString() -} - -func (t *TypeString) toTypString() string { - return t.typName -} - -func (f *FuncDeclaration) toSastString() string { - var receiver string - if f.receiver != nil { - receiver = "(" + f.receiver.String() + ") " - } - var args string - for i, arg := range f.arguments { - if i > 0 { - args += ", " - } - args += arg.String() - } - - return "func " + receiver + f.name + "(" + args + ") {" + blockInNewLines(f.block) + "}" -} - -func (i *InterfaceDeclaration) toSastString() string { - return "type " + i.name + " interface {" + blockInNewLines(i.block) + "}" -} - -func (a *TypeAlias) toSastString() string { - return "type " + a.name + " " + a.typ.toTypString() -} - -func (s *StructDeclaration) toSastString() string { - var block string - for _, f := range s.fields { - block += "\t" + f.String() + "\n" - } - - return "type " + s.name + " struct {" + blockInNewLines(block) + "}" -} - -func blockInNewLines(block string) string { - if block == "" { - return "" - } - return "\n" + 
block + "\n" -} - -// String returns a string representation of a field -func (f *Field) String() string { - if f.name != "" { - return f.name + " " + f.typ.toTypString() - } - - return f.typ.toTypString() -} - -func (t *TypeString) rawTypeName() string { - return t.typName -} - -func (t *Ref) rawTypeName() string { - return t.inner.rawTypeName() -} - -func (t *Array) rawTypeName() string { - return t.inner.rawTypeName() -} diff --git a/go/vt/sqlparser/visitorgen/struct_producer.go b/go/vt/sqlparser/visitorgen/struct_producer.go deleted file mode 100644 index 1c293f30803..00000000000 --- a/go/vt/sqlparser/visitorgen/struct_producer.go +++ /dev/null @@ -1,253 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "fmt" - "sort" -) - -// VisitorData is the data needed to produce the output file -type ( - // VisitorItem represents something that needs to be added to the rewriter infrastructure - VisitorItem interface { - toFieldItemString() string - typeName() string - asSwitchCase() string - asReplMethod() string - getFieldName() string - } - - // SingleFieldItem is a single field in a struct - SingleFieldItem struct { - StructType, FieldType Type - FieldName string - } - - // ArrayFieldItem is an array field in a struct - ArrayFieldItem struct { - StructType, ItemType Type - FieldName string - } - - // ArrayItem is an array that implements SQLNode - ArrayItem struct { - StructType, ItemType Type - } - - // VisitorPlan represents all the output needed for the rewriter - VisitorPlan struct { - Switches []*SwitchCase // The cases for the big switch statement used to implement the visitor - } - - // SwitchCase is what we need to know to produce all the type switch cases in the visitor. - SwitchCase struct { - Type Type - Fields []VisitorItem - } -) - -var _ VisitorItem = (*SingleFieldItem)(nil) -var _ VisitorItem = (*ArrayItem)(nil) -var _ VisitorItem = (*ArrayFieldItem)(nil) -var _ sort.Interface = (*VisitorPlan)(nil) -var _ sort.Interface = (*SwitchCase)(nil) - -// ToVisitorPlan transforms the source information into a plan for the visitor code that needs to be produced -func ToVisitorPlan(input *SourceInformation) *VisitorPlan { - var output VisitorPlan - - for _, typ := range input.interestingTypes { - switchit := &SwitchCase{Type: typ} - stroct, isStruct := input.structs[typ.rawTypeName()] - if isStruct { - for _, f := range stroct.fields { - switchit.Fields = append(switchit.Fields, trySingleItem(input, f, typ)...) 
- } - } else { - itemType := input.getItemTypeOfArray(typ) - if itemType != nil && input.isSQLNode(itemType) { - switchit.Fields = append(switchit.Fields, &ArrayItem{ - StructType: typ, - ItemType: itemType, - }) - } - } - sort.Sort(switchit) - output.Switches = append(output.Switches, switchit) - } - sort.Sort(&output) - return &output -} - -func trySingleItem(input *SourceInformation, f *Field, typ Type) []VisitorItem { - if input.isSQLNode(f.typ) { - return []VisitorItem{&SingleFieldItem{ - StructType: typ, - FieldType: f.typ, - FieldName: f.name, - }} - } - - arrType, isArray := f.typ.(*Array) - if isArray && input.isSQLNode(arrType.inner) { - return []VisitorItem{&ArrayFieldItem{ - StructType: typ, - ItemType: arrType.inner, - FieldName: f.name, - }} - } - return []VisitorItem{} -} - -// String returns a string, used for testing -func (v *VisitorPlan) String() string { - var sb builder - for _, s := range v.Switches { - sb.appendF("Type: %v", s.Type.toTypString()) - for _, f := range s.Fields { - sb.appendF("\t%v", f.toFieldItemString()) - } - } - return sb.String() -} - -func (s *SingleFieldItem) toFieldItemString() string { - return fmt.Sprintf("single item: %v of type: %v", s.FieldName, s.FieldType.toTypString()) -} - -func (s *SingleFieldItem) asSwitchCase() string { - return fmt.Sprintf(` a.apply(node, n.%s, %s)`, s.FieldName, s.typeName()) -} - -func (s *SingleFieldItem) asReplMethod() string { - _, isRef := s.StructType.(*Ref) - - if isRef { - return fmt.Sprintf(`func %s(newNode, parent SQLNode) { - parent.(%s).%s = newNode.(%s) -}`, s.typeName(), s.StructType.toTypString(), s.FieldName, s.FieldType.toTypString()) - } - - return fmt.Sprintf(`func %s(newNode, parent SQLNode) { - tmp := parent.(%s) - tmp.%s = newNode.(%s) -}`, s.typeName(), s.StructType.toTypString(), s.FieldName, s.FieldType.toTypString()) - -} - -func (ai *ArrayItem) asReplMethod() string { - name := ai.typeName() - return fmt.Sprintf(`type %s int - -func (r *%s) replace(newNode, 
container SQLNode) { - container.(%s)[int(*r)] = newNode.(%s) -} - -func (r *%s) inc() { - *r++ -}`, name, name, ai.StructType.toTypString(), ai.ItemType.toTypString(), name) -} - -func (afi *ArrayFieldItem) asReplMethod() string { - name := afi.typeName() - return fmt.Sprintf(`type %s int - -func (r *%s) replace(newNode, container SQLNode) { - container.(%s).%s[int(*r)] = newNode.(%s) -} - -func (r *%s) inc() { - *r++ -}`, name, name, afi.StructType.toTypString(), afi.FieldName, afi.ItemType.toTypString(), name) -} - -func (s *SingleFieldItem) getFieldName() string { - return s.FieldName -} - -func (s *SingleFieldItem) typeName() string { - return "replace" + s.StructType.rawTypeName() + s.FieldName -} - -func (afi *ArrayFieldItem) toFieldItemString() string { - return fmt.Sprintf("array field item: %v.%v contains items of type %v", afi.StructType.toTypString(), afi.FieldName, afi.ItemType.toTypString()) -} - -func (ai *ArrayItem) toFieldItemString() string { - return fmt.Sprintf("array item: %v containing items of type %v", ai.StructType.toTypString(), ai.ItemType.toTypString()) -} - -func (ai *ArrayItem) getFieldName() string { - panic("Should not be called!") -} - -func (afi *ArrayFieldItem) getFieldName() string { - return afi.FieldName -} - -func (ai *ArrayItem) asSwitchCase() string { - return fmt.Sprintf(` replacer := %s(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - }`, ai.typeName()) -} - -func (afi *ArrayFieldItem) asSwitchCase() string { - return fmt.Sprintf(` replacer%s := %s(0) - replacer%sB := &replacer%s - for _, item := range n.%s { - a.apply(node, item, replacer%sB.replace) - replacer%sB.inc() - }`, afi.FieldName, afi.typeName(), afi.FieldName, afi.FieldName, afi.FieldName, afi.FieldName, afi.FieldName) -} - -func (ai *ArrayItem) typeName() string { - return "replace" + ai.StructType.rawTypeName() + "Items" -} - -func (afi *ArrayFieldItem) typeName() string { - return 
"replace" + afi.StructType.rawTypeName() + afi.FieldName -} -func (v *VisitorPlan) Len() int { - return len(v.Switches) -} - -func (v *VisitorPlan) Less(i, j int) bool { - return v.Switches[i].Type.rawTypeName() < v.Switches[j].Type.rawTypeName() -} - -func (v *VisitorPlan) Swap(i, j int) { - temp := v.Switches[i] - v.Switches[i] = v.Switches[j] - v.Switches[j] = temp -} -func (s *SwitchCase) Len() int { - return len(s.Fields) -} - -func (s *SwitchCase) Less(i, j int) bool { - return s.Fields[i].getFieldName() < s.Fields[j].getFieldName() -} - -func (s *SwitchCase) Swap(i, j int) { - temp := s.Fields[i] - s.Fields[i] = s.Fields[j] - s.Fields[j] = temp -} diff --git a/go/vt/sqlparser/visitorgen/struct_producer_test.go b/go/vt/sqlparser/visitorgen/struct_producer_test.go deleted file mode 100644 index 065b532a9eb..00000000000 --- a/go/vt/sqlparser/visitorgen/struct_producer_test.go +++ /dev/null @@ -1,423 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEmptyStructVisitor(t *testing.T) { - /* - type Node interface{} - type Struct struct {} - func (*Struct) iNode() {} - */ - - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}, - }, - interfaces: map[string]bool{ - "Node": true, - }, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{}}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithSqlNodeField(t *testing.T) { - /* - type Node interface{} - type Struct struct { - Field Node - } - func (*Struct) iNode() {} - */ - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}, - }, - interfaces: map[string]bool{ - "Node": true, - }, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Field", typ: &TypeString{"Node"}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{&SingleFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - FieldType: &TypeString{"Node"}, - FieldName: "Field", - }}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithStringField2(t *testing.T) { - /* - type Node interface{} - type Struct struct { - Field Node - } - func (*Struct) iNode() {} - */ - - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}, - }, - interfaces: map[string]bool{ - "Node": true, - }, - structs: map[string]*StructDeclaration{ - 
"Struct": {name: "Struct", fields: []*Field{ - {name: "Field", typ: &TypeString{"string"}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestArrayAsSqlNode(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - func (*NodeArray) iNode{} - - type NodeArray []NodeInterface - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: map[string]Type{ - "*NodeArray": &Ref{&TypeString{"NodeArray"}}}, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "NodeArray": { - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - }, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"NodeArray"}}, - Fields: []VisitorItem{&ArrayItem{ - StructType: &Ref{&TypeString{"NodeArray"}}, - ItemType: &TypeString{"NodeInterface"}, - }}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithStructField(t *testing.T) { - /* - type Node interface{} - type Struct struct { - Field *Struct - } - func (*Struct) iNode() {} - */ - - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Field", typ: &Ref{&TypeString{"Struct"}}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{&SingleFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - FieldType: &Ref{&TypeString{"Struct"}}, - FieldName: "Field", - }}, - }}, - } - - 
assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithArrayOfNodes(t *testing.T) { - /* - type NodeInterface interface {} - type Struct struct { - Items []NodeInterface - } - - func (*Struct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{ - "NodeInterface": true, - }, - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Items", typ: &Array{&TypeString{"NodeInterface"}}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{&ArrayFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - ItemType: &TypeString{"NodeInterface"}, - FieldName: "Items", - }}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithArrayOfStrings(t *testing.T) { - /* - type NodeInterface interface {} - type Struct struct { - Items []string - } - - func (*Struct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{ - "NodeInterface": true, - }, - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Items", typ: &Array{&TypeString{"string"}}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestArrayOfStringsThatImplementSQLNode(t *testing.T) { - /* - type NodeInterface interface {} - type Struct []string - func (Struct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: 
map[string]Type{"Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "Struct": { - name: "Struct", - typ: &Array{&TypeString{"string"}}, - }, - }, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestSortingOfOutputs(t *testing.T) { - /* - type NodeInterface interface {} - type AStruct struct { - AField NodeInterface - BField NodeInterface - } - type BStruct struct { - CField NodeInterface - } - func (*AStruct) iNode{} - func (*BStruct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: map[string]Type{ - "AStruct": &Ref{&TypeString{"AStruct"}}, - "BStruct": &Ref{&TypeString{"BStruct"}}, - }, - structs: map[string]*StructDeclaration{ - "AStruct": {name: "AStruct", fields: []*Field{ - {name: "BField", typ: &TypeString{"NodeInterface"}}, - {name: "AField", typ: &TypeString{"NodeInterface"}}, - }}, - "BStruct": {name: "BStruct", fields: []*Field{ - {name: "CField", typ: &TypeString{"NodeInterface"}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{ - {Type: &Ref{&TypeString{"AStruct"}}, - Fields: []VisitorItem{ - &SingleFieldItem{ - StructType: &Ref{&TypeString{"AStruct"}}, - FieldType: &TypeString{"NodeInterface"}, - FieldName: "AField", - }, - &SingleFieldItem{ - StructType: &Ref{&TypeString{"AStruct"}}, - FieldType: &TypeString{"NodeInterface"}, - FieldName: "BField", - }}}, - {Type: &Ref{&TypeString{"BStruct"}}, - Fields: []VisitorItem{ - &SingleFieldItem{ - StructType: &Ref{&TypeString{"BStruct"}}, - FieldType: &TypeString{"NodeInterface"}, - FieldName: "CField", - }}}}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func 
TestAliasOfAlias(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - type NodeArray []NodeInterface - type AliasOfAlias NodeArray - - func (NodeArray) iNode{} - func (AliasOfAlias) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: map[string]Type{ - "NodeArray": &TypeString{"NodeArray"}, - "AliasOfAlias": &TypeString{"AliasOfAlias"}, - }, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "NodeArray": { - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - "AliasOfAlias": { - name: "NodeArray", - typ: &TypeString{"NodeArray"}, - }, - }, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{ - {Type: &TypeString{"AliasOfAlias"}, - Fields: []VisitorItem{&ArrayItem{ - StructType: &TypeString{"AliasOfAlias"}, - ItemType: &TypeString{"NodeInterface"}, - }}, - }, - {Type: &TypeString{"NodeArray"}, - Fields: []VisitorItem{&ArrayItem{ - StructType: &TypeString{"NodeArray"}, - ItemType: &TypeString{"NodeInterface"}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} diff --git a/go/vt/sqlparser/visitorgen/transformer.go b/go/vt/sqlparser/visitorgen/transformer.go deleted file mode 100644 index 98129be81b1..00000000000 --- a/go/vt/sqlparser/visitorgen/transformer.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import "fmt" - -// Transform takes an input file and collects the information into an easier to consume format -func Transform(input *SourceFile) *SourceInformation { - interestingTypes := make(map[string]Type) - interfaces := make(map[string]bool) - structs := make(map[string]*StructDeclaration) - typeAliases := make(map[string]*TypeAlias) - - for _, l := range input.lines { - switch line := l.(type) { - case *FuncDeclaration: - interestingTypes[line.receiver.typ.toTypString()] = line.receiver.typ - case *StructDeclaration: - structs[line.name] = line - case *TypeAlias: - typeAliases[line.name] = line - case *InterfaceDeclaration: - interfaces[line.name] = true - } - } - - return &SourceInformation{ - interfaces: interfaces, - interestingTypes: interestingTypes, - structs: structs, - typeAliases: typeAliases, - } -} - -// SourceInformation contains the information from the ast.go file, but in a format that is easier to consume -type SourceInformation struct { - interestingTypes map[string]Type - interfaces map[string]bool - structs map[string]*StructDeclaration - typeAliases map[string]*TypeAlias -} - -func (v *SourceInformation) String() string { - var types string - for _, k := range v.interestingTypes { - types += k.toTypString() + "\n" - } - var structs string - for _, k := range v.structs { - structs += k.toSastString() + "\n" - } - var typeAliases string - for _, k := range v.typeAliases { - typeAliases += k.toSastString() + "\n" - } - - return fmt.Sprintf("Types to build visitor for:\n%s\nStructs with fields: \n%s\nTypeAliases with type: \n%s\n", types, structs, typeAliases) -} - -// getItemTypeOfArray will return nil if the given type is not pointing to a array type. 
-// If it is an array type, the type of it's items will be returned -func (v *SourceInformation) getItemTypeOfArray(typ Type) Type { - alias := v.typeAliases[typ.rawTypeName()] - if alias == nil { - return nil - } - arrTyp, isArray := alias.typ.(*Array) - if !isArray { - return v.getItemTypeOfArray(alias.typ) - } - return arrTyp.inner -} - -func (v *SourceInformation) isSQLNode(typ Type) bool { - _, isInteresting := v.interestingTypes[typ.toTypString()] - if isInteresting { - return true - } - _, isInterface := v.interfaces[typ.toTypString()] - return isInterface -} diff --git a/go/vt/sqlparser/visitorgen/transformer_test.go b/go/vt/sqlparser/visitorgen/transformer_test.go deleted file mode 100644 index 4a0849e9e9c..00000000000 --- a/go/vt/sqlparser/visitorgen/transformer_test.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSimplestAst(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - type NodeStruct struct {} - - func (*NodeStruct) iNode{} - */ - input := &SourceFile{ - lines: []Sast{ - &InterfaceDeclaration{ - name: "NodeInterface", - block: "// an interface lives here"}, - &StructDeclaration{ - name: "NodeStruct", - fields: []*Field{}}, - &FuncDeclaration{ - receiver: &Field{ - name: "", - typ: &Ref{&TypeString{"NodeStruct"}}, - }, - name: "iNode", - block: "", - arguments: []*Field{}}, - }, - } - - expected := &SourceInformation{ - interestingTypes: map[string]Type{ - "*NodeStruct": &Ref{&TypeString{"NodeStruct"}}}, - structs: map[string]*StructDeclaration{ - "NodeStruct": { - name: "NodeStruct", - fields: []*Field{}}}, - } - - assert.Equal(t, expected.String(), Transform(input).String()) -} - -func TestAstWithArray(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - func (*NodeArray) iNode{} - - type NodeArray []NodeInterface - */ - input := &SourceFile{ - lines: []Sast{ - &InterfaceDeclaration{ - name: "NodeInterface"}, - &TypeAlias{ - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - &FuncDeclaration{ - receiver: &Field{ - name: "", - typ: &Ref{&TypeString{"NodeArray"}}, - }, - name: "iNode", - block: "", - arguments: []*Field{}}, - }, - } - - expected := &SourceInformation{ - interestingTypes: map[string]Type{ - "*NodeArray": &Ref{&TypeString{"NodeArray"}}}, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "NodeArray": { - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - }, - } - - result := Transform(input) - - assert.Equal(t, expected.String(), result.String()) -} diff --git a/go/vt/sqlparser/visitorgen/visitor_emitter.go b/go/vt/sqlparser/visitorgen/visitor_emitter.go deleted file mode 100644 index 889c05fe7f7..00000000000 --- 
a/go/vt/sqlparser/visitorgen/visitor_emitter.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package visitorgen - -import ( - "fmt" - "strings" -) - -// EmitReplacementMethods is an anti-parser (a.k.a prettifier) - it takes a struct that is much like an AST, -// and produces a string from it. This method will produce the replacement methods that make it possible to -// replace objects in fields or in slices. -func EmitReplacementMethods(vd *VisitorPlan) string { - var sb builder - for _, s := range vd.Switches { - for _, k := range s.Fields { - sb.appendF(k.asReplMethod()) - sb.newLine() - } - } - - return sb.String() -} - -// EmitTypeSwitches is an anti-parser (a.k.a prettifier) - it takes a struct that is much like an AST, -// and produces a string from it. This method will produce the switch cases needed to cover the Vitess AST. 
-func EmitTypeSwitches(vd *VisitorPlan) string { - var sb builder - for _, s := range vd.Switches { - sb.newLine() - sb.appendF(" case %s:", s.Type.toTypString()) - for _, k := range s.Fields { - sb.appendF(k.asSwitchCase()) - } - } - - return sb.String() -} - -func (b *builder) String() string { - return strings.TrimSpace(b.sb.String()) -} - -type builder struct { - sb strings.Builder -} - -func (b *builder) appendF(format string, data ...interface{}) *builder { - _, err := b.sb.WriteString(fmt.Sprintf(format, data...)) - if err != nil { - panic(err) - } - b.newLine() - return b -} - -func (b *builder) newLine() { - _, err := b.sb.WriteString("\n") - if err != nil { - panic(err) - } -} diff --git a/go/vt/sqlparser/visitorgen/visitor_emitter_test.go b/go/vt/sqlparser/visitorgen/visitor_emitter_test.go deleted file mode 100644 index 94666daa743..00000000000 --- a/go/vt/sqlparser/visitorgen/visitor_emitter_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSingleItem(t *testing.T) { - sfi := SingleFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - FieldType: &TypeString{"string"}, - FieldName: "Field", - } - - expectedReplacer := `func replaceStructField(newNode, parent SQLNode) { - parent.(*Struct).Field = newNode.(string) -}` - - expectedSwitch := ` a.apply(node, n.Field, replaceStructField)` - require.Equal(t, expectedReplacer, sfi.asReplMethod()) - require.Equal(t, expectedSwitch, sfi.asSwitchCase()) -} - -func TestArrayFieldItem(t *testing.T) { - sfi := ArrayFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - ItemType: &TypeString{"string"}, - FieldName: "Field", - } - - expectedReplacer := `type replaceStructField int - -func (r *replaceStructField) replace(newNode, container SQLNode) { - container.(*Struct).Field[int(*r)] = newNode.(string) -} - -func (r *replaceStructField) inc() { - *r++ -}` - - expectedSwitch := ` replacerField := replaceStructField(0) - replacerFieldB := &replacerField - for _, item := range n.Field { - a.apply(node, item, replacerFieldB.replace) - replacerFieldB.inc() - }` - require.Equal(t, expectedReplacer, sfi.asReplMethod()) - require.Equal(t, expectedSwitch, sfi.asSwitchCase()) -} - -func TestArrayItem(t *testing.T) { - sfi := ArrayItem{ - StructType: &Ref{&TypeString{"Struct"}}, - ItemType: &TypeString{"string"}, - } - - expectedReplacer := `type replaceStructItems int - -func (r *replaceStructItems) replace(newNode, container SQLNode) { - container.(*Struct)[int(*r)] = newNode.(string) -} - -func (r *replaceStructItems) inc() { - *r++ -}` - - expectedSwitch := ` replacer := replaceStructItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - }` - require.Equal(t, expectedReplacer, sfi.asReplMethod()) - require.Equal(t, expectedSwitch, sfi.asSwitchCase()) -} diff --git 
a/go/vt/sqlparser/visitorgen/visitorgen.go b/go/vt/sqlparser/visitorgen/visitorgen.go deleted file mode 100644 index 284f8c4d9be..00000000000 --- a/go/vt/sqlparser/visitorgen/visitorgen.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//Package visitorgen is responsible for taking the ast.go of Vitess and -//and producing visitor infrastructure for it. -// -//This is accomplished in a few steps. -//Step 1: Walk the AST and collect the interesting information into a format that is -// easy to consume for the next step. The output format is a *SourceFile, that -// contains the needed information in a format that is pretty close to the golang ast, -// but simplified -//Step 2: A SourceFile is packaged into a SourceInformation. SourceInformation is still -// concerned with the input ast - it's just an even more distilled and easy to -// consume format for the last step. This step is performed by the code in transformer.go. -//Step 3: Using the SourceInformation, the struct_producer.go code produces the final data structure -// used, a VisitorPlan. This is focused on the output - it contains a list of all fields or -// arrays that need to be handled by the visitor produced. -//Step 4: The VisitorPlan is lastly turned into a string that is written as the output of -// this whole process. 
-package visitorgen diff --git a/go/vt/sqlparser/walker_test.go b/go/vt/sqlparser/walker_test.go new file mode 100644 index 00000000000..f8bf2b4792a --- /dev/null +++ b/go/vt/sqlparser/walker_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkWalkLargeExpression(b *testing.B) { + for i := 0; i < 10; i++ { + b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { + exp := newGenerator(int64(i*100), 5).expression() + count := 0 + for i := 0; i < b.N; i++ { + err := Walk(func(node SQLNode) (kontinue bool, err error) { + count++ + return true, nil + }, exp) + require.NoError(b, err) + } + }) + } +} + +func BenchmarkRewriteLargeExpression(b *testing.B) { + for i := 1; i < 7; i++ { + b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { + exp := newGenerator(int64(i*100), i).expression() + count := 0 + for i := 0; i < b.N; i++ { + _ = Rewrite(exp, func(_ *Cursor) bool { + count++ + return true + }, func(_ *Cursor) bool { + count-- + return true + }) + } + }) + } +} diff --git a/go/vt/srvtopo/cached_size.go b/go/vt/srvtopo/cached_size.go new file mode 100644 index 00000000000..03dd1ceb0da --- /dev/null +++ b/go/vt/srvtopo/cached_size.go @@ -0,0 +1,39 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package srvtopo + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *ResolvedShard) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Target *vitess.io/vitess/go/vt/proto/query.Target + size += cached.Target.CachedSize(true) + // field Gateway vitess.io/vitess/go/vt/srvtopo.Gateway + if cc, ok := cached.Gateway.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} diff --git a/go/vt/srvtopo/resilient_server.go b/go/vt/srvtopo/resilient_server.go index 97c2fd1c311..cdb8b9c12e8 100644 --- a/go/vt/srvtopo/resilient_server.go +++ b/go/vt/srvtopo/resilient_server.go @@ -48,6 +48,7 @@ var ( // setting the watch fails, we will use the last known value until // srv_topo_cache_ttl elapses and we only try to re-establish the watch // once every srv_topo_cache_refresh interval. 
+ srvTopoTimeout = flag.Duration("srv_topo_timeout", 1*time.Second, "topo server timeout") srvTopoCacheTTL = flag.Duration("srv_topo_cache_ttl", 1*time.Second, "how long to use cached entries for topology") srvTopoCacheRefresh = flag.Duration("srv_topo_cache_refresh", 1*time.Second, "how frequently to refresh the topology for cached entries") ) @@ -289,9 +290,9 @@ func (server *ResilientServer) GetSrvKeyspaceNames(ctx context.Context, cell str log.Errorf("GetSrvKeyspaceNames uncaught panic, cell :%v, err :%v)", cell, err) } }() - - result, err := server.topoServer.GetSrvKeyspaceNames(ctx, cell) - + newCtx, cancel := context.WithTimeout(ctx, *srvTopoTimeout) + defer cancel() + result, err := server.topoServer.GetSrvKeyspaceNames(newCtx, cell) entry.mutex.Lock() defer func() { close(entry.refreshingChan) @@ -309,7 +310,8 @@ func (server *ResilientServer) GetSrvKeyspaceNames(ctx context.Context, cell str server.counts.Add(errorCategory, 1) if entry.insertionTime.IsZero() { log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (no cached value, caching and returning error)", ctx, cell, err) - + } else if newCtx.Err() == context.DeadlineExceeded { + log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (request timeout), (keeping cached value: %v)", ctx, cell, err, entry.value) } else if entry.value != nil && time.Since(entry.insertionTime) < server.cacheTTL { server.counts.Add(cachedCategory, 1) log.Warningf("GetSrvKeyspaceNames(%v, %v) failed: %v (keeping cached value: %v)", ctx, cell, err, entry.value) @@ -321,7 +323,7 @@ func (server *ResilientServer) GetSrvKeyspaceNames(ctx context.Context, cell str } entry.lastError = err - entry.lastErrorCtx = ctx + entry.lastErrorCtx = newCtx }() } diff --git a/go/vt/srvtopo/resilient_server_test.go b/go/vt/srvtopo/resilient_server_test.go index 76f6d18dcbe..59dd818cb3d 100644 --- a/go/vt/srvtopo/resilient_server_test.go +++ b/go/vt/srvtopo/resilient_server_test.go @@ -304,20 +304,25 @@ func TestGetSrvKeyspace(t *testing.T) { // Force 
another error and lock the topo. Then wait for the TTL to // expire and verify that the context timeout unblocks the request. - forceErr = fmt.Errorf("force long test error") - factory.SetError(forceErr) - factory.Lock() - time.Sleep(*srvTopoCacheTTL) + // TODO(deepthi): Commenting out this test until we fix https://github.com/vitessio/vitess/issues/6134 - timeoutCtx, cancel := context.WithTimeout(context.Background(), *srvTopoCacheRefresh*2) //nolint - defer cancel() - _, err = rs.GetSrvKeyspace(timeoutCtx, "test_cell", "test_ks") - wantErr := "timed out waiting for keyspace" - if err == nil || err.Error() != wantErr { - t.Errorf("expected error '%v', got '%v'", wantErr, err) - } - factory.Unlock() + /* + forceErr = fmt.Errorf("force long test error") + factory.SetError(forceErr) + factory.Lock() + + time.Sleep(*srvTopoCacheTTL) + + timeoutCtx, cancel := context.WithTimeout(context.Background(), *srvTopoCacheRefresh*2) //nolint + defer cancel() + _, err = rs.GetSrvKeyspace(timeoutCtx, "test_cell", "test_ks") + wantErr := "timed out waiting for keyspace" + if err == nil || err.Error() != wantErr { + t.Errorf("expected error '%v', got '%v'", wantErr, err) + } + factory.Unlock() + */ } // TestSrvKeyspaceCachedError will test we properly re-try to query diff --git a/go/vt/sysvars/sysvars.go b/go/vt/sysvars/sysvars.go index b0e60b61d6d..13d3b5159cc 100644 --- a/go/vt/sysvars/sysvars.go +++ b/go/vt/sysvars/sysvars.go @@ -43,19 +43,24 @@ var ( off = "0" utf8 = "'utf8'" - Autocommit = SystemVariable{Name: "autocommit", IsBoolean: true, Default: on} - ClientFoundRows = SystemVariable{Name: "client_found_rows", IsBoolean: true, Default: off} - SkipQueryPlanCache = SystemVariable{Name: "skip_query_plan_cache", IsBoolean: true, Default: off} - TxReadOnly = SystemVariable{Name: "tx_read_only", IsBoolean: true, Default: off} - TransactionReadOnly = SystemVariable{Name: "transaction_read_only", IsBoolean: true, Default: off} - SQLSelectLimit = SystemVariable{Name: 
"sql_select_limit", Default: off} - TransactionMode = SystemVariable{Name: "transaction_mode", IdentifierAsString: true} - Workload = SystemVariable{Name: "workload", IdentifierAsString: true} - Charset = SystemVariable{Name: "charset", Default: utf8, IdentifierAsString: true} - Names = SystemVariable{Name: "names", Default: utf8, IdentifierAsString: true} - SessionUUID = SystemVariable{Name: "session_uuid", IdentifierAsString: true} + Autocommit = SystemVariable{Name: "autocommit", IsBoolean: true, Default: on} + Charset = SystemVariable{Name: "charset", Default: utf8, IdentifierAsString: true} + ClientFoundRows = SystemVariable{Name: "client_found_rows", IsBoolean: true, Default: off} + SessionEnableSystemSettings = SystemVariable{Name: "enable_system_settings", IsBoolean: true, Default: on} + Names = SystemVariable{Name: "names", Default: utf8, IdentifierAsString: true} + SessionUUID = SystemVariable{Name: "session_uuid", IdentifierAsString: true} + SkipQueryPlanCache = SystemVariable{Name: "skip_query_plan_cache", IsBoolean: true, Default: off} + Socket = SystemVariable{Name: "socket", Default: off} + SQLSelectLimit = SystemVariable{Name: "sql_select_limit", Default: off} + TransactionMode = SystemVariable{Name: "transaction_mode", IdentifierAsString: true} + TransactionReadOnly = SystemVariable{Name: "transaction_read_only", IsBoolean: true, Default: off} + TxReadOnly = SystemVariable{Name: "tx_read_only", IsBoolean: true, Default: off} + Workload = SystemVariable{Name: "workload", IdentifierAsString: true} + // Online DDL - DDLStrategy = SystemVariable{Name: "ddl_strategy", IdentifierAsString: true} + DDLStrategy = SystemVariable{Name: "ddl_strategy", IdentifierAsString: true} + Version = SystemVariable{Name: "version"} + VersionComment = SystemVariable{Name: "version_comment"} // Read After Write settings ReadAfterWriteGTID = SystemVariable{Name: "read_after_write_gtid"} @@ -75,11 +80,18 @@ var ( Charset, Names, SessionUUID, + SessionEnableSystemSettings, 
ReadAfterWriteGTID, ReadAfterWriteTimeOut, SessionTrackGTIDs, } + ReadOnly = []SystemVariable{ + Socket, + Version, + VersionComment, + } + IgnoreThese = []SystemVariable{ {Name: "big_tables", IsBoolean: true}, {Name: "bulk_insert_buffer_size"}, @@ -156,6 +168,7 @@ var ( {Name: "explicit_defaults_for_timestamp"}, {Name: "foreign_key_checks", IsBoolean: true}, {Name: "group_concat_max_len"}, + {Name: "information_schema_stats_expiry"}, {Name: "max_heap_table_size"}, {Name: "max_seeks_for_key"}, {Name: "max_tmp_tables"}, @@ -233,3 +246,17 @@ var ( {Name: "version_tokens_session"}, } ) + +// GetInterestingVariables is used to return all the variables that may be listed in a SHOW VARIABLES command. +func GetInterestingVariables() []string { + var res []string + // Add all the vitess aware variables + for _, variable := range VitessAware { + res = append(res, variable.Name) + } + // Also add version and version comment + res = append(res, Version.Name) + res = append(res, VersionComment.Name) + res = append(res, Socket.Name) + return res +} diff --git a/go/vt/tableacl/cached_size.go b/go/vt/tableacl/cached_size.go new file mode 100644 index 00000000000..63935b92a4d --- /dev/null +++ b/go/vt/tableacl/cached_size.go @@ -0,0 +1,39 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. 
+ +package tableacl + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *ACLResult) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field ACL vitess.io/vitess/go/vt/tableacl/acl.ACL + if cc, ok := cached.ACL.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field GroupName string + size += int64(len(cached.GroupName)) + return size +} diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go index 7ad29cbf8f7..b7994fe731d 100644 --- a/go/vt/tlstest/tlstest_test.go +++ b/go/vt/tlstest/tlstest_test.go @@ -34,12 +34,20 @@ import ( "vitess.io/vitess/go/vt/vttls" ) -// TestClientServer generates: +func TestClientServerWithoutCombineCerts(t *testing.T) { + testClientServer(t, false) +} + +func TestClientServerWithCombineCerts(t *testing.T) { + testClientServer(t, true) +} + +// testClientServer generates: // - a root CA // - a server intermediate CA, with a server. // - a client intermediate CA, with a client. // And then performs a few tests on them. -func TestClientServer(t *testing.T) { +func testClientServer(t *testing.T, combineCerts bool) { // Our test root. 
root, err := ioutil.TempDir("", "tlstest") if err != nil { @@ -48,11 +56,17 @@ func TestClientServer(t *testing.T) { defer os.RemoveAll(root) clientServerKeyPairs := CreateClientServerCertPairs(root) + serverCA := "" + + if combineCerts { + serverCA = clientServerKeyPairs.ServerCA + } serverConfig, err := vttls.ServerConfig( clientServerKeyPairs.ServerCert, clientServerKeyPairs.ServerKey, - clientServerKeyPairs.ClientCA) + clientServerKeyPairs.ClientCA, + serverCA) if err != nil { t.Fatalf("TLSServerConfig failed: %v", err) } @@ -165,10 +179,19 @@ func TestClientServer(t *testing.T) { } } -func getServerConfig(keypairs ClientServerKeyPairs) (*tls.Config, error) { +func getServerConfigWithoutCombinedCerts(keypairs ClientServerKeyPairs) (*tls.Config, error) { return vttls.ServerConfig( - keypairs.ClientCert, - keypairs.ClientKey, + keypairs.ServerCert, + keypairs.ServerKey, + keypairs.ClientCA, + "") +} + +func getServerConfigWithCombinedCerts(keypairs ClientServerKeyPairs) (*tls.Config, error) { + return vttls.ServerConfig( + keypairs.ServerCert, + keypairs.ServerKey, + keypairs.ClientCA, keypairs.ServerCA) } @@ -180,12 +203,20 @@ func getClientConfig(keypairs ClientServerKeyPairs) (*tls.Config, error) { keypairs.ServerName) } -func TestServerTLSConfigCaching(t *testing.T) { +func testServerTLSConfigCaching(t *testing.T, getServerConfig func(ClientServerKeyPairs) (*tls.Config, error)) { testConfigGeneration(t, "servertlstest", getServerConfig, func(config *tls.Config) *x509.CertPool { return config.ClientCAs }) } +func TestServerTLSConfigCachingWithoutCombinedCerts(t *testing.T) { + testServerTLSConfigCaching(t, getServerConfigWithoutCombinedCerts) +} + +func TestServerTLSConfigCachingWithCombinedCerts(t *testing.T) { + testServerTLSConfigCaching(t, getServerConfigWithCombinedCerts) +} + func TestClientTLSConfigCaching(t *testing.T) { testConfigGeneration(t, "clienttlstest", getClientConfig, func(config *tls.Config) *x509.CertPool { return config.RootCAs @@ -238,3 
+269,37 @@ func testConfigGeneration(t *testing.T, rootPrefix string, generateConfig func(C } } + +func testNumberOfCertsWithOrWithoutCombining(t *testing.T, numCertsExpected int, combine bool) { + // Our test root. + root, err := ioutil.TempDir("", "tlstest") + if err != nil { + t.Fatalf("TempDir failed: %v", err) + } + defer os.RemoveAll(root) + + clientServerKeyPairs := CreateClientServerCertPairs(root) + serverCA := "" + if combine { + serverCA = clientServerKeyPairs.ServerCA + } + + serverConfig, err := vttls.ServerConfig( + clientServerKeyPairs.ServerCert, + clientServerKeyPairs.ServerKey, + clientServerKeyPairs.ClientCA, + serverCA) + + if err != nil { + t.Fatalf("TLSServerConfig failed: %v", err) + } + assert.Equal(t, numCertsExpected, len(serverConfig.Certificates[0].Certificate)) +} + +func TestNumberOfCertsWithoutCombining(t *testing.T) { + testNumberOfCertsWithOrWithoutCombining(t, 1, false) +} + +func TestNumberOfCertsWithCombining(t *testing.T) { + testNumberOfCertsWithOrWithoutCombining(t, 2, true) +} diff --git a/go/vt/topo/cell_info.go b/go/vt/topo/cell_info.go index ca700890b57..ba59b656067 100644 --- a/go/vt/topo/cell_info.go +++ b/go/vt/topo/cell_info.go @@ -18,6 +18,7 @@ package topo import ( "path" + "strings" "context" @@ -171,3 +172,41 @@ func (ts *Server) GetKnownCells(ctx context.Context) ([]string, error) { } return DirEntriesToStringArray(entries), nil } + +// ExpandCells takes a comma-separated list of cells and returns an array of cell names +// Aliases are expanded and an empty string returns all cells +func (ts *Server) ExpandCells(ctx context.Context, cells string) ([]string, error) { + var err error + var outputCells []string + inputCells := strings.Split(cells, ",") + if cells == "" { + inputCells, err = ts.GetCellInfoNames(ctx) + if err != nil { + return nil, err + } + } + + for _, cell := range inputCells { + cell2 := strings.TrimSpace(cell) + shortCtx, cancel := context.WithTimeout(ctx, *RemoteOperationTimeout) + defer cancel() 
+ _, err := ts.GetCellInfo(shortCtx, cell2, false) + if err != nil { + // not a valid cell, check whether it is a cell alias + shortCtx, cancel := context.WithTimeout(ctx, *RemoteOperationTimeout) + defer cancel() + alias, err2 := ts.GetCellsAlias(shortCtx, cell2, false) + // if we get an error, either cellAlias doesn't exist or it isn't a cell alias at all. Ignore and continue + if err2 == nil { + outputCells = append(outputCells, alias.Cells...) + } + if err != nil { + return nil, err + } + } else { + // valid cell, add it to our list + outputCells = append(outputCells, cell2) + } + } + return outputCells, nil +} diff --git a/go/vt/topo/errors.go b/go/vt/topo/errors.go index 85158e358b8..8ec91532f5c 100644 --- a/go/vt/topo/errors.go +++ b/go/vt/topo/errors.go @@ -17,6 +17,7 @@ limitations under the License. package topo import ( + "errors" "fmt" ) @@ -80,8 +81,15 @@ func (e Error) Error() string { // IsErrType returns true if the error has the specified ErrorCode. func IsErrType(err error, code ErrorCode) bool { + var e Error + + if errors.As(err, &e) { + return e.code == code + } + if e, ok := err.(Error); ok { return e.code == code } + return false } diff --git a/go/vt/vtadmin/errors.go b/go/vt/topo/events/external_cluster_change.go similarity index 51% rename from go/vt/vtadmin/errors.go rename to go/vt/topo/events/external_cluster_change.go index 60356e9bde9..4d1b6762d81 100644 --- a/go/vt/vtadmin/errors.go +++ b/go/vt/topo/events/external_cluster_change.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package vtadmin +package events -import "errors" - -var ( - // ErrAmbiguousTablet occurs when more than one tablet is found for a given - // set of filter criteria. 
- ErrAmbiguousTablet = errors.New("multiple tablets found") - // ErrNoTablet occurs when a tablet cannot be found for a given set of - // filter criteria. - ErrNoTablet = errors.New("no such tablet") - // ErrUnsupportedCluster occurs when a cluster parameter is invalid. - ErrUnsupportedCluster = errors.New("unsupported cluster(s)") +import ( + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) + +// ExternalVitessClusterChange is an event that describes changes to a vitess cluster. +type ExternalVitessClusterChange struct { + ClusterName string + ExternalVitessCluster *topodatapb.ExternalVitessCluster + Status string +} diff --git a/go/vt/topo/external_vitess_cluster.go b/go/vt/topo/external_vitess_cluster.go new file mode 100644 index 00000000000..e19bf7b161d --- /dev/null +++ b/go/vt/topo/external_vitess_cluster.go @@ -0,0 +1,137 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topo + +import ( + "context" + "path" + + "github.com/golang/protobuf/proto" + + "vitess.io/vitess/go/event" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/events" + "vitess.io/vitess/go/vt/vterrors" +) + +// ExternalVitessClusterInfo is a meta struct that contains metadata to give the +// data more context and convenience. This is the main way we interact +// with a vitess cluster stored in the topo. 
+type ExternalVitessClusterInfo struct { + ClusterName string + version Version + *topodatapb.ExternalVitessCluster +} + +// GetExternalVitessClusterDir returns node path containing external vitess clusters +func GetExternalVitessClusterDir() string { + return path.Join(ExternalClustersFile, ExternalClusterVitess) +} + +// GetExternalVitessClusterPath returns node path containing external clusters +func GetExternalVitessClusterPath(clusterName string) string { + return path.Join(GetExternalVitessClusterDir(), clusterName) +} + +// CreateExternalVitessCluster creates a topo record for the passed vitess cluster +func (ts *Server) CreateExternalVitessCluster(ctx context.Context, clusterName string, value *topodatapb.ExternalVitessCluster) error { + data, err := proto.Marshal(value) + if err != nil { + return err + } + + if _, err := ts.globalCell.Create(ctx, GetExternalVitessClusterPath(clusterName), data); err != nil { + return err + } + + event.Dispatch(&events.ExternalVitessClusterChange{ + ClusterName: clusterName, + ExternalVitessCluster: value, + Status: "created", + }) + return nil +} + +// GetExternalVitessCluster returns a topo record for the named vitess cluster +func (ts *Server) GetExternalVitessCluster(ctx context.Context, clusterName string) (*ExternalVitessClusterInfo, error) { + data, version, err := ts.globalCell.Get(ctx, GetExternalVitessClusterPath(clusterName)) + switch { + case IsErrType(err, NoNode): + return nil, nil + case err == nil: + default: + return nil, err + } + vc := &topodatapb.ExternalVitessCluster{} + if err = proto.Unmarshal(data, vc); err != nil { + return nil, vterrors.Wrap(err, "bad vitess cluster data") + } + + return &ExternalVitessClusterInfo{ + ClusterName: clusterName, + version: version, + ExternalVitessCluster: vc, + }, nil +} + +// UpdateExternalVitessCluster updates the topo record for the named vitess cluster +func (ts *Server) UpdateExternalVitessCluster(ctx context.Context, vc *ExternalVitessClusterInfo) error { + 
//FIXME: check for cluster lock + data, err := proto.Marshal(vc.ExternalVitessCluster) + if err != nil { + return err + } + version, err := ts.globalCell.Update(ctx, GetExternalVitessClusterPath(vc.ClusterName), data, vc.version) + if err != nil { + return err + } + vc.version = version + + event.Dispatch(&events.ExternalVitessClusterChange{ + ClusterName: vc.ClusterName, + ExternalVitessCluster: vc.ExternalVitessCluster, + Status: "updated", + }) + return nil +} + +// DeleteExternalVitessCluster deletes the topo record for the named vitess cluster +func (ts *Server) DeleteExternalVitessCluster(ctx context.Context, clusterName string) error { + if err := ts.globalCell.Delete(ctx, GetExternalVitessClusterPath(clusterName), nil); err != nil { + return err + } + + event.Dispatch(&events.ExternalVitessClusterChange{ + ClusterName: clusterName, + ExternalVitessCluster: nil, + Status: "deleted", + }) + return nil +} + +// GetExternalVitessClusters returns the list of external vitess clusters in the topology. 
+func (ts *Server) GetExternalVitessClusters(ctx context.Context) ([]string, error) { + children, err := ts.globalCell.ListDir(ctx, GetExternalVitessClusterDir(), false /*full*/) + switch { + case err == nil: + return DirEntriesToStringArray(children), nil + case IsErrType(err, NoNode): + return nil, nil + default: + return nil, err + } +} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go b/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go index 82a02a7a271..5b425fd4603 100644 --- a/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go +++ b/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go @@ -30,7 +30,7 @@ import ( var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) //nolint +var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ topov1beta1.AddToScheme, } diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index f8be44e5c85..8fc31c4a6ae 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -76,6 +76,7 @@ const ( SrvVSchemaFile = "SrvVSchema" SrvKeyspaceFile = "SrvKeyspace" RoutingRulesFile = "RoutingRules" + ExternalClustersFile = "ExternalClusters" ) // Path for all object types. @@ -86,6 +87,9 @@ const ( ShardsPath = "shards" TabletsPath = "tablets" MetadataPath = "metadata" + + ExternalClusterMySQL = "mysql" + ExternalClusterVitess = "vitess" ) // Factory is a factory interface to create Conn objects. 
@@ -329,3 +333,23 @@ func (ts *Server) clearCellAliasesCache() { defer cellsAliases.mu.Unlock() cellsAliases.cellsToAliases = make(map[string]string) } + +// OpenExternalVitessClusterServer returns the topo server of the external cluster +func (ts *Server) OpenExternalVitessClusterServer(ctx context.Context, clusterName string) (*Server, error) { + vc, err := ts.GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return nil, err + } + if vc == nil { + return nil, fmt.Errorf("no vitess cluster found with name %s", clusterName) + } + var externalTopo *Server + externalTopo, err = OpenServer(vc.TopoConfig.TopoType, vc.TopoConfig.Server, vc.TopoConfig.Root) + if err != nil { + return nil, err + } + if externalTopo == nil { + return nil, fmt.Errorf("unable to open external topo for config %s", clusterName) + } + return externalTopo, nil +} diff --git a/go/vt/topo/srv_keyspace.go b/go/vt/topo/srv_keyspace.go index 3b0fa54141f..d98869eb1d7 100644 --- a/go/vt/topo/srv_keyspace.go +++ b/go/vt/topo/srv_keyspace.go @@ -701,3 +701,22 @@ func ShardIsServing(srvKeyspace *topodatapb.SrvKeyspace, shard *topodatapb.Shard } return false } + +// ValidateSrvKeyspace validates that the SrvKeyspace for given keyspace in the provided cells is not corrupted +func (ts *Server) ValidateSrvKeyspace(ctx context.Context, keyspace, cells string) error { + cellsToValidate, err := ts.ExpandCells(ctx, cells) + if err != nil { + return err + } + for _, cell := range cellsToValidate { + srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, keyspace) + if err != nil { + return err + } + err = OrderAndCheckPartitions(cell, srvKeyspace) + if err != nil { + return err + } + } + return nil +} diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index ff716ee810f..c11bd7c6b28 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -472,3 +472,21 @@ func (ts *Server) GetTabletsByCell(ctx context.Context, cell string) ([]*topodat } return result, nil } + +// ParseServingTabletType parses 
the tablet type into the enum, and makes sure +// that the enum is of serving type (MASTER, REPLICA, RDONLY/BATCH). +// +// Note: This function more closely belongs in topoproto, but that would create +// a circular import between packages topo and topoproto. +func ParseServingTabletType(param string) (topodatapb.TabletType, error) { + servedType, err := topoproto.ParseTabletType(param) + if err != nil { + return topodatapb.TabletType_UNKNOWN, err + } + + if !IsInServingGraph(servedType) { + return topodatapb.TabletType_UNKNOWN, fmt.Errorf("served_type has to be in the serving graph, not %v", param) + } + + return servedType, nil +} diff --git a/go/vt/topo/topoproto/keyspace.go b/go/vt/topo/topoproto/keyspace.go index e409f763fd7..625ec0b7293 100644 --- a/go/vt/topo/topoproto/keyspace.go +++ b/go/vt/topo/topoproto/keyspace.go @@ -23,7 +23,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -// ParseKeyspaceType parses a string into a KeyspaceType +// ParseKeyspaceType parses a string into a KeyspaceType. func ParseKeyspaceType(param string) (topodatapb.KeyspaceType, error) { value, ok := topodatapb.KeyspaceType_value[strings.ToUpper(param)] if !ok { @@ -32,3 +32,13 @@ func ParseKeyspaceType(param string) (topodatapb.KeyspaceType, error) { } return topodatapb.KeyspaceType(value), nil } + +// KeyspaceTypeString returns the string representation of a KeyspaceType. +func KeyspaceTypeString(kt topodatapb.KeyspaceType) string { + str, ok := topodatapb.KeyspaceType_name[int32(kt)] + if !ok { + return "UNKNOWN" + } + + return str +} diff --git a/go/vt/topo/topoproto/tablet.go b/go/vt/topo/topoproto/tablet.go index bca66befc3d..ca4571bf974 100644 --- a/go/vt/topo/topoproto/tablet.go +++ b/go/vt/topo/topoproto/tablet.go @@ -134,6 +134,18 @@ func (tal TabletAliasList) Swap(i, j int) { tal[i], tal[j] = tal[j], tal[i] } +// ToStringSlice returns a slice which is the result of mapping +// TabletAliasString over a slice of TabletAliases. 
+func (tal TabletAliasList) ToStringSlice() []string { + result := make([]string, len(tal)) + + for i, alias := range tal { + result[i] = TabletAliasString(alias) + } + + return result +} + // AllTabletTypes lists all the possible tablet types var AllTabletTypes = []topodatapb.TabletType{ topodatapb.TabletType_MASTER, diff --git a/go/vt/topo/topotests/cell_info_test.go b/go/vt/topo/topotests/cell_info_test.go index 8d6ea160109..7854763a683 100644 --- a/go/vt/topo/topotests/cell_info_test.go +++ b/go/vt/topo/topotests/cell_info_test.go @@ -18,8 +18,11 @@ package topotests import ( "fmt" + "strings" "testing" + "github.com/stretchr/testify/require" + "context" "vitess.io/vitess/go/vt/topo" @@ -44,6 +47,11 @@ func TestCellInfo(t *testing.T) { t.Fatalf("unexpected CellInfo: %v", ci) } + var cells []string + cells, err = ts.ExpandCells(ctx, cell) + require.NoError(t, err) + require.EqualValues(t, []string{"cell1"}, cells) + // Update the Server Address. if err := ts.UpdateCellInfoFields(ctx, cell, func(ci *topodatapb.CellInfo) error { ci.ServerAddress = "new address" @@ -124,3 +132,48 @@ func TestCellInfo(t *testing.T) { t.Fatalf("GetCellInfo(non-existing cell) failed: %v", err) } } + +func TestExpandCells(t *testing.T) { + ctx := context.Background() + var cells []string + var err error + var allCells = "cell1,cell2,cell3" + type testCase struct { + name string + cellsIn string + cellsOut []string + errString string + } + + testCases := []testCase{ + {"single", "cell1", []string{"cell1"}, ""}, + {"multiple", "cell1,cell2,cell3", []string{"cell1", "cell2", "cell3"}, ""}, + {"empty", "", []string{"cell1", "cell2", "cell3"}, ""}, + {"bad", "unknown", nil, "node doesn't exist"}, + } + + for _, tCase := range testCases { + t.Run(tCase.name, func(t *testing.T) { + cellsIn := tCase.cellsIn + if cellsIn == "" { + cellsIn = allCells + } + topoCells := strings.Split(cellsIn, ",") + var ts *topo.Server + if tCase.name == "bad" { + ts = memorytopo.NewServer() + } else { + 
ts = memorytopo.NewServer(topoCells...) + } + cells, err = ts.ExpandCells(ctx, cellsIn) + if tCase.errString != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tCase.errString) + } else { + require.NoError(t, err) + } + require.EqualValues(t, tCase.cellsOut, cells) + }) + } + +} diff --git a/go/vt/topo/topotests/srv_keyspace_test.go b/go/vt/topo/topotests/srv_keyspace_test.go index 40d418e63dc..149cc14c54d 100644 --- a/go/vt/topo/topotests/srv_keyspace_test.go +++ b/go/vt/topo/topotests/srv_keyspace_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "context" "github.com/golang/protobuf/proto" @@ -1170,3 +1172,71 @@ func TestMasterMigrateServedType(t *testing.T) { t.Errorf("MigrateServedType() failure. Got %v, want: %v", string(got), string(want)) } } + +func TestValidateSrvKeyspace(t *testing.T) { + cell := "cell1" + cell2 := "cell2" + keyspace := "ks1" + ctx := context.Background() + ts := memorytopo.NewServer(cell, cell2) + + leftKeyRange, err := key.ParseShardingSpec("-80") + if err != nil || len(leftKeyRange) != 1 { + t.Fatalf("ParseShardingSpec failed. Expected non error and only one element. Got err: %v, len(%v)", err, len(leftKeyRange)) + } + + rightKeyRange, err := key.ParseShardingSpec("80-") + if err != nil || len(rightKeyRange) != 1 { + t.Fatalf("ParseShardingSpec failed. Expected non error and only one element. 
Got err: %v, len(%v)", err, len(rightKeyRange)) + } + + correct := &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_MASTER, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "-80", + KeyRange: leftKeyRange[0], + }, + { + Name: "80-", + KeyRange: rightKeyRange[0], + }, + }, + }, + }, + } + + incorrect := &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_MASTER, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "80-", + KeyRange: rightKeyRange[0], + }, + }, + }, + }, + } + + if err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, correct); err != nil { + t.Fatalf("UpdateSrvKeyspace() failed: %v", err) + } + + if err := ts.UpdateSrvKeyspace(ctx, cell2, keyspace, incorrect); err != nil { + t.Fatalf("UpdateSrvKeyspace() failed: %v", err) + } + errMsg := "keyspace partition for MASTER in cell cell2 does not start with min key" + err = ts.ValidateSrvKeyspace(ctx, keyspace, "cell1,cell2") + require.EqualError(t, err, errMsg) + + err = ts.ValidateSrvKeyspace(ctx, keyspace, "cell1") + require.NoError(t, err) + err = ts.ValidateSrvKeyspace(ctx, keyspace, "cell2") + require.EqualError(t, err, errMsg) + err = ts.ValidateSrvKeyspace(ctx, keyspace, "") + require.EqualError(t, err, errMsg) +} diff --git a/go/vt/topotools/position_searcher.go b/go/vt/topotools/position_searcher.go new file mode 100644 index 00000000000..8affbcf077e --- /dev/null +++ b/go/vt/topotools/position_searcher.go @@ -0,0 +1,124 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topotools + +import ( + "context" + "sync" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// MaxReplicationPositionSearcher provides a threadsafe way to find a tablet +// with the most advanced replication position. +// +// A typical usage will look like: +// +// var ( +// searcher = NewMaxReplicationPositionSearcher(tmc, logger, waitTimeout) +// wg sync.WaitGroup +// ) +// for _, tablet := range tablets { +// wg.Add(1) +// go func(t *topodatapb.Tablet) { +// defer wg.Done() +// searcher.ProcessTablet(ctx, t) +// }(tablet) +// } +// wg.Wait() +// maxPosTablet := searcher.MaxPositionTablet() +// +type MaxReplicationPositionSearcher struct { + tmc tmclient.TabletManagerClient + logger logutil.Logger + waitTimeout time.Duration + m sync.Mutex + + maxPos mysql.Position + maxPosTablet *topodatapb.Tablet +} + +// NewMaxReplicationPositionSearcher returns a new +// MaxReplicationPositionSearcher instance, ready to begin processing tablets. +// To reuse an existing instance, first call Reset(). 
+func NewMaxReplicationPositionSearcher(tmc tmclient.TabletManagerClient, logger logutil.Logger, waitTimeout time.Duration) *MaxReplicationPositionSearcher { + return &MaxReplicationPositionSearcher{ + tmc: tmc, + logger: logger, + waitTimeout: waitTimeout, + m: sync.Mutex{}, + maxPos: mysql.Position{}, + maxPosTablet: nil, + } +} + +// ProcessTablet processes the replication position for a single tablet and +// updates the state of the searcher. It is safe to call from multiple +// goroutines. +func (searcher *MaxReplicationPositionSearcher) ProcessTablet(ctx context.Context, tablet *topodatapb.Tablet) { + searcher.logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias)) + + ctx, cancel := context.WithTimeout(ctx, searcher.waitTimeout) + defer cancel() + + status, err := searcher.tmc.ReplicationStatus(ctx, tablet) + if err != nil { + searcher.logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err) + + return + } + + pos, err := mysql.DecodePosition(status.Position) + if err != nil { + searcher.logger.Warningf("cannot decode replica position %v for tablet %v, ignoring tablet: %v", status.Position, topoproto.TabletAliasString(tablet.Alias), err) + + return + } + + searcher.m.Lock() + defer searcher.m.Unlock() + + if searcher.maxPosTablet == nil || !searcher.maxPos.AtLeast(pos) { + searcher.maxPos = pos + searcher.maxPosTablet = tablet + } +} + +// MaxPositionTablet returns the most advanced-positioned tablet the searcher +// has seen so far. +func (searcher *MaxReplicationPositionSearcher) MaxPositionTablet() *topodatapb.Tablet { + searcher.m.Lock() + defer searcher.m.Unlock() + + return searcher.maxPosTablet +} + +// Reset clears any tracked position or tablet from the searcher, making this +// instance ready to begin a new search. 
+func (searcher *MaxReplicationPositionSearcher) Reset() { + searcher.m.Lock() + defer searcher.m.Unlock() + + searcher.maxPos = mysql.Position{} + searcher.maxPosTablet = nil +} diff --git a/go/vt/topotools/position_searcher_test.go b/go/vt/topotools/position_searcher_test.go new file mode 100644 index 00000000000..b90b52be489 --- /dev/null +++ b/go/vt/topotools/position_searcher_test.go @@ -0,0 +1,223 @@ +package topotools + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +type fakeTMClient struct { + tmclient.TabletManagerClient + tabletReplicationPositions map[string]*replicationdatapb.Status +} + +func (fake *fakeTMClient) ReplicationStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { + if fake.tabletReplicationPositions == nil { + return nil, assert.AnError + } + + if tablet.Alias == nil { + return nil, assert.AnError + } + + if pos, ok := fake.tabletReplicationPositions[topoproto.TabletAliasString(tablet.Alias)]; ok { + return pos, nil + } + + return nil, assert.AnError +} + +func TestMaxReplicationPositionSearcher(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + positions map[string]*replicationdatapb.Status + tablets []*topodatapb.Tablet + expected *topodatapb.Tablet + }{ + { + name: "success", + positions: map[string]*replicationdatapb.Status{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23", + }, + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23-28", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23-30", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: 
"zone1", + Uid: 100, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + expected: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + { + name: "reverse order", + positions: map[string]*replicationdatapb.Status{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23", + }, + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23-28", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23-30", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + expected: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + { + name: "no position for tablet is ignored", + positions: map[string]*replicationdatapb.Status{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + expected: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + { + name: "bad position is ignored", + positions: map[string]*replicationdatapb.Status{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23", + }, + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:23-28", + }, + "zone1-0000000102": { + Position: "junk position", + }, + }, + tablets: []*topodatapb.Tablet{ + { 
+ Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + expected: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + tmc := &fakeTMClient{ + tabletReplicationPositions: tt.positions, + } + searcher := NewMaxReplicationPositionSearcher(tmc, logutil.NewMemoryLogger(), time.Millisecond*50) + + for _, tablet := range tt.tablets { + searcher.ProcessTablet(ctx, tablet) + } + + assert.Equal(t, tt.expected, searcher.MaxPositionTablet()) + }) + } +} diff --git a/go/vt/topotools/rebuild_keyspace.go b/go/vt/topotools/rebuild_keyspace.go index 1da0468321d..bc72b2dd8b1 100644 --- a/go/vt/topotools/rebuild_keyspace.go +++ b/go/vt/topotools/rebuild_keyspace.go @@ -83,10 +83,11 @@ func RebuildKeyspaceLocked(ctx context.Context, log logutil.Logger, ts *topo.Ser switch { case err == nil: for _, partition := range srvKeyspace.GetPartitions() { - if partition.GetShardTabletControls() != nil { - return fmt.Errorf("can't rebuild serving keyspace while a migration is on going. TabletControls is set for partition %v", partition) + for _, shardTabletControl := range partition.GetShardTabletControls() { + if shardTabletControl.QueryServiceDisabled { + return fmt.Errorf("can't rebuild serving keyspace while a migration is on going. 
TabletControls is set for partition %v", partition) + } } - } case topo.IsErrType(err, topo.NoNode): // NOOP diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index 48edb1339d4..8623085543b 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -103,6 +103,44 @@ func CheckOwnership(oldTablet, newTablet *topodatapb.Tablet) error { return nil } +// IsPrimaryTablet is a helper function to determine whether the current tablet +// is a primary before we allow its tablet record to be deleted. The canonical +// way to determine the only true primary in a shard is to list all the tablets +// and find the one with the highest MasterTermStartTime among the ones that +// claim to be master. +// +// We err on the side of caution here, i.e. we should never return false for +// a true primary tablet, but it is okay to return true for a tablet that isn't +// the true primary. This can occur if someone issues a DeleteTablet while +// the system is in transition (a reparenting event is in progress and parts of +// the topo have not yet been updated). 
+func IsPrimaryTablet(ctx context.Context, ts *topo.Server, ti *topo.TabletInfo) (bool, error) { + // Tablet record claims to be non-master, we believe it + if ti.Type != topodatapb.TabletType_MASTER { + return false, nil + } + + si, err := ts.GetShard(ctx, ti.Keyspace, ti.Shard) + if err != nil { + // strictly speaking it isn't correct to return false here, the tablet + // status is unknown + return false, err + } + + // Tablet record claims to be master, and shard record matches + if topoproto.TabletAliasEqual(si.MasterAlias, ti.Tablet.Alias) { + return true, nil + } + + // Shard record has another tablet as master, so check MasterTermStartTime + // If tablet record's MasterTermStartTime is later than the one in the shard + // record, then the tablet is master + tabletMTST := ti.GetMasterTermStartTime() + shardMTST := si.GetMasterTermStartTime() + + return tabletMTST.After(shardMTST), nil +} + // DeleteTablet removes a tablet record from the topology: // - the replication data record if any // - the tablet record diff --git a/go/vt/vitessdriver/driver.go b/go/vt/vitessdriver/driver.go index 63c00ab730e..66f6e52528b 100644 --- a/go/vt/vitessdriver/driver.go +++ b/go/vt/vitessdriver/driver.go @@ -209,6 +209,15 @@ func (c *conn) dial() error { return nil } +func (c *conn) Ping(ctx context.Context) error { + if c.Streaming { + return errors.New("Ping not allowed for streaming connections") + } + + _, err := c.ExecContext(ctx, "select 1", nil) + return err +} + func (c *conn) Prepare(query string) (driver.Stmt, error) { return &stmt{c: c, query: query}, nil } diff --git a/go/vt/vitessdriver/fakeserver_test.go b/go/vt/vitessdriver/fakeserver_test.go index 2a8a1f5b00a..5cccbb237aa 100644 --- a/go/vt/vitessdriver/fakeserver_test.go +++ b/go/vt/vitessdriver/fakeserver_test.go @@ -140,7 +140,7 @@ func (f *fakeVTGateService) ResolveTransaction(ctx context.Context, dtid string) return nil } -func (f *fakeVTGateService) VStream(ctx context.Context, tabletType 
topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { +func (f *fakeVTGateService) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags, send func([]*binlogdatapb.VEvent) error) error { return nil } diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index 012db642970..cdd7fac76a8 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -18,22 +18,36 @@ package vtadmin import ( "context" + "encoding/json" + stderrors "errors" + "fmt" "net/http" + "strings" "sync" + "time" "github.com/gorilla/handlers" "github.com/gorilla/mux" + "k8s.io/apimachinery/pkg/util/sets" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin/cluster" + "vitess.io/vitess/go/vt/vtadmin/errors" "vitess.io/vitess/go/vt/vtadmin/grpcserver" vtadminhttp "vitess.io/vitess/go/vt/vtadmin/http" vthandlers "vitess.io/vitess/go/vt/vtadmin/http/handlers" "vitess.io/vitess/go/vt/vtadmin/sort" + "vitess.io/vitess/go/vt/vtadmin/vtadminproto" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtexplain" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) @@ -44,10 +58,18 @@ type API struct { clusterMap map[string]*cluster.Cluster serv *grpcserver.Server router *mux.Router + + // See https://github.com/vitessio/vitess/issues/7723 for why this exists. + vtexplainLock sync.Mutex } // NewAPI returns a new API, configured to service the given set of clusters, // and configured with the given gRPC and HTTP server options. 
+// +// If opts.Services is nil, NewAPI will automatically add +// "vtadmin.VTAdminServer" to the list of services queryable in the healthcheck +// service. Callers can opt-out of this behavior by explicitly setting this +// value to the empty slice. func NewAPI(clusters []*cluster.Cluster, opts grpcserver.Options, httpOpts vtadminhttp.Options) *API { clusterMap := make(map[string]*cluster.Cluster, len(clusters)) for _, cluster := range clusters { @@ -58,6 +80,10 @@ func NewAPI(clusters []*cluster.Cluster, opts grpcserver.Options, httpOpts vtadm return c1.ID < c2.ID }).Sort(clusters) + if opts.Services == nil { + opts.Services = []string{"vtadmin.VTAdminServer"} + } + serv := grpcserver.New("vtadmin", opts) serv.Router().HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok\n")) @@ -76,9 +102,19 @@ func NewAPI(clusters []*cluster.Cluster, opts grpcserver.Options, httpOpts vtadm httpAPI := vtadminhttp.NewAPI(api) + router.HandleFunc("/clusters", httpAPI.Adapt(vtadminhttp.GetClusters)).Name("API.GetClusters") router.HandleFunc("/gates", httpAPI.Adapt(vtadminhttp.GetGates)).Name("API.GetGates") + router.HandleFunc("/keyspaces", httpAPI.Adapt(vtadminhttp.GetKeyspaces)).Name("API.GetKeyspaces") + router.HandleFunc("/schema/{table}", httpAPI.Adapt(vtadminhttp.FindSchema)).Name("API.FindSchema") + router.HandleFunc("/schema/{cluster_id}/{keyspace}/{table}", httpAPI.Adapt(vtadminhttp.GetSchema)).Name("API.GetSchema") + router.HandleFunc("/schemas", httpAPI.Adapt(vtadminhttp.GetSchemas)).Name("API.GetSchemas") router.HandleFunc("/tablets", httpAPI.Adapt(vtadminhttp.GetTablets)).Name("API.GetTablets") router.HandleFunc("/tablet/{tablet}", httpAPI.Adapt(vtadminhttp.GetTablet)).Name("API.GetTablet") + router.HandleFunc("/vschema/{cluster_id}/{keyspace}", httpAPI.Adapt(vtadminhttp.GetVSchema)).Name("API.GetVSchema") + router.HandleFunc("/vschemas", httpAPI.Adapt(vtadminhttp.GetVSchemas)).Name("API.GetVSchemas") + router.HandleFunc("/vtexplain", 
httpAPI.Adapt(vtadminhttp.VTExplain)).Name("API.VTExplain") + router.HandleFunc("/workflow/{cluster_id}/{keyspace}/{name}", httpAPI.Adapt(vtadminhttp.GetWorkflow)).Name("API.GetWorkflow") + router.HandleFunc("/workflows", httpAPI.Adapt(vtadminhttp.GetWorkflows)).Name("API.GetWorkflows") // Middlewares are executed in order of addition. Our ordering (all // middlewares being optional) is: @@ -111,6 +147,103 @@ func (api *API) ListenAndServe() error { return api.serv.ListenAndServe() } +// FindSchema is part of the vtadminpb.VTAdminServer interface. +func (api *API) FindSchema(ctx context.Context, req *vtadminpb.FindSchemaRequest) (*vtadminpb.Schema, error) { + span, _ := trace.NewSpan(ctx, "API.FindSchema") + defer span.Finish() + + span.Annotate("table", req.Table) + + clusters, _ := api.getClustersForRequest(req.ClusterIds) + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results []*vtadminpb.Schema + ) + + for _, c := range clusters { + wg.Add(1) + + go func(c *cluster.Cluster) { + defer wg.Done() + + tablets, err := c.FindTablets(ctx, func(t *vtadminpb.Tablet) bool { + // Filter out all the non-serving tablets once, to make the + // later, per-keyspace filtering slightly faster (fewer + // potentially-redundant iterations). 
+ return t.State == vtadminpb.Tablet_SERVING + }, -1) + if err != nil { + err := fmt.Errorf("could not find any serving tablets for cluster %s: %w", c.ID, err) + rec.RecordError(err) + + return + } + + schemas, err := api.getSchemas(ctx, c, cluster.GetSchemaOptions{ + Tablets: tablets, + TableSizeOptions: req.TableSizeOptions, + }) + if err != nil { + err := fmt.Errorf("%w: while collecting schemas for cluster %s", err, c.ID) + rec.RecordError(err) + + return + } + + for _, schema := range schemas { + for _, td := range schema.TableDefinitions { + if td.Name == req.Table { + m.Lock() + results = append(results, schema) + m.Unlock() + + return + } + } + } + + log.Infof("cluster %s has no tables named %s", c.ID, req.Table) + }(c) + } + + wg.Wait() + + if rec.HasErrors() { + return nil, rec.Error() + } + + switch len(results) { + case 0: + return nil, fmt.Errorf("%w: no schemas found with table named %s", errors.ErrNoSchema, req.Table) + case 1: + return results[0], nil + default: + return nil, fmt.Errorf("%w: %d schemas found with table named %s", errors.ErrAmbiguousSchema, len(results), req.Table) + } +} + +// GetClusters is part of the vtadminpb.VTAdminServer interface. +func (api *API) GetClusters(ctx context.Context, req *vtadminpb.GetClustersRequest) (*vtadminpb.GetClustersResponse, error) { + span, _ := trace.NewSpan(ctx, "API.GetClusters") + defer span.Finish() + + vcs := make([]*vtadminpb.Cluster, 0, len(api.clusters)) + + for _, c := range api.clusters { + vcs = append(vcs, &vtadminpb.Cluster{ + Id: c.ID, + Name: c.Name, + }) + } + + return &vtadminpb.GetClustersResponse{ + Clusters: vcs, + }, nil +} + // GetGates is part of the vtadminpb.VTAdminServer interface. 
func (api *API) GetGates(ctx context.Context, req *vtadminpb.GetGatesRequest) (*vtadminpb.GetGatesResponse, error) { span, ctx := trace.NewSpan(ctx, "API.GetGates") @@ -131,14 +264,27 @@ func (api *API) GetGates(ctx context.Context, req *vtadminpb.GetGatesRequest) (* go func(c *cluster.Cluster) { defer wg.Done() - g, err := c.Discovery.DiscoverVTGates(ctx, []string{}) + gs, err := c.Discovery.DiscoverVTGates(ctx, []string{}) if err != nil { - er.RecordError(err) + er.RecordError(fmt.Errorf("DiscoverVTGates(cluster = %s): %w", c.ID, err)) return } m.Lock() - gates = append(gates, g...) + + for _, g := range gs { + gates = append(gates, &vtadminpb.VTGate{ + Cell: g.Cell, + Cluster: &vtadminpb.Cluster{ + Id: c.ID, + Name: c.Name, + }, + Hostname: g.Hostname, + Keyspaces: g.Keyspaces, + Pool: g.Pool, + }) + } + m.Unlock() }(c) } @@ -154,6 +300,242 @@ func (api *API) GetGates(ctx context.Context, req *vtadminpb.GetGatesRequest) (* }, nil } +// GetKeyspaces is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) GetKeyspaces(ctx context.Context, req *vtadminpb.GetKeyspacesRequest) (*vtadminpb.GetKeyspacesResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.GetKeyspaces") + defer span.Finish() + + clusters, _ := api.getClustersForRequest(req.ClusterIds) + + var ( + keyspaces []*vtadminpb.Keyspace + wg sync.WaitGroup + er concurrency.AllErrorRecorder + m sync.Mutex + ) + + for _, c := range clusters { + wg.Add(1) + + go func(c *cluster.Cluster) { + defer wg.Done() + + if err := c.Vtctld.Dial(ctx); err != nil { + er.RecordError(err) + return + } + + getKeyspacesSpan, getKeyspacesCtx := trace.NewSpan(ctx, "Cluster.GetKeyspaces") + cluster.AnnotateSpan(c, getKeyspacesSpan) + + resp, err := c.Vtctld.GetKeyspaces(getKeyspacesCtx, &vtctldatapb.GetKeyspacesRequest{}) + if err != nil { + er.RecordError(fmt.Errorf("GetKeyspaces(cluster = %s): %w", c.ID, err)) + getKeyspacesSpan.Finish() + return + } + + getKeyspacesSpan.Finish() + + kss := make([]*vtadminpb.Keyspace, 0, len(resp.Keyspaces)) + + var ( + kwg sync.WaitGroup + km sync.Mutex + ) + + for _, ks := range resp.Keyspaces { + kwg.Add(1) + + // Find all shards for each keyspace in the cluster, in parallel + go func(c *cluster.Cluster, ks *vtctldatapb.Keyspace) { + defer kwg.Done() + + shards, err := c.FindAllShardsInKeyspace(ctx, ks.Name, cluster.FindAllShardsInKeyspaceOptions{ + SkipDial: true, + }) + + if err != nil { + er.RecordError(err) + return + } + + km.Lock() + kss = append(kss, &vtadminpb.Keyspace{ + Cluster: c.ToProto(), + Keyspace: ks, + Shards: shards, + }) + km.Unlock() + }(c, ks) + } + + kwg.Wait() + + m.Lock() + keyspaces = append(keyspaces, kss...) + m.Unlock() + }(c) + } + + wg.Wait() + + if er.HasErrors() { + return nil, er.Error() + } + + return &vtadminpb.GetKeyspacesResponse{ + Keyspaces: keyspaces, + }, nil +} + +// GetSchema is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) GetSchema(ctx context.Context, req *vtadminpb.GetSchemaRequest) (*vtadminpb.Schema, error) { + span, ctx := trace.NewSpan(ctx, "API.GetSchema") + defer span.Finish() + + span.Annotate("cluster_id", req.ClusterId) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("table", req.Table) + vtadminproto.AnnotateSpanWithGetSchemaTableSizeOptions(req.TableSizeOptions, span) + + c, ok := api.clusterMap[req.ClusterId] + if !ok { + return nil, fmt.Errorf("%w: no cluster with id %s", errors.ErrUnsupportedCluster, req.ClusterId) + } + + return c.GetSchema(ctx, req.Keyspace, cluster.GetSchemaOptions{ + BaseRequest: &vtctldatapb.GetSchemaRequest{ + Tables: []string{req.Table}, + }, + TableSizeOptions: req.TableSizeOptions, + }) +} + +// GetSchemas is part of the vtadminpb.VTAdminServer interface. +func (api *API) GetSchemas(ctx context.Context, req *vtadminpb.GetSchemasRequest) (*vtadminpb.GetSchemasResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.GetSchemas") + defer span.Finish() + + clusters, _ := api.getClustersForRequest(req.ClusterIds) + + var ( + schemas []*vtadminpb.Schema + wg sync.WaitGroup + er concurrency.AllErrorRecorder + m sync.Mutex + ) + + for _, c := range clusters { + wg.Add(1) + + // Get schemas for the cluster + go func(c *cluster.Cluster) { + defer wg.Done() + + // Since tablets are per-cluster, we can fetch them once + // and use them throughout the other waitgroups. + tablets, err := c.GetTablets(ctx) + if err != nil { + er.RecordError(err) + return + } + + ss, err := api.getSchemas(ctx, c, cluster.GetSchemaOptions{ + Tablets: tablets, + TableSizeOptions: req.TableSizeOptions, + }) + if err != nil { + er.RecordError(err) + return + } + + m.Lock() + schemas = append(schemas, ss...) 
+ m.Unlock() + }(c) + } + + wg.Wait() + + if er.HasErrors() { + return nil, er.Error() + } + + return &vtadminpb.GetSchemasResponse{ + Schemas: schemas, + }, nil +} + +// getSchemas returns all of the schemas across all keyspaces in the given cluster. +func (api *API) getSchemas(ctx context.Context, c *cluster.Cluster, opts cluster.GetSchemaOptions) ([]*vtadminpb.Schema, error) { + if err := c.Vtctld.Dial(ctx); err != nil { + return nil, err + } + + getKeyspacesSpan, getKeyspacesCtx := trace.NewSpan(ctx, "Cluster.GetKeyspaces") + cluster.AnnotateSpan(c, getKeyspacesSpan) + + resp, err := c.Vtctld.GetKeyspaces(getKeyspacesCtx, &vtctldatapb.GetKeyspacesRequest{}) + if err != nil { + getKeyspacesSpan.Finish() + return nil, err + } + + getKeyspacesSpan.Finish() + + var ( + schemas []*vtadminpb.Schema + wg sync.WaitGroup + er concurrency.AllErrorRecorder + m sync.Mutex + ) + + for _, ks := range resp.Keyspaces { + wg.Add(1) + + // Get schemas for the cluster/keyspace + go func(c *cluster.Cluster, ks *vtctldatapb.Keyspace) { + defer wg.Done() + + ss, err := c.GetSchema(ctx, ks.Name, opts) + if err != nil { + // Ignore keyspaces without any serving tablets. + if stderrors.Is(err, errors.ErrNoServingTablet) { + log.Infof(err.Error()) + return + } + + er.RecordError(err) + return + } + + // Ignore keyspaces without schemas + if ss == nil { + log.Infof("No schemas for %s", ks.Name) + return + } + + if len(ss.TableDefinitions) == 0 { + log.Infof("No tables in schema for %s", ks.Name) + return + } + + m.Lock() + schemas = append(schemas, ss) + m.Unlock() + }(c, ks) + } + + wg.Wait() + + if er.HasErrors() { + return nil, er.Error() + } + + return schemas, nil +} + // GetTablet is part of the vtadminpb.VTAdminServer interface. 
func (api *API) GetTablet(ctx context.Context, req *vtadminpb.GetTabletRequest) (*vtadminpb.Tablet, error) { span, ctx := trace.NewSpan(ctx, "API.GetTablet") @@ -176,9 +558,9 @@ func (api *API) GetTablet(ctx context.Context, req *vtadminpb.GetTabletRequest) go func(c *cluster.Cluster) { defer wg.Done() - ts, err := api.getTablets(ctx, c) + ts, err := c.GetTablets(ctx) if err != nil { - er.RecordError(err) + er.RecordError(fmt.Errorf("GetTablets(cluster = %s): %w", c.ID, err)) return } @@ -204,12 +586,12 @@ func (api *API) GetTablet(ctx context.Context, req *vtadminpb.GetTabletRequest) switch len(tablets) { case 0: - return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "%s: %s, searched clusters = %v", ErrNoTablet, req.Hostname, ids) + return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "%s: %s, searched clusters = %v", errors.ErrNoTablet, req.Hostname, ids) case 1: return tablets[0], nil } - return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "%s: %s, searched clusters = %v", ErrAmbiguousTablet, req.Hostname, ids) + return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "%s: %s, searched clusters = %v", errors.ErrAmbiguousTablet, req.Hostname, ids) } // GetTablets is part of the vtadminpb.VTAdminServer interface. @@ -232,9 +614,9 @@ func (api *API) GetTablets(ctx context.Context, req *vtadminpb.GetTabletsRequest go func(c *cluster.Cluster) { defer wg.Done() - ts, err := api.getTablets(ctx, c) + ts, err := c.GetTablets(ctx) if err != nil { - er.RecordError(err) + er.RecordError(fmt.Errorf("GetTablets(cluster = %s): %w", c.ID, err)) return } @@ -255,17 +637,357 @@ func (api *API) GetTablets(ctx context.Context, req *vtadminpb.GetTabletsRequest }, nil } -func (api *API) getTablets(ctx context.Context, c *cluster.Cluster) ([]*vtadminpb.Tablet, error) { - if err := c.DB.Dial(ctx, ""); err != nil { +// GetVSchema is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) GetVSchema(ctx context.Context, req *vtadminpb.GetVSchemaRequest) (*vtadminpb.VSchema, error) { + span, ctx := trace.NewSpan(ctx, "API.GetVSchema") + defer span.Finish() + + c, ok := api.clusterMap[req.ClusterId] + if !ok { + return nil, fmt.Errorf("%w: no such cluster %s", errors.ErrUnsupportedCluster, req.ClusterId) + } + + cluster.AnnotateSpan(c, span) + + if err := c.Vtctld.Dial(ctx); err != nil { return nil, err } - rows, err := c.DB.ShowTablets(ctx) + return c.GetVSchema(ctx, req.Keyspace) +} + +// GetVSchemas is part of the vtadminpb.VTAdminServer interface. +func (api *API) GetVSchemas(ctx context.Context, req *vtadminpb.GetVSchemasRequest) (*vtadminpb.GetVSchemasResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.GetVSchemas") + defer span.Finish() + + clusters, _ := api.getClustersForRequest(req.ClusterIds) + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + vschemas []*vtadminpb.VSchema + ) + + if len(clusters) == 0 { + if len(req.ClusterIds) > 0 { + return nil, fmt.Errorf("%w: %s", errors.ErrUnsupportedCluster, strings.Join(req.ClusterIds, ", ")) + } + + return &vtadminpb.GetVSchemasResponse{VSchemas: []*vtadminpb.VSchema{}}, nil + } + + for _, c := range clusters { + wg.Add(1) + + go func(c *cluster.Cluster) { + defer wg.Done() + + span, ctx := trace.NewSpan(ctx, "Cluster.GetVSchemas") + defer span.Finish() + + cluster.AnnotateSpan(c, span) + + if err := c.Vtctld.Dial(ctx); err != nil { + rec.RecordError(fmt.Errorf("Vtctld.Dial(cluster = %s): %w", c.ID, err)) + return + } + + getKeyspacesSpan, getKeyspacesCtx := trace.NewSpan(ctx, "Cluster.GetKeyspaces") + cluster.AnnotateSpan(c, getKeyspacesSpan) + + keyspaces, err := c.Vtctld.GetKeyspaces(getKeyspacesCtx, &vtctldatapb.GetKeyspacesRequest{}) + if err != nil { + rec.RecordError(fmt.Errorf("GetKeyspaces(cluster = %s): %w", c.ID, err)) + getKeyspacesSpan.Finish() + return + } + + getKeyspacesSpan.Finish() + + var ( + clusterM sync.Mutex + clusterWG 
sync.WaitGroup + clusterRec concurrency.AllErrorRecorder + clusterVSchemas = make([]*vtadminpb.VSchema, 0, len(keyspaces.Keyspaces)) + ) + + for _, keyspace := range keyspaces.Keyspaces { + clusterWG.Add(1) + + go func(keyspace *vtctldatapb.Keyspace) { + defer clusterWG.Done() + vschema, err := c.GetVSchema(ctx, keyspace.Name) + if err != nil { + clusterRec.RecordError(fmt.Errorf("GetVSchema(keyspace = %s): %w", keyspace.Name, err)) + return + } + + clusterM.Lock() + clusterVSchemas = append(clusterVSchemas, vschema) + clusterM.Unlock() + }(keyspace) + } + + clusterWG.Wait() + + if clusterRec.HasErrors() { + rec.RecordError(fmt.Errorf("GetVSchemas(cluster = %s): %w", c.ID, clusterRec.Error())) + return + } + + m.Lock() + vschemas = append(vschemas, clusterVSchemas...) + m.Unlock() + }(c) + } + + wg.Wait() + + if rec.HasErrors() { + return nil, rec.Error() + } + + return &vtadminpb.GetVSchemasResponse{ + VSchemas: vschemas, + }, nil +} + +// GetWorkflow is part of the vtadminpb.VTAdminServer interface. +func (api *API) GetWorkflow(ctx context.Context, req *vtadminpb.GetWorkflowRequest) (*vtadminpb.Workflow, error) { + span, ctx := trace.NewSpan(ctx, "API.GetWorkflow") + defer span.Finish() + + c, ok := api.clusterMap[req.ClusterId] + if !ok { + return nil, fmt.Errorf("%w: no such cluster %s", errors.ErrUnsupportedCluster, req.ClusterId) + } + + cluster.AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow_name", req.Name) + span.Annotate("active_only", req.ActiveOnly) + + return c.GetWorkflow(ctx, req.Keyspace, req.Name, cluster.GetWorkflowOptions{ + ActiveOnly: req.ActiveOnly, + }) +} + +// GetWorkflows is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) GetWorkflows(ctx context.Context, req *vtadminpb.GetWorkflowsRequest) (*vtadminpb.GetWorkflowsResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.GetWorkflows") + defer span.Finish() + + clusters, _ := api.getClustersForRequest(req.ClusterIds) + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results = map[string]*vtadminpb.ClusterWorkflows{} + ) + + for _, c := range clusters { + wg.Add(1) + + go func(c *cluster.Cluster) { + defer wg.Done() + + workflows, err := c.GetWorkflows(ctx, req.Keyspaces, cluster.GetWorkflowsOptions{ + ActiveOnly: req.ActiveOnly, + IgnoreKeyspaces: sets.NewString(req.IgnoreKeyspaces...), + }) + if err != nil { + rec.RecordError(err) + + return + } + + m.Lock() + results[c.ID] = workflows + m.Unlock() + }(c) + } + + wg.Wait() + + if rec.HasErrors() { + return nil, rec.Error() + } + + return &vtadminpb.GetWorkflowsResponse{ + WorkflowsByCluster: results, + }, nil +} + +// VTExplain is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) (*vtadminpb.VTExplainResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.VTExplain") + defer span.Finish() + + if req.Cluster == "" { + return nil, fmt.Errorf("%w: cluster ID is required", errors.ErrInvalidRequest) + } + + if req.Keyspace == "" { + return nil, fmt.Errorf("%w: keyspace name is required", errors.ErrInvalidRequest) + } + + if req.Sql == "" { + return nil, fmt.Errorf("%w: SQL query is required", errors.ErrInvalidRequest) + } + + c, ok := api.clusterMap[req.Cluster] + if !ok { + return nil, fmt.Errorf("%w: %s", errors.ErrUnsupportedCluster, req.Cluster) + } + + span.Annotate("keyspace", req.Keyspace) + cluster.AnnotateSpan(c, span) + + tablet, err := c.FindTablet(ctx, func(t *vtadminpb.Tablet) bool { + return t.Tablet.Keyspace == req.Keyspace && topo.IsInServingGraph(t.Tablet.Type) && t.Tablet.Type != topodatapb.TabletType_MASTER && t.State == vtadminpb.Tablet_SERVING + }) if err != nil { + return nil, fmt.Errorf("cannot find serving, non-primary tablet in keyspace=%s: %w", req.Keyspace, err) + } + + span.Annotate("tablet_alias", topoproto.TabletAliasString(tablet.Tablet.Alias)) + + if err := c.Vtctld.Dial(ctx); err != nil { return nil, err } - return ParseTablets(rows, c) + var ( + wg sync.WaitGroup + er concurrency.AllErrorRecorder + + // Writes to these three variables are, in the strictest sense, unsafe. + // However, there is one goroutine responsible for writing each of these + // values (so, no concurrent writes), and reads are blocked on the call to + // wg.Wait(), so we guarantee that all writes have finished before attempting + // to read anything. 
+ srvVSchema string + schema string + shardMap string + ) + + wg.Add(3) + + // GetSchema + go func(c *cluster.Cluster) { + defer wg.Done() + + res, err := c.GetSchema(ctx, req.Keyspace, cluster.GetSchemaOptions{ + Tablets: []*vtadminpb.Tablet{tablet}, + }) + if err != nil { + er.RecordError(fmt.Errorf("GetSchema(%s): %w", topoproto.TabletAliasString(tablet.Tablet.Alias), err)) + return + } + + schemas := make([]string, len(res.TableDefinitions)) + for i, td := range res.TableDefinitions { + schemas[i] = td.Schema + } + + schema = strings.Join(schemas, ";") + }(c) + + // GetSrvVSchema + go func(c *cluster.Cluster) { + defer wg.Done() + + span, ctx := trace.NewSpan(ctx, "Cluster.GetSrvVSchema") + defer span.Finish() + + span.Annotate("cell", tablet.Tablet.Alias.Cell) + cluster.AnnotateSpan(c, span) + + res, err := c.Vtctld.GetSrvVSchema(ctx, &vtctldatapb.GetSrvVSchemaRequest{ + Cell: tablet.Tablet.Alias.Cell, + }) + + if err != nil { + er.RecordError(fmt.Errorf("GetSrvVSchema(%s): %w", tablet.Tablet.Alias.Cell, err)) + return + } + + ksvs, ok := res.SrvVSchema.Keyspaces[req.Keyspace] + if !ok { + er.RecordError(fmt.Errorf("%w: keyspace %s", errors.ErrNoSrvVSchema, req.Keyspace)) + return + } + + ksvsb, err := json.Marshal(&ksvs) + if err != nil { + er.RecordError(err) + return + } + + srvVSchema = fmt.Sprintf(`{"%s": %s}`, req.Keyspace, string(ksvsb)) + }(c) + + // FindAllShardsInKeyspace + go func(c *cluster.Cluster) { + defer wg.Done() + + shards, err := c.FindAllShardsInKeyspace(ctx, req.Keyspace, cluster.FindAllShardsInKeyspaceOptions{ + SkipDial: true, + }) + if err != nil { + er.RecordError(err) + return + } + + vtsm := make(map[string]*topodatapb.Shard) + for _, s := range shards { + vtsm[s.Name] = s.Shard + } + + vtsb, err := json.Marshal(&vtsm) + if err != nil { + er.RecordError(err) + return + } + + shardMap = fmt.Sprintf(`{"%s": %s}`, req.Keyspace, string(vtsb)) + }(c) + + wg.Wait() + + if er.HasErrors() { + return nil, er.Error() + } + + opts := 
&vtexplain.Options{ReplicationMode: "ROW"} + + lockWaitStart := time.Now() + + api.vtexplainLock.Lock() + defer api.vtexplainLock.Unlock() + + lockWaitTime := time.Since(lockWaitStart) + log.Infof("vtexplain lock wait time: %s", lockWaitTime) + + span.Annotate("vtexplain_lock_wait_time", lockWaitTime.String()) + + if err := vtexplain.Init(srvVSchema, schema, shardMap, opts); err != nil { + return nil, fmt.Errorf("error initilaizing vtexplain: %w", err) + } + + defer vtexplain.Stop() + + plans, err := vtexplain.Run(req.Sql) + if err != nil { + return nil, fmt.Errorf("error running vtexplain: %w", err) + } + + response := vtexplain.ExplainsAsText(plans) + return &vtadminpb.VTExplainResponse{ + Response: response, + }, nil } func (api *API) getClustersForRequest(ids []string) ([]*cluster.Cluster, []string) { diff --git a/go/vt/vtadmin/api_test.go b/go/vt/vtadmin/api_test.go index 990f8a01954..dd895fb3962 100644 --- a/go/vt/vtadmin/api_test.go +++ b/go/vt/vtadmin/api_test.go @@ -18,547 +18,4117 @@ package vtadmin import ( "context" - "database/sql" + "errors" "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/vitessdriver" + "vitess.io/vitess/go/vt/grpccommon" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin/cluster" "vitess.io/vitess/go/vt/vtadmin/cluster/discovery/fakediscovery" + vtadminerrors "vitess.io/vitess/go/vt/vtadmin/errors" "vitess.io/vitess/go/vt/vtadmin/grpcserver" "vitess.io/vitess/go/vt/vtadmin/http" - "vitess.io/vitess/go/vt/vtadmin/vtsql" - "vitess.io/vitess/go/vt/vtadmin/vtsql/fakevtsql" + vtadmintestutil "vitess.io/vitess/go/vt/vtadmin/testutil" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vttablet/tmclient" + querypb 
"vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" + "vitess.io/vitess/go/vt/proto/vttime" ) +func TestFindSchema(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusters []vtadmintestutil.TestClusterConfig + req *vtadminpb.FindSchemaRequest + expected *vtadminpb.Schema + shouldErr bool + }{ + { + name: "exact match", + clusters: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + }, + req: &vtadminpb.FindSchemaRequest{ + Table: "testtable", + }, + expected: &vtadminpb.Schema{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + shouldErr: false, + }, + { + name: "error getting tablets", + clusters: 
[]vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + + DBConfig: vtadmintestutil.Dbcfg{ + ShouldErr: true, + }, + }, + }, + req: &vtadminpb.FindSchemaRequest{ + Table: "testtable", + }, + shouldErr: true, + }, + { + name: "error getting keyspaces", + clusters: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Error: fmt.Errorf("GetKeyspaces: %w", assert.AnError), + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + }, + req: &vtadminpb.FindSchemaRequest{ + Table: "testtable", + }, + shouldErr: true, + }, + { + name: "error getting schemas", + clusters: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Error: fmt.Errorf("GetSchema: %w", assert.AnError), + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + }, + req: &vtadminpb.FindSchemaRequest{ + Table: "testtable", + }, + shouldErr: true, + }, + { + name: "no schema found", + clusters: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: 
&vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "othertable", + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + }, + req: &vtadminpb.FindSchemaRequest{ + Table: "testtable", + }, + shouldErr: true, + }, + { + name: "ambiguous schema errors", + clusters: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: 
[]*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone2-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Keyspace: "testkeyspace", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + }, + req: &vtadminpb.FindSchemaRequest{ + Table: "testtable", + }, + shouldErr: true, + }, + { + name: "ambiguous schema with request scoped to single cluster passes", + clusters: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace1", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace1", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace2", + }, + }, + }, + GetSchemaResults: 
map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone2-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Keyspace: "testkeyspace2", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + }, + req: &vtadminpb.FindSchemaRequest{ + Table: "testtable", + ClusterIds: []string{"c1"}, + }, + expected: &vtadminpb.Schema{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace1", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + shouldErr: false, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + clusters := make([]*cluster.Cluster, len(tt.clusters)) + for i, cfg := range tt.clusters { + clusters[i] = vtadmintestutil.BuildCluster(cfg) + } + + api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + + resp, err := api.FindSchema(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } + + t.Run("size aggregation", func(t *testing.T) { + t.Parallel() + + c1pb := &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + } + c2pb := &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + } + + c1 := vtadmintestutil.BuildCluster( + vtadmintestutil.TestClusterConfig{ + Cluster: c1pb, + VtctldClient: &vtadmintestutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: 
&vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Keyspace: "testkeyspace", + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Keyspace: "testkeyspace", + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + "ks1": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-": { + Keyspace: "ks1", + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + }, + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + {Name: "testkeyspace"}, + {Name: "ks1"}, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "c1zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + RowCount: 10, + DataLength: 100, + }, + }, + }, + }, + }, + "c1zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + RowCount: 20, + DataLength: 200, + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Cluster: c1pb, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Cluster: c1pb, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + ) + c2 := vtadmintestutil.BuildCluster( + vtadmintestutil.TestClusterConfig{ + Cluster: c2pb, + VtctldClient: &vtadmintestutil.VtctldClient{ + 
FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "ks2": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-": { + Keyspace: "ks2", + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + }, + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "ks2", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "c2z1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{}, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Cluster: c2pb, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c2z1", + Uid: 100, + }, + Keyspace: "ks2", + Shard: "-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + ) + + api := NewAPI([]*cluster.Cluster{c1, c2}, grpcserver.Options{}, http.Options{}) + schema, err := api.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ + Table: "testtable", + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }) + + expected := &vtadminpb.Schema{ + Cluster: c1pb, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{ + "testtable": { + RowCount: 10 + 20, + DataLength: 100 + 200, + ByShard: map[string]*vtadminpb.Schema_ShardTableSize{ + "-80": { + RowCount: 10, + DataLength: 100, + }, + "80-": { + RowCount: 20, + DataLength: 200, + }, + }, + }, + }, + } + + if schema != nil { + for _, td := range schema.TableDefinitions { + // Zero these out because they're non-deterministic and also not + // relevant to the final result. 
+ td.RowCount = 0 + td.DataLength = 0 + } + } + + assert.NoError(t, err) + assert.Equal(t, expected, schema) + }) +} + +func TestGetClusters(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusters []*cluster.Cluster + expected []*vtadminpb.Cluster + }{ + { + name: "multiple clusters", + clusters: []*cluster.Cluster{ + { + ID: "c1", + Name: "cluster1", + Discovery: fakediscovery.New(), + }, + { + ID: "c2", + Name: "cluster2", + Discovery: fakediscovery.New(), + }, + }, + expected: []*vtadminpb.Cluster{ + { + Id: "c1", + Name: "cluster1", + }, + { + Id: "c2", + Name: "cluster2", + }, + }, + }, + { + name: "no clusters", + clusters: []*cluster.Cluster{}, + expected: []*vtadminpb.Cluster{}, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + api := NewAPI(tt.clusters, grpcserver.Options{}, http.Options{}) + + resp, err := api.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) + assert.NoError(t, err) + assert.ElementsMatch(t, tt.expected, resp.Clusters) + }) + } +} + func TestGetGates(t *testing.T) { + t.Parallel() + fakedisco1 := fakediscovery.New() cluster1 := &cluster.Cluster{ ID: "c1", Name: "cluster1", Discovery: fakedisco1, } - cluster1Gates := []*vtadminpb.VTGate{ + cluster1Gates := []*vtadminpb.VTGate{ + { + Hostname: "cluster1-gate1", + }, + { + Hostname: "cluster1-gate2", + }, + { + Hostname: "cluster1-gate3", + }, + } + fakedisco1.AddTaggedGates(nil, cluster1Gates...) 
+ + expectedCluster1Gates := []*vtadminpb.VTGate{ + { + Cluster: &vtadminpb.Cluster{ + Id: cluster1.ID, + Name: cluster1.Name, + }, + Hostname: "cluster1-gate1", + }, + { + Cluster: &vtadminpb.Cluster{ + Id: cluster1.ID, + Name: cluster1.Name, + }, + Hostname: "cluster1-gate2", + }, + { + Cluster: &vtadminpb.Cluster{ + Id: cluster1.ID, + Name: cluster1.Name, + }, + Hostname: "cluster1-gate3", + }, + } + + fakedisco2 := fakediscovery.New() + cluster2 := &cluster.Cluster{ + ID: "c2", + Name: "cluster2", + Discovery: fakedisco2, + } + cluster2Gates := []*vtadminpb.VTGate{ + { + Hostname: "cluster2-gate1", + }, + } + fakedisco2.AddTaggedGates(nil, cluster2Gates...) + + expectedCluster2Gates := []*vtadminpb.VTGate{ + { + Cluster: &vtadminpb.Cluster{ + Id: cluster2.ID, + Name: cluster2.Name, + }, + Hostname: "cluster2-gate1", + }, + } + + api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, grpcserver.Options{}, http.Options{}) + ctx := context.Background() + + resp, err := api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) + assert.NoError(t, err) + assert.ElementsMatch(t, append(expectedCluster1Gates, expectedCluster2Gates...), resp.Gates) + + resp, err = api.GetGates(ctx, &vtadminpb.GetGatesRequest{ClusterIds: []string{cluster1.ID}}) + assert.NoError(t, err) + assert.ElementsMatch(t, expectedCluster1Gates, resp.Gates) + + fakedisco1.SetGatesError(true) + + resp, err = api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) + assert.Error(t, err) + assert.Nil(t, resp) +} + +func TestGetKeyspaces(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterKeyspaces [][]*vtctldatapb.Keyspace + clusterShards [][]*vtctldatapb.Shard + req *vtadminpb.GetKeyspacesRequest + expected *vtadminpb.GetKeyspacesResponse + }{ + { + name: "multiple clusters, multiple shards", + clusterKeyspaces: [][]*vtctldatapb.Keyspace{ + //cluster0 + { + { + Name: "c0-ks0", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + //cluster1 + { + { + Name: "c1-ks0", + Keyspace: 
&topodatapb.Keyspace{}, + }, + }, + }, + clusterShards: [][]*vtctldatapb.Shard{ + //cluster0 + { + { + Keyspace: "c0-ks0", + Name: "-80", + }, + { + Keyspace: "c0-ks0", + Name: "80-", + }, + }, + //cluster1 + { + { + Keyspace: "c1-ks0", + Name: "-", + }, + }, + }, + req: &vtadminpb.GetKeyspacesRequest{}, + expected: &vtadminpb.GetKeyspacesResponse{ + Keyspaces: []*vtadminpb.Keyspace{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Keyspace: &vtctldatapb.Keyspace{ + Name: "c0-ks0", + Keyspace: &topodatapb.Keyspace{}, + }, + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Keyspace: "c0-ks0", + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Keyspace: "c0-ks0", + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: &vtctldatapb.Keyspace{ + Name: "c1-ks0", + Keyspace: &topodatapb.Keyspace{}, + }, + Shards: map[string]*vtctldatapb.Shard{ + "-": { + Keyspace: "c1-ks0", + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + }, + }, + { + name: "with snapshot", + clusterKeyspaces: [][]*vtctldatapb.Keyspace{ + // cluster0 + { + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + { + Name: "snapshot", + Keyspace: &topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, + BaseKeyspace: "testkeyspace", + SnapshotTime: &vttime.Time{Seconds: 10, Nanoseconds: 1}, + }, + }, + }, + }, + req: &vtadminpb.GetKeyspacesRequest{}, + expected: &vtadminpb.GetKeyspacesResponse{ + Keyspaces: []*vtadminpb.Keyspace{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Keyspace: &vtctldatapb.Keyspace{ + Name: "snapshot", + Keyspace: 
&topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, + BaseKeyspace: "testkeyspace", + SnapshotTime: &vttime.Time{Seconds: 10, Nanoseconds: 1}, + }, + }, + }, + }, + }, + }, + { + name: "filtered by cluster ID", + clusterKeyspaces: [][]*vtctldatapb.Keyspace{ + //cluster0 + { + { + Name: "c0-ks0", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + //cluster1 + { + { + Name: "c1-ks0", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + }, + req: &vtadminpb.GetKeyspacesRequest{ + ClusterIds: []string{"c1"}, + }, + expected: &vtadminpb.GetKeyspacesResponse{ + Keyspaces: []*vtadminpb.Keyspace{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: &vtctldatapb.Keyspace{ + Name: "c1-ks0", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Note that these test cases were written prior to the existence of + // WithTestServers, so they are all written with the assumption that + // there are exactly 2 clusters. + topos := []*topo.Server{ + memorytopo.NewServer("c0_cell1"), + memorytopo.NewServer("c1_cell1"), + } + + for cdx, cks := range tt.clusterKeyspaces { + for _, ks := range cks { + testutil.AddKeyspace(ctx, t, topos[cdx], ks) + } + } + + for cdx, css := range tt.clusterShards { + testutil.AddShards(ctx, t, topos[cdx], css...) 
+ } + + servers := []vtctlservicepb.VtctldServer{ + testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }), + testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }), + } + + testutil.WithTestServers(t, func(t *testing.T, clients ...vtctldclient.VtctldClient) { + clusters := []*cluster.Cluster{ + vtadmintestutil.BuildCluster(vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + VtctldClient: clients[0], + }), + vtadmintestutil.BuildCluster(vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: clients[1], + }), + } + + api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + resp, err := api.GetKeyspaces(ctx, tt.req) + require.NoError(t, err) + + vtadmintestutil.AssertKeyspaceSlicesEqual(t, tt.expected.Keyspaces, resp.Keyspaces) + }, servers...) 
+ }) + } +} + +func TestGetSchema(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterID int + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*vtadminpb.Tablet + req *vtadminpb.GetSchemaRequest + expected *vtadminpb.Schema + shouldErr bool + }{ + { + name: "success", + clusterID: 1, + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + GetSchemaResults: map[string]struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{ + "zone1-0000000100": { + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + }, + }, + }, + }, + tablets: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + }, + }, + req: &vtadminpb.GetSchemaRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + Table: "testtable", + }, + expected: &vtadminpb.Schema{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + shouldErr: false, + }, + { + name: "cluster not found", + clusterID: 1, // results in clusterId == "c1" + ts: memorytopo.NewServer("zone1"), + tablets: nil, + req: &vtadminpb.GetSchemaRequest{ + ClusterId: "c2", + Keyspace: "testkeyspace", + Table: "testtable", + }, + expected: nil, + shouldErr: true, + }, + { + name: "tablet not found for keyspace", + clusterID: 1, + ts: memorytopo.NewServer("zone1"), + tablets: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + 
Uid: 100, + }, + Keyspace: "otherkeyspace", + }, + }, + }, + req: &vtadminpb.GetSchemaRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + Table: "testtable", + }, + expected: nil, + shouldErr: true, + }, + { + name: "no serving tablet found for keyspace", + clusterID: 1, + ts: memorytopo.NewServer("zone1"), + tablets: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + }, + }, + req: &vtadminpb.GetSchemaRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + Table: "testtable", + }, + expected: nil, + shouldErr: true, + }, + { + name: "error in GetSchema call", + clusterID: 1, + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + GetSchemaResults: map[string]struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{ + "zone1-0000000100": { + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + { + Name: "table2", + }, + { + Name: "table3", + }, + }, + }, + Error: assert.AnError, + }, + }, + }, + tablets: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + }, + }, + req: &vtadminpb.GetSchemaRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + Table: "testtable", + }, + expected: nil, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }) 
+ + testutil.AddTablets(ctx, t, tt.ts, nil, vtadmintestutil.TopodataTabletsFromVTAdminTablets(tt.tablets)...) + + testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + c := vtadmintestutil.BuildCluster(vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: fmt.Sprintf("c%d", tt.clusterID), + Name: fmt.Sprintf("cluster%d", tt.clusterID), + }, + VtctldClient: client, + Tablets: tt.tablets, + }) + api := NewAPI([]*cluster.Cluster{c}, grpcserver.Options{}, http.Options{}) + + resp, err := api.GetSchema(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + }) + } + + t.Run("size aggregation", func(t *testing.T) { + t.Parallel() + + c1pb := &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + } + c1 := vtadmintestutil.BuildCluster( + vtadmintestutil.TestClusterConfig{ + Cluster: c1pb, + VtctldClient: &vtadmintestutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Keyspace: "testkeyspace", + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Keyspace: "testkeyspace", + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "c1zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + RowCount: 10, + DataLength: 100, + }, + }, + }, + }, + }, + "c1zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: 
[]*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + RowCount: 20, + DataLength: 200, + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Cluster: c1pb, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Cluster: c1pb, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + ) + c2 := vtadmintestutil.BuildCluster( + vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + }, + ) + + api := NewAPI([]*cluster.Cluster{c1, c2}, grpcserver.Options{}, http.Options{}) + schema, err := api.GetSchema(ctx, &vtadminpb.GetSchemaRequest{ + ClusterId: c1.ID, + Keyspace: "testkeyspace", + Table: "testtable", + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }) + + expected := &vtadminpb.Schema{ + Cluster: c1pb, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{ + "testtable": { + RowCount: 10 + 20, + DataLength: 100 + 200, + ByShard: map[string]*vtadminpb.Schema_ShardTableSize{ + "-80": { + RowCount: 10, + DataLength: 100, + }, + "80-": { + RowCount: 20, + DataLength: 200, + }, + }, + }, + }, + } + + if schema != nil { + for _, td := range schema.TableDefinitions { + // Zero these out because they're non-deterministic and also not + // relevant to the final result. 
+ td.RowCount = 0 + td.DataLength = 0 + } + } + + assert.NoError(t, err) + assert.Equal(t, expected, schema) + }) +} + +func TestGetSchemas(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterTablets [][]*vtadminpb.Tablet + // Indexed by tablet alias + tabletSchemas map[string]*tabletmanagerdatapb.SchemaDefinition + req *vtadminpb.GetSchemasRequest + expected *vtadminpb.GetSchemasResponse + }{ + { + name: "one schema in one cluster", + clusterTablets: [][]*vtadminpb.Tablet{ + // cluster0 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + // cluster1 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + }, + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{ + "c0_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetSchemasRequest{}, + expected: &vtadminpb.GetSchemasResponse{ + Schemas: []*vtadminpb.Schema{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Keyspace: "commerce", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + }, + }, + }, + { + name: 
"one schema in each cluster", + clusterTablets: [][]*vtadminpb.Tablet{ + // cluster0 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + // cluster1 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + }, + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{ + "c0_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + "c1_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t2", + Schema: `CREATE TABLE t2 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetSchemasRequest{}, + expected: &vtadminpb.GetSchemasResponse{ + Schemas: []*vtadminpb.Schema{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Keyspace: "commerce", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: 
"cluster1", + }, + Keyspace: "commerce", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t2", + Schema: `CREATE TABLE t2 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + }, + }, + }, + { + name: "filtered by cluster ID", + clusterTablets: [][]*vtadminpb.Tablet{ + // cluster0 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + // cluster1 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + }, + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{ + "c0_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + "c1_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t2", + Schema: `CREATE TABLE t2 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetSchemasRequest{ + ClusterIds: []string{"c1"}, + }, + expected: &vtadminpb.GetSchemasResponse{ + Schemas: []*vtadminpb.Schema{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: 
"cluster1", + }, + Keyspace: "commerce", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t2", + Schema: `CREATE TABLE t2 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + }, + }, + }, + { + name: "filtered by cluster ID that doesn't exist", + clusterTablets: [][]*vtadminpb.Tablet{ + // cluster0 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + }, + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{ + "c0_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetSchemasRequest{ + ClusterIds: []string{"nope"}, + }, + expected: &vtadminpb.GetSchemasResponse{ + Schemas: []*vtadminpb.Schema{}, + }, + }, + { + name: "no schemas for any cluster", + clusterTablets: [][]*vtadminpb.Tablet{ + // cluster0 + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + }, + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{}, + req: &vtadminpb.GetSchemasRequest{}, + expected: &vtadminpb.GetSchemasResponse{ + Schemas: []*vtadminpb.Schema{}, + }, + }, + { + name: "no serving tablets", + clusterTablets: [][]*vtadminpb.Tablet{ + // cluster0 + { + { + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: 
&topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + }, + }, + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{ + "c0_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetSchemasRequest{}, + expected: &vtadminpb.GetSchemasResponse{ + Schemas: []*vtadminpb.Schema{}, + }, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + // Note that these test cases were written prior to the existence of + // WithTestServers, so they are all written with the assumption that + // there are exactly 2 clusters. + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + topos := []*topo.Server{ + memorytopo.NewServer("c0_cell1"), + memorytopo.NewServer("c1_cell1"), + } + + tmc := testutil.TabletManagerClient{ + GetSchemaResults: map[string]struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{}, + } + + vtctlds := []vtctlservicepb.VtctldServer{ + testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }), + testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }), + } + + testutil.WithTestServers(t, func(t *testing.T, clients ...vtctldclient.VtctldClient) { + clusters := make([]*cluster.Cluster, len(topos)) + for cdx, toposerver := range topos { + // Handle when a test doesn't define any tablets for a given cluster. 
+ var cts []*vtadminpb.Tablet + if cdx < len(tt.clusterTablets) { + cts = tt.clusterTablets[cdx] + } + + for _, tablet := range cts { + // AddTablet also adds the keyspace + shard for us. + testutil.AddTablet(ctx, t, toposerver, tablet.Tablet, nil) + + // Adds each SchemaDefinition to the fake TabletManagerClient, or nil + // if there are no schemas for that tablet. (All tablet aliases must + // exist in the map. Otherwise, TabletManagerClient will return an error when + // looking up the schema with tablet alias that doesn't exist.) + alias := topoproto.TabletAliasString(tablet.Tablet.Alias) + tmc.GetSchemaResults[alias] = struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{ + Schema: tt.tabletSchemas[alias], + Error: nil, + } + } + + clusters[cdx] = vtadmintestutil.BuildCluster(vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: fmt.Sprintf("c%d", cdx), + Name: fmt.Sprintf("cluster%d", cdx), + }, + VtctldClient: clients[cdx], + Tablets: cts, + }) + } + + api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + + resp, err := api.GetSchemas(ctx, tt.req) + require.NoError(t, err) + + vtadmintestutil.AssertSchemaSlicesEqual(t, tt.expected.Schemas, resp.Schemas, tt.name) + }, vtctlds...) 
+ }) + } + + t.Run("size aggregation", func(t *testing.T) { + t.Parallel() + + c1pb := &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + } + c2pb := &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + } + + c1 := vtadmintestutil.BuildCluster( + vtadmintestutil.TestClusterConfig{ + Cluster: c1pb, + VtctldClient: &vtadmintestutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Keyspace: "testkeyspace", + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Keyspace: "testkeyspace", + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + "ks1": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-": { + Keyspace: "ks1", + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + }, + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + {Name: "testkeyspace"}, + {Name: "ks1"}, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "c1zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + RowCount: 10, + DataLength: 100, + }, + }, + }, + }, + }, + "c1zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + RowCount: 20, + DataLength: 200, + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Cluster: c1pb, + Tablet: &topodatapb.Tablet{ + Alias: 
&topodatapb.TabletAlias{ + Cell: "c1zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Cluster: c1pb, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c1zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + ) + c2 := vtadmintestutil.BuildCluster( + vtadmintestutil.TestClusterConfig{ + Cluster: c2pb, + VtctldClient: &vtadmintestutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "ks2": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-": { + Keyspace: "ks2", + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + }, + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "ks2", + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "c2z1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t2", + DataLength: 5, + RowCount: 7, + }, + { + Name: "_t2_ghc", + DataLength: 5, + RowCount: 7, + }, + }, + }, + }, + }, + }, + }, + Tablets: []*vtadminpb.Tablet{ + { + Cluster: c2pb, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c2z1", + Uid: 100, + }, + Keyspace: "ks2", + Shard: "-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + ) + + api := NewAPI([]*cluster.Cluster{c1, c2}, grpcserver.Options{}, http.Options{}) + resp, err := api.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{ + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }) + + expected := &vtadminpb.GetSchemasResponse{ + Schemas: 
[]*vtadminpb.Schema{ + { + Cluster: c1pb, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "testtable", + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{ + "testtable": { + RowCount: 10 + 20, + DataLength: 100 + 200, + ByShard: map[string]*vtadminpb.Schema_ShardTableSize{ + "-80": { + RowCount: 10, + DataLength: 100, + }, + "80-": { + RowCount: 20, + DataLength: 200, + }, + }, + }, + }, + }, + { + Cluster: c2pb, + Keyspace: "ks2", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + {Name: "t2"}, + {Name: "_t2_ghc"}, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{ + "t2": { + DataLength: 5, + RowCount: 7, + ByShard: map[string]*vtadminpb.Schema_ShardTableSize{ + "-": { + DataLength: 5, + RowCount: 7, + }, + }, + }, + "_t2_ghc": { + DataLength: 5, + RowCount: 7, + ByShard: map[string]*vtadminpb.Schema_ShardTableSize{ + "-": { + DataLength: 5, + RowCount: 7, + }, + }, + }, + }, + }, + }, + } + + if resp != nil { + for _, schema := range resp.Schemas { + for _, td := range schema.TableDefinitions { + // Zero these out because they're non-deterministic and also not + // relevant to the final result. 
+ td.RowCount = 0 + td.DataLength = 0 + } + } + } + + assert.NoError(t, err) + assert.ElementsMatch(t, expected.Schemas, resp.Schemas) + }) +} + +func TestGetTablet(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterTablets [][]*vtadminpb.Tablet + dbconfigs map[string]vtadmintestutil.Dbcfg + req *vtadminpb.GetTabletRequest + expected *vtadminpb.Tablet + shouldErr bool + }{ + { + name: "single cluster", + clusterTablets: [][]*vtadminpb.Tablet{ + { + /* cluster 0 */ + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{}, + req: &vtadminpb.GetTabletRequest{ + Hostname: "ks1-00-00-zone1-a", + }, + expected: &vtadminpb.Tablet{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + shouldErr: false, + }, + { + name: "one cluster errors", + clusterTablets: [][]*vtadminpb.Tablet{ + /* cluster 0 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + /* cluster 1 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 200, + Cell: "zone1", + }, + Hostname: "ks2-00-00-zone1-a", + Keyspace: "ks2", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{ + "c1": {ShouldErr: true}, + }, + req: 
&vtadminpb.GetTabletRequest{ + Hostname: "doesn't matter", + }, + expected: nil, + shouldErr: true, + }, + { + name: "multi cluster, selecting one with tablet", + clusterTablets: [][]*vtadminpb.Tablet{ + /* cluster 0 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + /* cluster 1 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 200, + Cell: "zone1", + }, + Hostname: "ks2-00-00-zone1-a", + Keyspace: "ks2", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{}, + req: &vtadminpb.GetTabletRequest{ + Hostname: "ks1-00-00-zone1-a", + ClusterIds: []string{"c0"}, + }, + expected: &vtadminpb.Tablet{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + shouldErr: false, + }, + { + name: "multi cluster, multiple results", + clusterTablets: [][]*vtadminpb.Tablet{ + /* cluster 0 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + /* cluster 1 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 200, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{}, + req: 
&vtadminpb.GetTabletRequest{ + Hostname: "ks1-00-00-zone1-a", + }, + expected: nil, + shouldErr: true, + }, + { + name: "no results", + clusterTablets: [][]*vtadminpb.Tablet{ + /* cluster 0 */ + {}, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{}, + req: &vtadminpb.GetTabletRequest{ + Hostname: "ks1-00-00-zone1-a", + }, + expected: nil, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + clusters := make([]*cluster.Cluster, len(tt.clusterTablets)) + + for i, tablets := range tt.clusterTablets { + cid := fmt.Sprintf("c%d", i) + dbconfigs := tt.dbconfigs[cid] + + clusters[i] = vtadmintestutil.BuildCluster(vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: cid, + Name: fmt.Sprintf("cluster%d", i), + }, + Tablets: tablets, + DBConfig: dbconfigs, + }) + } + + api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + resp, err := api.GetTablet(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestGetTablets(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterTablets [][]*vtadminpb.Tablet + dbconfigs map[string]vtadmintestutil.Dbcfg + req *vtadminpb.GetTabletsRequest + expected []*vtadminpb.Tablet + shouldErr bool + }{ + { + name: "single cluster", + clusterTablets: [][]*vtadminpb.Tablet{ + { + /* cluster 0 */ + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{}, + req: &vtadminpb.GetTabletsRequest{}, + expected: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: 
&topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + shouldErr: false, + }, + { + name: "one cluster errors", + clusterTablets: [][]*vtadminpb.Tablet{ + /* cluster 0 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + /* cluster 1 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 200, + Cell: "zone1", + }, + Hostname: "ks2-00-00-zone1-a", + Keyspace: "ks2", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{ + "c1": {ShouldErr: true}, + }, + req: &vtadminpb.GetTabletsRequest{}, + expected: nil, + shouldErr: true, + }, + { + name: "multi cluster, selecting one", + clusterTablets: [][]*vtadminpb.Tablet{ + /* cluster 0 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + /* cluster 1 */ + { + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 200, + Cell: "zone1", + }, + Hostname: "ks2-00-00-zone1-a", + Keyspace: "ks2", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + }, + dbconfigs: map[string]vtadmintestutil.Dbcfg{}, + req: &vtadminpb.GetTabletsRequest{ClusterIds: []string{"c0"}}, + expected: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 
100, + Cell: "zone1", + }, + Hostname: "ks1-00-00-zone1-a", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + shouldErr: false, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + clusters := make([]*cluster.Cluster, len(tt.clusterTablets)) + + for i, tablets := range tt.clusterTablets { + cid := fmt.Sprintf("c%d", i) + dbconfigs := tt.dbconfigs[cid] + + clusters[i] = vtadmintestutil.BuildCluster(vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: cid, + Name: fmt.Sprintf("cluster%d", i), + }, + Tablets: tablets, + DBConfig: dbconfigs, + }) + } + + api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + resp, err := api.GetTablets(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.ElementsMatch(t, tt.expected, resp.Tablets) + }) + } +} + +func TestGetVSchema(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterCfg vtadmintestutil.TestClusterConfig + req *vtadminpb.GetVSchemaRequest + expected *vtadminpb.VSchema + shouldErr bool + }{ { - Hostname: "cluster1-gate1", + name: "success", + clusterCfg: vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "md5hash", + }, + }, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetVSchemaRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + }, + expected: &vtadminpb.VSchema{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Name: "testkeyspace", + VSchema: &vschemapb.Keyspace{ + Sharded: true, + 
Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "md5hash", + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "no vschema for keyspace", + clusterCfg: vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "md5hash", + }, + }, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetVSchemaRequest{ + ClusterId: "c1", + Keyspace: "otherkeyspace", + }, + expected: nil, + shouldErr: true, + }, + { + name: "cluster not found", + clusterCfg: vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + }, + req: &vtadminpb.GetVSchemaRequest{ + ClusterId: "c2", + Keyspace: "testkeyspace", + }, + expected: nil, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + clusters := []*cluster.Cluster{vtadmintestutil.BuildCluster(tt.clusterCfg)} + api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + + resp, err := api.GetVSchema(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestGetVSchemas(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterCfgs []vtadmintestutil.TestClusterConfig + req *vtadminpb.GetVSchemasRequest + expected *vtadminpb.GetVSchemasResponse + shouldErr bool + }{ + { + name: "success", + clusterCfgs: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + 
Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "k2", + }, + }, + }, + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "k2": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetVSchemasRequest{}, + expected: &vtadminpb.GetVSchemasResponse{ + VSchemas: []*vtadminpb.VSchema{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Name: "testkeyspace", + VSchema: &vschemapb.Keyspace{}, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + Name: "k2", + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + shouldErr: false, + }, + { + name: "requesting specific clusters", + clusterCfgs: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: 
&vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "k2", + }, + }, + }, + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "k2": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetVSchemasRequest{ + ClusterIds: []string{"c2"}, + }, + expected: &vtadminpb.GetVSchemasResponse{ + VSchemas: []*vtadminpb.VSchema{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + Name: "k2", + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + shouldErr: false, + }, + { + name: "GetKeyspaces failure", + clusterCfgs: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Error: assert.AnError, + }, + }, + }, + }, + req: &vtadminpb.GetVSchemasRequest{}, + expected: nil, + shouldErr: true, + }, + { + name: "GetVSchema failure", + clusterCfgs: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: 
"testkeyspace", + }, + }, + }, + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Error: assert.AnError, + }, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "k2", + }, + }, + }, + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "k2": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{}, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetVSchemasRequest{}, + expected: nil, + shouldErr: true, }, { - Hostname: "cluster1-gate2", + name: "no clusters specified", + clusterCfgs: []vtadmintestutil.TestClusterConfig{}, + req: &vtadminpb.GetVSchemasRequest{}, + expected: &vtadminpb.GetVSchemasResponse{ + VSchemas: []*vtadminpb.VSchema{}, + }, + shouldErr: false, }, { - Hostname: "cluster1-gate3", + name: "requested invalid cluster", + clusterCfgs: []vtadmintestutil.TestClusterConfig{}, + req: &vtadminpb.GetVSchemasRequest{ + ClusterIds: []string{"c1"}, + }, + expected: nil, + shouldErr: true, }, } - fakedisco1.AddTaggedGates(nil, cluster1Gates...) + ctx := context.Background() - fakedisco2 := fakediscovery.New() - cluster2 := &cluster.Cluster{ - ID: "c2", - Name: "cluster2", - Discovery: fakedisco2, + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if tt.req == nil { + t.SkipNow() + } + + clusters := vtadmintestutil.BuildClusters(tt.clusterCfgs...) 
+ api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + + resp, err := api.GetVSchemas(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.ElementsMatch(t, tt.expected.VSchemas, resp.VSchemas) + }) } - cluster2Gates := []*vtadminpb.VTGate{ +} + +func TestGetWorkflow(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cfgs []vtadmintestutil.TestClusterConfig + req *vtadminpb.GetWorkflowRequest + expected *vtadminpb.Workflow + shouldErr bool + }{ { - Hostname: "cluster2-gate1", + name: "success", + cfgs: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetWorkflowRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + Name: "workflow1", + }, + expected: &vtadminpb.Workflow{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + shouldErr: false, + }, + { + name: "no such workflow", + cfgs: []vtadmintestutil.TestClusterConfig{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetWorkflowRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + Name: "workflow3", + }, 
+ expected: nil, + shouldErr: true, + }, + { + name: "no such cluster", + cfgs: []vtadmintestutil.TestClusterConfig{}, + req: &vtadminpb.GetWorkflowRequest{ + ClusterId: "c1", + Keyspace: "testkeyspace", + Name: "workflow1", + }, + expected: nil, + shouldErr: true, }, } - fakedisco2.AddTaggedGates(nil, cluster2Gates...) - - api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, grpcserver.Options{}, http.Options{}) ctx := context.Background() - resp, err := api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) - assert.NoError(t, err) - assert.ElementsMatch(t, append(cluster1Gates, cluster2Gates...), resp.Gates) + for _, tt := range tests { + tt := tt - resp, err = api.GetGates(ctx, &vtadminpb.GetGatesRequest{ClusterIds: []string{cluster1.ID}}) - assert.NoError(t, err) - assert.ElementsMatch(t, cluster1Gates, resp.Gates) + t.Run(tt.name, func(t *testing.T) { + t.Parallel() - fakedisco1.SetGatesError(true) + api := NewAPI( + vtadmintestutil.BuildClusters(tt.cfgs...), + grpcserver.Options{}, + http.Options{}, + ) - resp, err = api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) - assert.Error(t, err) - assert.Nil(t, resp) + resp, err := api.GetWorkflow(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } } -func TestGetTablets(t *testing.T) { +func TestGetWorkflows(t *testing.T) { + t.Parallel() + tests := []struct { - name string - clusterTablets [][]*vtadminpb.Tablet - dbconfigs map[string]*dbcfg - req *vtadminpb.GetTabletsRequest - expected []*vtadminpb.Tablet - shouldErr bool + name string + cfgs []vtadmintestutil.TestClusterConfig + req *vtadminpb.GetWorkflowsRequest + expected *vtadminpb.GetWorkflowsResponse + shouldErr bool }{ { - name: "single cluster", - clusterTablets: [][]*vtadminpb.Tablet{ + name: "success", + cfgs: []vtadmintestutil.TestClusterConfig{ { - /* cluster 0 */ - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - 
Uid: 100, - Cell: "zone1", + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, - }, - dbconfigs: map[string]*dbcfg{}, - req: &vtadminpb.GetTabletsRequest{}, - expected: []*vtadminpb.Tablet{ { Cluster: &vtadminpb.Cluster{ - Id: "c0", - Name: "cluster0", + Id: "c2", + Name: "cluster2", }, - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "otherkeyspace", + }, + }, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "otherkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + }, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetWorkflowsRequest{}, + expected: &vtadminpb.GetWorkflowsResponse{ + WorkflowsByCluster: map[string]*vtadminpb.ClusterWorkflows{ + "c1": { + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace", 
+ Workflow: &vtctldatapb.Workflow{ + Name: "workflow2", + }, + }, + }, + }, + "c2": { + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + Keyspace: "otherkeyspace", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, shouldErr: false, }, { - name: "one cluster errors", - clusterTablets: [][]*vtadminpb.Tablet{ - /* cluster 0 */ + name: "one cluster has partial error then request succeeds", + cfgs: []vtadmintestutil.TestClusterConfig{ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, - /* cluster 1 */ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 200, - Cell: "zone1", + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "otherkeyspace", + }, + { + Name: "badkeyspace", + }, + }, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + 
Error error + }{ + "otherkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + }, + }, + }, + "badkeyspace": { + Error: assert.AnError, }, - Hostname: "ks2-00-00-zone1-a", - Keyspace: "ks2", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, }, - dbconfigs: map[string]*dbcfg{ - "c1": {shouldErr: true}, + req: &vtadminpb.GetWorkflowsRequest{}, + expected: &vtadminpb.GetWorkflowsResponse{ + WorkflowsByCluster: map[string]*vtadminpb.ClusterWorkflows{ + "c1": { + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "testkeyspace", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow2", + }, + }, + }, + Warnings: []string{}, + }, + "c2": { + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + Keyspace: "otherkeyspace", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + }, + Warnings: []string{"some warning about badkeyspace"}, + }, + }, }, - req: &vtadminpb.GetTabletsRequest{}, - expected: nil, - shouldErr: true, + shouldErr: false, }, { - name: "multi cluster, selecting one", - clusterTablets: [][]*vtadminpb.Tablet{ - /* cluster 0 */ + name: "IgnoreKeyspaces applies across clusters", + cfgs: []vtadmintestutil.TestClusterConfig{ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetWorkflowsResults: map[string]struct { + 
Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, - /* cluster 1 */ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 200, - Cell: "zone1", + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + { + Name: "otherkeyspace", + }, + }, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + }, + }, + }, + "otherkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + }, + }, }, - Hostname: "ks2-00-00-zone1-a", - Keyspace: "ks2", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, }, - dbconfigs: map[string]*dbcfg{}, - req: &vtadminpb.GetTabletsRequest{ClusterIds: []string{"c0"}}, - expected: []*vtadminpb.Tablet{ + req: &vtadminpb.GetWorkflowsRequest{ + IgnoreKeyspaces: []string{"testkeyspace"}, + }, + expected: &vtadminpb.GetWorkflowsResponse{ + WorkflowsByCluster: map[string]*vtadminpb.ClusterWorkflows{ + "c1": {}, + "c2": { + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + Keyspace: "otherkeyspace", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "one cluster has fatal error, request fails", 
+ cfgs: []vtadmintestutil.TestClusterConfig{ { Cluster: &vtadminpb.Cluster{ - Id: "c0", - Name: "cluster0", + Id: "c1", + Name: "cluster1", }, - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + }, + }, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, + }, + }, + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + VtctldClient: &vtadmintestutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Error: assert.AnError, // GetKeyspaces is a fatal error }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, - shouldErr: false, + req: &vtadminpb.GetWorkflowsRequest{}, + expected: nil, + shouldErr: true, }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { - clusters := make([]*cluster.Cluster, len(tt.clusterTablets)) + t.Parallel() - for i, tablets := range tt.clusterTablets { - cluster := buildCluster(i, tablets, tt.dbconfigs) - clusters[i] = cluster - } + api := NewAPI( + vtadmintestutil.BuildClusters(tt.cfgs...), + grpcserver.Options{}, + http.Options{}, + ) - api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) - resp, err := api.GetTablets(context.Background(), tt.req) + resp, err := api.GetWorkflows(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) + return } assert.NoError(t, err) - assert.ElementsMatch(t, tt.expected, resp.Tablets) + 
require.NotNil(t, resp) + + vtadmintestutil.AssertGetWorkflowsResponsesEqual(t, tt.expected, resp) }) } } -// This test only validates the error handling on dialing database connections. -// Other cases are covered by one or both of TestGetTablets and TestGetTablet. -func Test_getTablets(t *testing.T) { - api := &API{} - disco := fakediscovery.New() - disco.AddTaggedGates(nil, &vtadminpb.VTGate{Hostname: "gate"}) - - db := vtsql.New(&vtsql.Config{ - Cluster: &vtadminpb.Cluster{ - Id: "c1", - Name: "one", - }, - Discovery: disco, - }) - db.DialFunc = func(cfg vitessdriver.Configuration) (*sql.DB, error) { - return nil, assert.AnError - } - - _, err := api.getTablets(context.Background(), &cluster.Cluster{ - DB: db, - }) - assert.Error(t, err) -} +func TestVTExplain(t *testing.T) { + t.Parallel() -func TestGetTablet(t *testing.T) { tests := []struct { - name string - clusterTablets [][]*vtadminpb.Tablet - dbconfigs map[string]*dbcfg - req *vtadminpb.GetTabletRequest - expected *vtadminpb.Tablet - shouldErr bool + name string + keyspaces []*vtctldatapb.Keyspace + shards []*vtctldatapb.Shard + srvVSchema *vschemapb.SrvVSchema + tabletSchemas map[string]*tabletmanagerdatapb.SchemaDefinition + tablets []*vtadminpb.Tablet + req *vtadminpb.VTExplainRequest + expectedError error }{ { - name: "single cluster", - clusterTablets: [][]*vtadminpb.Tablet{ + name: "runs VTExplain given a valid request in a valid topology", + keyspaces: []*vtctldatapb.Keyspace{ { - /* cluster 0 */ - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", - }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, - }, - }, + Name: "commerce", + Keyspace: &topodatapb.Keyspace{}, }, }, - dbconfigs: map[string]*dbcfg{}, - req: &vtadminpb.GetTabletRequest{ - Hostname: "ks1-00-00-zone1-a", - }, - expected: &vtadminpb.Tablet{ - Cluster: &vtadminpb.Cluster{ - Id: "c0", - Name: 
"cluster0", + shards: []*vtctldatapb.Shard{ + { + Name: "-", + Keyspace: "commerce", }, - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", + }, + srvVSchema: &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "commerce": { + Sharded: false, + Tables: map[string]*vschemapb.Table{ + "customers": {}, + }, }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, }, }, - shouldErr: false, - }, - { - name: "one cluster errors", - clusterTablets: [][]*vtadminpb.Tablet{ - /* cluster 0 */ - { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{ + "c0_cell1-0000000100": { + DatabaseSchema: "CREATE DATABASE commerce", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE customers (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, - /* cluster 1 */ + }, + tablets: []*vtadminpb.Tablet{ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 200, - Cell: "zone1", - }, - Hostname: "ks2-00-00-zone1-a", - Keyspace: "ks2", - Shard: "-", - Type: topodatapb.TabletType_MASTER, + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "c0_cell1", }, + Hostname: "tablet-cell1-a", + Keyspace: "commerce", + Shard: "-", + Type: 
topodatapb.TabletType_REPLICA, }, }, }, - dbconfigs: map[string]*dbcfg{ - "c1": {shouldErr: true}, + req: &vtadminpb.VTExplainRequest{ + Cluster: "c0", + Keyspace: "commerce", + Sql: "select * from customers", }, - req: &vtadminpb.GetTabletRequest{ - Hostname: "doesn't matter", - }, - expected: nil, - shouldErr: true, }, { - name: "multi cluster, selecting one with tablet", - clusterTablets: [][]*vtadminpb.Tablet{ - /* cluster 0 */ + name: "returns an error if no appropriate tablet found in keyspace", + keyspaces: []*vtctldatapb.Keyspace{ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", - }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, + Name: "commerce", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Name: "-", + Keyspace: "commerce", + }, + }, + srvVSchema: &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "commerce": { + Sharded: false, + Tables: map[string]*vschemapb.Table{ + "customers": {}, }, }, }, - /* cluster 1 */ - { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 200, - Cell: "zone1", + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, + }, + }, + tabletSchemas: map[string]*tabletmanagerdatapb.SchemaDefinition{ + "c0_cell1-0000000102": { + DatabaseSchema: "CREATE DATABASE commerce", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE customers (id int(11) not null,PRIMARY KEY (id));`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, }, - Hostname: "ks2-00-00-zone1-a", - Keyspace: "ks2", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, }, }, }, - dbconfigs: map[string]*dbcfg{}, - req: 
&vtadminpb.GetTabletRequest{ - Hostname: "ks1-00-00-zone1-a", - ClusterIds: []string{"c0"}, - }, - expected: &vtadminpb.Tablet{ - Cluster: &vtadminpb.Cluster{ - Id: "c0", - Name: "cluster0", - }, - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", + tablets: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 100, + Cell: "c0_cell1", + }, + Hostname: "tablet-cell1-a", + Keyspace: "commerce", + Shard: "-", + Type: topodatapb.TabletType_MASTER, }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, }, - }, - shouldErr: false, - }, - { - name: "multi cluster, multiple results", - clusterTablets: [][]*vtadminpb.Tablet{ - /* cluster 0 */ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 100, - Cell: "zone1", - }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 101, + Cell: "c0_cell1", }, + Hostname: "tablet-cell1-b", + Keyspace: "commerce", + Shard: "-", + Type: topodatapb.TabletType_DRAINED, }, }, - /* cluster 1 */ { - { - State: vtadminpb.Tablet_SERVING, - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: 200, - Cell: "zone1", - }, - Hostname: "ks1-00-00-zone1-a", - Keyspace: "ks1", - Shard: "-", - Type: topodatapb.TabletType_MASTER, + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Uid: 102, + Cell: "c0_cell1", }, + Hostname: "tablet-cell1-c", + Keyspace: "commerce", + 
Shard: "-", + Type: topodatapb.TabletType_REPLICA, }, }, }, - dbconfigs: map[string]*dbcfg{}, - req: &vtadminpb.GetTabletRequest{ - Hostname: "ks1-00-00-zone1-a", + req: &vtadminpb.VTExplainRequest{ + Cluster: "c0", + Keyspace: "commerce", + Sql: "select * from customers", }, - expected: nil, - shouldErr: true, + expectedError: vtadminerrors.ErrNoTablet, }, { - name: "no results", - clusterTablets: [][]*vtadminpb.Tablet{ - /* cluster 0 */ - {}, + name: "returns an error if cluster unspecified in request", + req: &vtadminpb.VTExplainRequest{ + Keyspace: "commerce", + Sql: "select * from customers", }, - dbconfigs: map[string]*dbcfg{}, - req: &vtadminpb.GetTabletRequest{ - Hostname: "ks1-00-00-zone1-a", + expectedError: vtadminerrors.ErrInvalidRequest, + }, + { + name: "returns an error if keyspace unspecified in request", + req: &vtadminpb.VTExplainRequest{ + Cluster: "c0", + Sql: "select * from customers", }, - expected: nil, - shouldErr: true, + expectedError: vtadminerrors.ErrInvalidRequest, + }, + { + name: "returns an error if SQL unspecified in request", + req: &vtadminpb.VTExplainRequest{ + Cluster: "c0", + Keyspace: "commerce", + }, + expectedError: vtadminerrors.ErrInvalidRequest, }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { - clusters := make([]*cluster.Cluster, len(tt.clusterTablets)) + t.Parallel() - for i, tablets := range tt.clusterTablets { - cluster := buildCluster(i, tablets, tt.dbconfigs) - clusters[i] = cluster - } + toposerver := memorytopo.NewServer("c0_cell1") - api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) - resp, err := api.GetTablet(context.Background(), tt.req) - if tt.shouldErr { - assert.Error(t, err) - return + tmc := testutil.TabletManagerClient{ + GetSchemaResults: map[string]struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{}, } - assert.NoError(t, err) - assert.Equal(t, tt.expected, resp) - }) - } -} + vtctldserver := 
testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }) -type dbcfg struct { - shouldErr bool -} + testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + if tt.srvVSchema != nil { + err := toposerver.UpdateSrvVSchema(ctx, "c0_cell1", tt.srvVSchema) + require.NoError(t, err) + } + testutil.AddKeyspaces(ctx, t, toposerver, tt.keyspaces...) + testutil.AddShards(ctx, t, toposerver, tt.shards...) -// shared helper for building a cluster that contains the given tablets. -// dbconfigs contains an optional config for controlling the behavior of the -// cluster's DB at the package sql level. -func buildCluster(i int, tablets []*vtadminpb.Tablet, dbconfigs map[string]*dbcfg) *cluster.Cluster { - disco := fakediscovery.New() - disco.AddTaggedGates(nil, &vtadminpb.VTGate{Hostname: fmt.Sprintf("cluster%d-gate", i)}) - - cluster := &cluster.Cluster{ - ID: fmt.Sprintf("c%d", i), - Name: fmt.Sprintf("cluster%d", i), - Discovery: disco, - } + for _, tablet := range tt.tablets { + testutil.AddTablet(ctx, t, toposerver, tablet.Tablet, nil) - dbconfig, ok := dbconfigs[cluster.ID] - if !ok { - dbconfig = &dbcfg{shouldErr: false} - } + // Adds each SchemaDefinition to the fake TabletManagerClient, or nil + // if there are no schemas for that tablet. (All tablet aliases must + // exist in the map. Otherwise, TabletManagerClient will return an error when + // looking up the schema with tablet alias that doesn't exist.) 
+ alias := topoproto.TabletAliasString(tablet.Tablet.Alias) + tmc.GetSchemaResults[alias] = struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{ + Schema: tt.tabletSchemas[alias], + Error: nil, + } + } - db := vtsql.New(&vtsql.Config{ - Cluster: cluster.ToProto(), - Discovery: disco, - }) - db.DialFunc = func(cfg vitessdriver.Configuration) (*sql.DB, error) { - return sql.OpenDB(&fakevtsql.Connector{Tablets: tablets, ShouldErr: dbconfig.shouldErr}), nil + clusters := []*cluster.Cluster{ + vtadmintestutil.BuildCluster(vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + VtctldClient: vtctldClient, + Tablets: tt.tablets, + }), + } + + api := NewAPI(clusters, grpcserver.Options{}, http.Options{}) + resp, err := api.VTExplain(ctx, tt.req) + + if tt.expectedError != nil { + assert.True(t, errors.Is(err, tt.expectedError), "expected error type %w does not match actual error type %w", err, tt.expectedError) + } else { + require.NoError(t, err) + + // We don't particularly care to test the contents of the VTExplain response, + // just that it exists. + assert.NotEmpty(t, resp.Response) + } + }) + }) } +} - cluster.DB = db +func init() { + // For tests that don't actually care about mocking the tmclient (i.e. they + // call grpcvtctldserver.NewVtctldServer to initialize the unit under test), + // this needs to be set. + // + // Tests that do care about the tmclient should use + // testutil.NewVtctldServerWithTabletManagerClient to initialize their + // VtctldServer. + *tmclient.TabletManagerProtocol = "vtadmin.test" + tmclient.RegisterTabletManagerClientFactory("vtadmin.test", func() tmclient.TabletManagerClient { + return nil + }) - return cluster + // This prevents data-race failures in tests involving grpc client or server + // creation. 
For example, vtctldclient.New() eventually ends up calling + // grpccommon.EnableTracingOpt() which does a synchronized, one-time + // mutation of the global grpc.EnableTracing. This variable is also read, + // unguarded, by grpc.NewServer(), which is a function call that appears in + // most, if not all, vtadmin.API tests. + // + // Calling this here ensures that one-time write happens before any test + // attempts to read that value by way of grpc.NewServer(). + grpccommon.EnableTracingOpt() } diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go index 3f5b0af7e5d..26e8bba8335 100644 --- a/go/vt/vtadmin/cluster/cluster.go +++ b/go/vt/vtadmin/cluster/cluster.go @@ -17,12 +17,31 @@ limitations under the License. package cluster import ( + "context" + "database/sql" "fmt" + "math/rand" + "strings" + "sync" + "time" + "k8s.io/apimachinery/pkg/util/sets" + + "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin/cluster/discovery" + "vitess.io/vitess/go/vt/vtadmin/errors" + "vitess.io/vitess/go/vt/vtadmin/vtadminproto" + "vitess.io/vitess/go/vt/vtadmin/vtctldclient" "vitess.io/vitess/go/vt/vtadmin/vtsql" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtadmin" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) // Cluster is the self-contained unit of services required for vtadmin to talk @@ -33,10 +52,8 @@ type Cluster struct { Name string Discovery discovery.Discovery - // (TODO|@amason): after merging #7128, this still requires some additional - // work, so deferring this for now! - // vtctl vtctldclient.VtctldClient - DB vtsql.DB + DB vtsql.DB + Vtctld vtctldclient.Proxy // These fields are kept to power debug endpoints. 
// (TODO|@amason): Figure out if these are needed or if there's a way to @@ -55,19 +72,29 @@ func New(cfg Config) (*Cluster, error) { disco, err := discovery.New(cfg.DiscoveryImpl, cluster.ToProto(), discoargs) if err != nil { - return nil, fmt.Errorf("error while creating discovery impl (%s): %w", cfg.DiscoveryImpl, err) + return nil, fmt.Errorf("error creating discovery impl (%s): %w", cfg.DiscoveryImpl, err) } cluster.Discovery = disco + protocluster := cluster.ToProto() + vtsqlargs := buildPFlagSlice(cfg.VtSQLFlags) - vtsqlCfg, err := vtsql.Parse(cluster.ToProto(), disco, vtsqlargs) + vtsqlCfg, err := vtsql.Parse(protocluster, disco, vtsqlargs) if err != nil { - return nil, fmt.Errorf("error while creating vtsql connection: %w", err) + return nil, fmt.Errorf("error creating vtsql connection config: %w", err) + } + + vtctldargs := buildPFlagSlice(cfg.VtctldFlags) + + vtctldCfg, err := vtctldclient.Parse(protocluster, disco, vtctldargs) + if err != nil { + return nil, fmt.Errorf("error creating vtctldclient proxy config: %w", err) } cluster.DB = vtsql.New(vtsqlCfg) + cluster.Vtctld = vtctldclient.New(vtctldCfg) return cluster, nil } @@ -89,3 +116,705 @@ func buildPFlagSlice(flags map[string]string) []string { return args } + +// parseTablets converts a set of *sql.Rows into a slice of Tablets, for the +// given cluster. +func (c *Cluster) parseTablets(rows *sql.Rows) ([]*vtadminpb.Tablet, error) { + var tablets []*vtadminpb.Tablet + + for rows.Next() { + if err := rows.Err(); err != nil { + return nil, err + } + + tablet, err := c.parseTablet(rows) + if err != nil { + return nil, err + } + + tablets = append(tablets, tablet) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return tablets, nil +} + +// Fields are: +// Cell | Keyspace | Shard | TabletType (string) | ServingState (string) | Alias | Hostname | MasterTermStartTime. 
+func (c *Cluster) parseTablet(rows *sql.Rows) (*vtadminpb.Tablet, error) { + var ( + cell string + tabletTypeStr string + servingStateStr string + aliasStr string + mtstStr string + topotablet topodatapb.Tablet + + err error + ) + + if err := rows.Scan( + &cell, + &topotablet.Keyspace, + &topotablet.Shard, + &tabletTypeStr, + &servingStateStr, + &aliasStr, + &topotablet.Hostname, + &mtstStr, + ); err != nil { + return nil, err + } + + tablet := &vtadminpb.Tablet{ + Cluster: &vtadminpb.Cluster{ + Id: c.ID, + Name: c.Name, + }, + Tablet: &topotablet, + } + + topotablet.Type, err = topoproto.ParseTabletType(tabletTypeStr) + if err != nil { + return nil, err + } + + tablet.State = vtadminproto.ParseTabletServingState(servingStateStr) + + topotablet.Alias, err = topoproto.ParseTabletAlias(aliasStr) + if err != nil { + return nil, fmt.Errorf("failed to parse tablet_alias %s: %w", aliasStr, err) + } + + if topotablet.Alias.Cell != cell { + // (TODO:@amason) ??? + log.Warningf("tablet cell %s does not match alias %s. ignoring for now", cell, topoproto.TabletAliasString(topotablet.Alias)) + } + + if mtstStr != "" { + timeTime, err := time.Parse(time.RFC3339, mtstStr) + if err != nil { + return nil, fmt.Errorf("failed parsing master_term_start_time %s: %w", mtstStr, err) + } + + topotablet.MasterTermStartTime = logutil.TimeToProto(timeTime) + } + + return tablet, nil +} + +// FindAllShardsInKeyspaceOptions modify the behavior of a cluster's +// FindAllShardsInKeyspace method. +type FindAllShardsInKeyspaceOptions struct { + // SkipDial indicates that the cluster can assume the vtctldclient has + // already dialed up a connection to a vtctld. + SkipDial bool +} + +// FindAllShardsInKeyspace proxies a FindAllShardsInKeyspace RPC to a cluster's +// vtctld, unpacking the response struct. +// +// It can also optionally ensure the vtctldclient has a valid connection before +// making the RPC call. 
+func (c *Cluster) FindAllShardsInKeyspace(ctx context.Context, keyspace string, opts FindAllShardsInKeyspaceOptions) (map[string]*vtctldatapb.Shard, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.FindAllShardsInKeyspace") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", keyspace) + + if !opts.SkipDial { + if err := c.Vtctld.Dial(ctx); err != nil { + return nil, fmt.Errorf("failed to Dial vtctld for cluster = %s for FindAllShardsInKeyspace: %w", c.ID, err) + } + } + + resp, err := c.Vtctld.FindAllShardsInKeyspace(ctx, &vtctldatapb.FindAllShardsInKeyspaceRequest{ + Keyspace: keyspace, + }) + if err != nil { + return nil, fmt.Errorf("FindAllShardsInKeyspace(cluster = %s, keyspace = %s) failed: %w", c.ID, keyspace, err) + } + + return resp.Shards, nil +} + +// FindWorkflowsOptions is the set of options for FindWorkflows requests. +type FindWorkflowsOptions struct { + ActiveOnly bool + IgnoreKeyspaces sets.String + Filter func(workflow *vtadminpb.Workflow) bool +} + +// FindWorkflows returns a list of Workflows in this cluster, across the given +// keyspaces and filtering according to the options passed in. +// +// If the list of keyspaces to check is empty, then FindWorkflows will use the +// result of GetKeyspaces to search all keyspaces in the cluster. In this case, +// opts.IgnoreKeyspaces is respected. +// +// Callers should use this function when they want more fine-grained filtering, +// and GetWorkflows when they just want to filter on keyspace name. +// +// Note that if only a subset of keyspaces error on their vtctld GetWorkflows +// rpc, this is treated as a partial success, and the ClusterWorkflows response +// will include any errors in the Warnings slice. If all keyspaces fail, or if +// non-(Vtctld.GetWorkflows) calls fail, this is treated as an error by this +// function. 
+func (c *Cluster) FindWorkflows(ctx context.Context, keyspaces []string, opts FindWorkflowsOptions) (*vtadminpb.ClusterWorkflows, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.FindWorkflows") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("active_only", opts.ActiveOnly) + + if err := c.Vtctld.Dial(ctx); err != nil { + return nil, fmt.Errorf("FindWorkflows(cluster = %v, keyspaces = %v, opts = %v) dial failed: %w", c.ID, keyspaces, opts, err) + } + + return c.findWorkflows(ctx, keyspaces, opts) +} + +func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts FindWorkflowsOptions) (*vtadminpb.ClusterWorkflows, error) { + if opts.Filter == nil { + opts.Filter = func(_ *vtadminpb.Workflow) bool { return true } + } + + if opts.IgnoreKeyspaces == nil { + opts.IgnoreKeyspaces = sets.NewString() + } + + if len(keyspaces) == 0 { + span, ctx := trace.NewSpan(ctx, "Cluster.GetKeyspaces") + AnnotateSpan(c, span) + + resp, err := c.Vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) + if err != nil { + span.Finish() + return nil, fmt.Errorf("GetKeyspaces(cluster = %s) failed: %w", c.ID, err) + } + + for _, ks := range resp.Keyspaces { + keyspaces = append(keyspaces, ks.Name) + } + + span.Finish() + } else if opts.IgnoreKeyspaces.Len() > 0 { + log.Warningf("Cluster.findWorkflows: IgnoreKeyspaces was set, but Keyspaces was not empty; ignoring IgnoreKeyspaces in favor of explicitly checking everything in Keyspaces: (%s)", strings.Join(keyspaces, ", ")) + opts.IgnoreKeyspaces = sets.NewString() + } + + // Annotate the parent span with some additional information about the call. 
+ if span, _ := trace.FromContext(ctx); span != nil { + span.Annotate("num_keyspaces", len(keyspaces)) + span.Annotate("keyspaces", strings.Join(keyspaces, ",")) + span.Annotate("num_ignore_keyspaces", opts.IgnoreKeyspaces.Len()) + span.Annotate("ignore_keyspaces", strings.Join(opts.IgnoreKeyspaces.List(), ",")) + } + + clusterpb := c.ToProto() + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results []*vtadminpb.Workflow + ) + + for _, ks := range keyspaces { + if opts.IgnoreKeyspaces.Has(ks) { + log.Infof("Cluster.findWorkflows: ignoring keyspace %s", ks) + + continue + } + + wg.Add(1) + + go func(ks string) { + defer wg.Done() + + span, ctx := trace.NewSpan(ctx, "Cluster.GetWorkflowsForKeyspace") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", ks) + span.Annotate("active_only", opts.ActiveOnly) + + resp, err := c.Vtctld.GetWorkflows(ctx, &vtctldatapb.GetWorkflowsRequest{ + Keyspace: ks, + ActiveOnly: opts.ActiveOnly, + }) + if err != nil { + err = fmt.Errorf("GetWorkflows(cluster = %s, keyspace = %s, active_only = %v) failed: %w", c.ID, ks, opts.ActiveOnly, err) + rec.RecordError(err) + + return + } + + workflows := make([]*vtadminpb.Workflow, 0, len(resp.Workflows)) + for _, wf := range resp.Workflows { + workflow := &vtadminpb.Workflow{ + Cluster: clusterpb, + Keyspace: ks, + Workflow: wf, + } + + if opts.Filter(workflow) { + workflows = append(workflows, workflow) + } + } + + m.Lock() + results = append(results, workflows...) + m.Unlock() + }(ks) + } + + wg.Wait() + + // If every keyspace failed, treat this as an error. + if rec.HasErrors() && len(rec.Errors) == len(keyspaces) { + return nil, rec.Error() + } + + // Otherwise, append any failures into the warnings slice, and return what + // results we have. + return &vtadminpb.ClusterWorkflows{ + Workflows: results, + Warnings: rec.ErrorStrings(), + }, nil +} + +// GetTablets returns all tablets in the cluster. 
+func (c *Cluster) GetTablets(ctx context.Context) ([]*vtadminpb.Tablet, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.GetTablets") + defer span.Finish() + + AnnotateSpan(c, span) + + return c.getTablets(ctx) +} + +func (c *Cluster) getTablets(ctx context.Context) ([]*vtadminpb.Tablet, error) { + if err := c.DB.Dial(ctx, ""); err != nil { + return nil, err + } + + rows, err := c.DB.ShowTablets(ctx) + if err != nil { + return nil, err + } + + return c.parseTablets(rows) +} + +// GetSchemaOptions contains the options that modify the behavior of the +// (*Cluster).GetSchema method. +type GetSchemaOptions struct { + // Tablets is the starting set of tablets that GetSchema will filter to find + // suitable tablet(s) to make GetSchema RPC(s) to. + // + // If empty, GetSchema will first call (*Cluster).FindTablets() to fetch all + // tablets for the keyspace. + Tablets []*vtadminpb.Tablet + // BaseRequest is used to share some common parameters to use for the + // individual tablet GetSchema RPCs made by (*Cluster).GetSchema, which + // takes a copy of this request in order to makeb certain overrides as + // needed, so these mutations are transparent to the caller. + // + // The TabletAlias field is ignored completely by (*Cluster).GetSchema, as + // it is overwritten for each tablet RPC that method makes. + // + // The TableSizesOnly field is overwritten only in certain tablet RPCs when + // SizeOpts.AggregateSizes is true. In order to move minimal bytes over the + // wire, we assume that schema definitions match across all shards, so we + // can get the full schema from just one tablet, and then just the table + // size information from the other N-1 tablets. + // + // The TableNamesOnly field is untouched by (*Cluster).GetSchema when not + // doing size aggregation. However, when doing size aggregation, if + // TableNamesOnly is true, we log a warning and override it. 
This is because + // TableNamesOnly is mutually exclusive with TableSizesOnly, and size + // aggregation requires setting TableSizesOnly in the cases described above. + BaseRequest *vtctldatapb.GetSchemaRequest + // TableSizeOptions control whether the (*Cluster).GetSchema method performs + // cross-shard table size aggregation (via the AggregateSizes field). + // + // If the AggregateSizes field is false, the rest of this struct is ignored, + // no size aggregation is done, and (*Cluster).GetSchema will make exactly + // one GetSchema RPC to a SERVING tablet in the keyspace. + // + // If the AggregateSizes field is true, (*Cluster).GetSchema will make a + // FindAllShardsInKeyspace vtctld RPC, and then filter the given Tablets + // (described above) to find one SERVING tablet for each shard in the + // keyspace, skipping any non-serving shards in the keyspace. + TableSizeOptions *vtadminpb.GetSchemaTableSizeOptions +} + +// GetSchema returns the schema for a given keyspace. GetSchema has a few +// different behaviors depending on the GetSchemaOptions provided, as follows: +// +// (1) If opts.Tablets is empty, we will first use FindTablets to fetch all +// tablets for the keyspace, regardless of their serving state. Additional +// filtering of either this set, or the provided Tablets, will happen later. +// +// (2) If opts.SizeOpts.AggregateSizes is true, we will also make a call to +// FindAllShardsInKeyspace, in order to fan out GetSchema RPCs to a tablet in +// each shard. If this option is false, we make exactly one GetSchema request to +// a single, randomly-chosen, tablet in the keyspace. +// +// (3) We will only make GetSchema RPCs to tablets that are in SERVING state; we +// don't want to use a tablet that might be in a bad state as the source of +// truth for a schema. 
Therefore if we can't find a SERVING tablet for the +// keyspace (in non-aggregation mode) or for a shard in that keyspace (in +// aggregation mode), then we will return an error back to the caller. +func (c *Cluster) GetSchema(ctx context.Context, keyspace string, opts GetSchemaOptions) (*vtadminpb.Schema, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.GetSchema") + defer span.Finish() + + if opts.TableSizeOptions == nil { + opts.TableSizeOptions = &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: false, + } + } + + if opts.BaseRequest == nil { + opts.BaseRequest = &vtctldatapb.GetSchemaRequest{} + } + + if opts.TableSizeOptions.AggregateSizes && opts.BaseRequest.TableNamesOnly { + log.Warningf("GetSchema(cluster = %s) size aggregation is incompatible with TableNamesOnly, ignoring the latter in favor of aggregating sizes", c.ID) + opts.BaseRequest.TableNamesOnly = false + } + + AnnotateSpan(c, span) + span.Annotate("keyspace", keyspace) + annotateGetSchemaRequest(opts.BaseRequest, span) + vtadminproto.AnnotateSpanWithGetSchemaTableSizeOptions(opts.TableSizeOptions, span) + + if len(opts.Tablets) == 0 { + // Fetch all tablets for the keyspace. + var err error + + opts.Tablets, err = c.FindTablets(ctx, func(tablet *vtadminpb.Tablet) bool { + return tablet.Tablet.Keyspace == keyspace + }, -1) + if err != nil { + return nil, fmt.Errorf("%w for keyspace %s", errors.ErrNoTablet, keyspace) + } + } + + if err := c.Vtctld.Dial(ctx); err != nil { + return nil, fmt.Errorf("failed to Dial vtctld for cluster = %s for GetSchema: %w", c.ID, err) + } + + tabletsToQuery, err := c.getTabletsToQueryForSchemas(ctx, keyspace, opts) + if err != nil { + return nil, err + } + + return c.getSchemaFromTablets(ctx, keyspace, tabletsToQuery, opts) +} + +// Note that for this function we use the tablets parameter, ignoring the +// opts.Tablets value completely. 
+// getSchemaFromTablets fans out one GetSchema rpc per tablet in tablets,
+// merging the responses into a single vtadminpb.Schema (optionally
+// aggregating per-shard table sizes). Any single rpc failure fails the whole
+// call.
+func (c *Cluster) getSchemaFromTablets(ctx context.Context, keyspace string, tablets []*vtadminpb.Tablet, opts GetSchemaOptions) (*vtadminpb.Schema, error) {
+	var (
+		m      sync.Mutex
+		wg     sync.WaitGroup
+		rec    concurrency.AllErrorRecorder
+		schema = &vtadminpb.Schema{
+			Cluster:    c.ToProto(),
+			Keyspace:   keyspace,
+			TableSizes: map[string]*vtadminpb.Schema_TableSize{},
+		}
+		// Instead of starting at false, we start with whatever the base request
+		// specified. If we have exactly one tablet to query (i.e. we're not
+		// doing multi-shard aggregation), it's possible the request was to
+		// literally just get the table sizes; we shouldn't assume. If we have
+		// more than one tablet to query, then we are doing size aggregation,
+		// and we'll flip this to true after spawning the first GetSchema rpc.
+		sizesOnly = opts.BaseRequest.TableSizesOnly
+	)
+
+	for _, tablet := range tablets {
+		wg.Add(1)
+
+		go func(tablet *vtadminpb.Tablet, sizesOnly bool) {
+			defer wg.Done()
+
+			span, ctx := trace.NewSpan(ctx, "Vtctld.GetSchema")
+			defer span.Finish()
+
+			// Copy the base request so concurrent goroutines (and the
+			// caller) never share a mutable request struct.
+			req := *opts.BaseRequest
+			req.TableSizesOnly = sizesOnly
+			req.TabletAlias = tablet.Tablet.Alias
+
+			AnnotateSpan(c, span)
+			annotateGetSchemaRequest(&req, span)
+			span.Annotate("keyspace", keyspace)
+			span.Annotate("shard", tablet.Tablet.Shard)
+
+			resp, err := c.Vtctld.GetSchema(ctx, &req)
+			if err != nil {
+				err = fmt.Errorf("GetSchema(cluster = %s, keyspace = %s, tablet = %s) failed: %w", c.ID, keyspace, tablet.Tablet.Alias, err)
+				rec.RecordError(err)
+
+				return
+			}
+
+			if resp == nil || resp.Schema == nil {
+				return
+			}
+
+			m.Lock()
+			defer m.Unlock()
+
+			if !sizesOnly {
+				schema.TableDefinitions = resp.Schema.TableDefinitions
+			}
+
+			if !opts.TableSizeOptions.AggregateSizes {
+				return
+			}
+
+			for _, td := range resp.Schema.TableDefinitions {
+				tableSize, ok := schema.TableSizes[td.Name]
+				if !ok {
+					tableSize = &vtadminpb.Schema_TableSize{
+						ByShard: map[string]*vtadminpb.Schema_ShardTableSize{},
+					}
+					schema.TableSizes[td.Name] = tableSize
+				}
+
+				if _, ok = tableSize.ByShard[tablet.Tablet.Shard]; ok {
+					err := fmt.Errorf("duplicate shard queries for table %s on shard %s/%s", td.Name, keyspace, tablet.Tablet.Shard)
+					log.Warningf("Impossible: %s", err)
+					rec.RecordError(err)
+
+					return
+				}
+
+				tableSize.RowCount += td.RowCount
+				tableSize.DataLength += td.DataLength
+
+				tableSize.ByShard[tablet.Tablet.Shard] = &vtadminpb.Schema_ShardTableSize{
+					RowCount:   td.RowCount,
+					DataLength: td.DataLength,
+				}
+			}
+		}(tablet, sizesOnly)
+
+		// If we have more than one tablet to query, we definitely don't want to
+		// get more than the sizes twice, so invariably set this to true for
+		// subsequent iterations
+		sizesOnly = true
+	}
+
+	wg.Wait()
+
+	if rec.HasErrors() {
+		return nil, rec.Error()
+	}
+
+	return schema, nil
+}
+
+// getTabletsToQueryForSchemas selects which tablets GetSchema should issue
+// rpcs to: one random SERVING tablet per serving shard when aggregating
+// sizes, or one random SERVING tablet for the whole keyspace otherwise.
+func (c *Cluster) getTabletsToQueryForSchemas(ctx context.Context, keyspace string, opts GetSchemaOptions) ([]*vtadminpb.Tablet, error) {
+	if opts.TableSizeOptions.AggregateSizes {
+		shards, err := c.FindAllShardsInKeyspace(ctx, keyspace, FindAllShardsInKeyspaceOptions{SkipDial: true})
+		if err != nil {
+			return nil, err
+		}
+
+		tabletsToQuery := make([]*vtadminpb.Tablet, 0, len(shards))
+
+		for _, shard := range shards {
+			if !shard.Shard.IsMasterServing {
+				log.Infof("%s/%s is not serving; ignoring ...", keyspace, shard.Name)
+				continue
+			}
+
+			shardTablets := vtadminproto.FilterTablets(func(tablet *vtadminpb.Tablet) bool {
+				return tablet.Tablet.Keyspace == keyspace && tablet.Tablet.Shard == shard.Name && tablet.State == vtadminpb.Tablet_SERVING
+			}, opts.Tablets, len(opts.Tablets))
+
+			if len(shardTablets) == 0 {
+				return nil, fmt.Errorf("%w for shard %s/%s", errors.ErrNoServingTablet, shard.Keyspace, shard.Name)
+			}
+
+			randomServingTablet := shardTablets[rand.Intn(len(shardTablets))]
+			tabletsToQuery = append(tabletsToQuery, randomServingTablet)
+		}
+
+		return tabletsToQuery, nil
+	}
+
+	keyspaceTablets := vtadminproto.FilterTablets(func(tablet *vtadminpb.Tablet) bool {
+		return tablet.Tablet.Keyspace == keyspace && tablet.State == vtadminpb.Tablet_SERVING
+	}, opts.Tablets, len(opts.Tablets))
+
+	if len(keyspaceTablets) == 0 {
+		err := fmt.Errorf("%w for keyspace %s", errors.ErrNoServingTablet, keyspace)
+		log.Warningf("%s. Searched tablets: %v", err, vtadminproto.Tablets(opts.Tablets).AliasStringList())
+		return nil, err
+	}
+
+	randomServingTablet := keyspaceTablets[rand.Intn(len(keyspaceTablets))]
+	// Fixed: was []*vtadmin.Tablet, which does not match the declared return
+	// type ([]*vtadminpb.Tablet) or the package qualifier used everywhere
+	// else in this file.
+	return []*vtadminpb.Tablet{randomServingTablet}, nil
+}
+
+// GetVSchema returns the vschema for a given keyspace in this cluster. The
+// caller is responsible for making at least one call to c.Vtctld.Dial prior to
+// calling this function.
+func (c *Cluster) GetVSchema(ctx context.Context, keyspace string) (*vtadminpb.VSchema, error) {
+	span, ctx := trace.NewSpan(ctx, "Cluster.GetVSchema")
+	defer span.Finish()
+
+	AnnotateSpan(c, span)
+	span.Annotate("keyspace", keyspace)
+
+	vschema, err := c.Vtctld.GetVSchema(ctx, &vtctldatapb.GetVSchemaRequest{
+		Keyspace: keyspace,
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &vtadminpb.VSchema{
+		Cluster: c.ToProto(),
+		Name:    keyspace,
+		VSchema: vschema.VSchema,
+	}, nil
+}
+
+// GetWorkflowOptions is the set of filtering options for GetWorkflow requests.
+type GetWorkflowOptions struct {
+	ActiveOnly bool
+}
+
+// GetWorkflow returns the single Workflow in this cluster for the given
+// keyspace and workflow name. It returns an error if either no workflows or
+// multiple workflows are found.
+func (c *Cluster) GetWorkflow(ctx context.Context, keyspace string, name string, opts GetWorkflowOptions) (*vtadminpb.Workflow, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.GetWorkflow") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("active_only", opts.ActiveOnly) + span.Annotate("keyspace", keyspace) + span.Annotate("workflow_name", name) + + if err := c.Vtctld.Dial(ctx); err != nil { + return nil, fmt.Errorf("GetWorkflow(cluster = %v, keyspace = %v, workflow = %v, opts = %+v) dial failed: %w", c.ID, keyspace, name, opts, err) + } + + workflows, err := c.findWorkflows(ctx, []string{keyspace}, FindWorkflowsOptions{ + ActiveOnly: opts.ActiveOnly, + Filter: func(workflow *vtadminpb.Workflow) bool { + return workflow.Workflow.Name == name + }, + }) + if err != nil { + return nil, err + } + + switch len(workflows.Workflows) { + case 0: + msg := "%w for keyspace %s and name %s (active_only = %v)" + if len(workflows.Warnings) > 0 { + return nil, fmt.Errorf(msg+"; warnings: %v", errors.ErrNoWorkflow, keyspace, name, opts.ActiveOnly, workflows.Warnings) + } + + return nil, fmt.Errorf(msg, errors.ErrNoWorkflow, keyspace, name, opts.ActiveOnly) + case 1: + return workflows.Workflows[0], nil + default: + return nil, fmt.Errorf("%w: found %d workflows in keyspace %s with name %s (active_only = %v); this should be impossible", errors.ErrAmbiguousWorkflow, len(workflows.Workflows), keyspace, name, opts.ActiveOnly) + } +} + +// GetWorkflowsOptions is the set of filtering options for GetWorkflows +// requests. +type GetWorkflowsOptions struct { + ActiveOnly bool + IgnoreKeyspaces sets.String +} + +// GetWorkflows returns a list of Workflows in this cluster, across the given +// keyspaces and filtering according to the options passed in. +// +// If the list of keyspaces to check is empty, then GetWorkflows will use the +// result of GetKeyspaces to search all keyspaces in the cluster. In this case, +// opts.IgnoreKeyspaces is respected. 
+func (c *Cluster) GetWorkflows(ctx context.Context, keyspaces []string, opts GetWorkflowsOptions) (*vtadminpb.ClusterWorkflows, error) {
+	span, ctx := trace.NewSpan(ctx, "Cluster.GetWorkflows")
+	defer span.Finish()
+
+	AnnotateSpan(c, span)
+	span.Annotate("active_only", opts.ActiveOnly)
+
+	if err := c.Vtctld.Dial(ctx); err != nil {
+		return nil, fmt.Errorf("GetWorkflows(cluster = %v, keyspaces = %v, opts = %v) dial failed: %w", c.ID, keyspaces, opts, err)
+	}
+
+	// Delegate to the shared search with a match-everything filter.
+	matchAll := func(_ *vtadminpb.Workflow) bool { return true }
+
+	return c.findWorkflows(ctx, keyspaces, FindWorkflowsOptions{
+		ActiveOnly:      opts.ActiveOnly,
+		IgnoreKeyspaces: opts.IgnoreKeyspaces,
+		Filter:          matchAll,
+	})
+}
+
+// FindTablet returns the first tablet in a given cluster that satisfies the filter function.
+func (c *Cluster) FindTablet(ctx context.Context, filter func(*vtadminpb.Tablet) bool) (*vtadminpb.Tablet, error) {
+	span, ctx := trace.NewSpan(ctx, "Cluster.FindTablet")
+	defer span.Finish()
+
+	AnnotateSpan(c, span)
+
+	// Ask the shared lookup for at most one match.
+	matches, err := c.findTablets(ctx, filter, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	switch len(matches) {
+	case 1:
+		return matches[0], nil
+	default:
+		return nil, errors.ErrNoTablet
+	}
+}
+
+// FindTablets returns the first N tablets in the given cluster that satisfy
+// the filter function. If N = -1, then all matching tablets are returned.
+// Ordering is not guaranteed, and callers should write their filter functions accordingly.
+func (c *Cluster) FindTablets(ctx context.Context, filter func(*vtadminpb.Tablet) bool, n int) ([]*vtadminpb.Tablet, error) {
+	span, ctx := trace.NewSpan(ctx, "Cluster.FindTablets")
+	defer span.Finish()
+
+	AnnotateSpan(c, span)
+
+	return c.findTablets(ctx, filter, n)
+}
+
+// findTablets is the unexported implementation shared by FindTablet and
+// FindTablets: it fetches all tablets for the cluster via GetTablets and
+// returns up to n that match filter (n == -1 means "all matches").
+func (c *Cluster) findTablets(ctx context.Context, filter func(*vtadminpb.Tablet) bool, n int) ([]*vtadminpb.Tablet, error) {
+	span, _ := trace.FromContext(ctx)
+
+	tablets, err := c.GetTablets(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if n == -1 {
+		n = len(tablets)
+	}
+
+	if span != nil {
+		span.Annotate("max_result_length", n) // the requested upper bound on results, not the actual result count
+	}
+
+	return vtadminproto.FilterTablets(filter, tablets, n), nil
+}
diff --git a/go/vt/vtadmin/cluster/cluster_test.go b/go/vt/vtadmin/cluster/cluster_test.go
new file mode 100644
index 00000000000..3246876c483
--- /dev/null
+++ b/go/vt/vtadmin/cluster/cluster_test.go
@@ -0,0 +1,2160 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cluster_test + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/sets" + + "vitess.io/vitess/go/vt/vitessdriver" + "vitess.io/vitess/go/vt/vtadmin/cluster" + "vitess.io/vitess/go/vt/vtadmin/cluster/discovery/fakediscovery" + vtadminerrors "vitess.io/vitess/go/vt/vtadmin/errors" + "vitess.io/vitess/go/vt/vtadmin/testutil" + "vitess.io/vitess/go/vt/vtadmin/vtsql" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func TestFindTablet(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*vtadminpb.Tablet + filter func(*vtadminpb.Tablet) bool + expected *vtadminpb.Tablet + expectedError error + }{ + { + name: "returns the first matching tablet", + tablets: []*vtadminpb.Tablet{ + { + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 101, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 102, + }, + Keyspace: "commerce", + }, + }, + }, + + filter: func(t *vtadminpb.Tablet) bool { + return t.State == vtadminpb.Tablet_SERVING + }, + expected: &vtadminpb.Tablet{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + 
Cell: "c0_cell1", + Uid: 101, + }, + Keyspace: "commerce", + }, + }, + }, + { + name: "returns an error if no match found", + tablets: []*vtadminpb.Tablet{ + { + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 101, + }, + Keyspace: "commerce", + }, + }, + }, + filter: func(t *vtadminpb.Tablet) bool { + return t.State == vtadminpb.Tablet_SERVING + }, + expectedError: vtadminerrors.ErrNoTablet, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cluster := testutil.BuildCluster(testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: tt.tablets, + }) + tablet, err := cluster.FindTablet(ctx, tt.filter) + + if tt.expectedError != nil { + assert.True(t, errors.Is(err, tt.expectedError), "expected error type %w does not match actual error type %w", err, tt.expectedError) + } else { + assert.NoError(t, err) + testutil.AssertTabletsEqual(t, tt.expected, tablet) + } + }) + } +} + +func TestFindTablets(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*vtadminpb.Tablet + filter func(*vtadminpb.Tablet) bool + n int + expected []*vtadminpb.Tablet + }{ + { + name: "returns n filtered tablets", + tablets: []*vtadminpb.Tablet{ + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 101, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: 
&topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 102, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 103, + }, + Keyspace: "commerce", + }, + }, + }, + filter: func(t *vtadminpb.Tablet) bool { + return t.State == vtadminpb.Tablet_SERVING + }, + n: 2, + expected: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 102, + }, + Keyspace: "commerce", + }, + }, + }, + }, + { + name: "returns all filtered tablets when n == -1", + tablets: []*vtadminpb.Tablet{ + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_NOT_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 101, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 102, + }, + Keyspace: "commerce", + }, + }, + { + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 103, + }, + Keyspace: "commerce", + }, + }, + }, + filter: func(t *vtadminpb.Tablet) bool { + return t.State == vtadminpb.Tablet_SERVING + }, + n: -1, + expected: []*vtadminpb.Tablet{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: 
&topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 100, + }, + Keyspace: "commerce", + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 102, + }, + Keyspace: "commerce", + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "c0_cell1", + Uid: 103, + }, + Keyspace: "commerce", + }, + }, + }, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cluster := testutil.BuildCluster(testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: tt.tablets, + }) + tablets, err := cluster.FindTablets(ctx, tt.filter, tt.n) + + assert.NoError(t, err) + testutil.AssertTabletSlicesEqual(t, tt.expected, tablets) + }) + } +} + +func TestGetSchema(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + vtctld vtctldclient.VtctldClient + req *vtctldatapb.GetSchemaRequest + tablet *vtadminpb.Tablet + expected *vtadminpb.Schema + shouldErr bool + }{ + { + name: "success", + vtctld: &testutil.VtctldClient{ + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "some_table", + }, + }, + }, + }, + }, + }, + }, + req: &vtctldatapb.GetSchemaRequest{}, + tablet: &vtadminpb.Tablet{ + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + }, + expected: &vtadminpb.Schema{ + Cluster: &vtadminpb.Cluster{ + Name: 
"cluster0", + Id: "c0", + }, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "some_table", + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + shouldErr: false, + }, + { + name: "error getting schema", + vtctld: &testutil.VtctldClient{ + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + }, + }, + req: &vtctldatapb.GetSchemaRequest{}, + tablet: &vtadminpb.Tablet{ + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "underlying schema is nil", + vtctld: &testutil.VtctldClient{ + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: nil, + }, + Error: nil, + }, + }, + }, + req: &vtctldatapb.GetSchemaRequest{}, + tablet: &vtadminpb.Tablet{ + State: vtadminpb.Tablet_SERVING, + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + }, + }, + expected: &vtadminpb.Schema{ + Cluster: &vtadminpb.Cluster{ + Id: "c2", + Name: "cluster2", + }, + Keyspace: "testkeyspace", + TableDefinitions: nil, + TableSizes: map[string]*vtadminpb.Schema_TableSize{}, + }, + shouldErr: false, + }, + } + + ctx := context.Background() + + for i, tt := range tests { + i := i + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + c := testutil.BuildCluster(testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: fmt.Sprintf("c%d", i), + Name: fmt.Sprintf("cluster%d", i), + }, + VtctldClient: tt.vtctld, + Tablets: nil, + DBConfig: testutil.Dbcfg{}, + }) + + err := c.Vtctld.Dial(ctx) + require.NoError(t, err, "could not dial test vtctld") + + 
schema, err := c.GetSchema(ctx, "testkeyspace", cluster.GetSchemaOptions{ + Tablets: []*vtadminpb.Tablet{tt.tablet}, + BaseRequest: tt.req, + }) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, schema) + }) + } + + t.Run("does not modify passed-in request", func(t *testing.T) { + t.Parallel() + + vtctld := &testutil.VtctldClient{ + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{}, + }, + }, + } + + req := &vtctldatapb.GetSchemaRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "otherzone", + Uid: 500, + }, + } + tablet := &vtadminpb.Tablet{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + } + + c := testutil.BuildCluster(testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + VtctldClient: vtctld, + }) + + err := c.Vtctld.Dial(ctx) + require.NoError(t, err, "could not dial test vtctld") + + c.GetSchema(ctx, "testkeyspace", cluster.GetSchemaOptions{ + BaseRequest: req, + Tablets: []*vtadminpb.Tablet{tablet}, + }) + + assert.NotEqual(t, req.TabletAlias, tablet.Tablet.Alias, "expected GetSchema to not modify original request object") + }) + + t.Run("size aggregation", func(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cfg testutil.TestClusterConfig + keyspace string + opts cluster.GetSchemaOptions + expected *vtadminpb.Schema + shouldErr bool + }{ + { + name: "success", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + 
Cell: "zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + VtctldClient: &testutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "-": { + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: false, + }, + }, + }, + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 100, + RowCount: 5, + }, + }, + }, + }, + }, + "zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 200, + RowCount: 420, + }, + }, + }, + }, + }, + }, + }, + }, + keyspace: "testkeyspace", + opts: cluster.GetSchemaOptions{ + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }, + expected: &vtadminpb.Schema{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) 
ENGINE=InnoDB", + DataLength: 0, + RowCount: 0, + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{ + "foo": { + DataLength: 100 + 200, + RowCount: 5 + 420, + ByShard: map[string]*vtadminpb.Schema_ShardTableSize{ + "-80": { + DataLength: 100, + RowCount: 5, + }, + "80-": { + DataLength: 200, + RowCount: 420, + }, + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "no serving tablets found for shard", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_NOT_SERVING, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + VtctldClient: &testutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "-": { + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: false, + }, + }, + }, + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 100, + RowCount: 5, + }, + }, + }, + }, + 
}, + "zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 200, + RowCount: 420, + }, + }, + }, + }, + }, + }, + }, + }, + keyspace: "testkeyspace", + opts: cluster.GetSchemaOptions{ + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "ignore TableNamesOnly", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + VtctldClient: &testutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "-": { + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: false, + }, + }, + }, + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + 
TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 100, + RowCount: 5, + }, + }, + }, + }, + }, + "zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 200, + RowCount: 420, + }, + }, + }, + }, + }, + }, + }, + }, + keyspace: "testkeyspace", + opts: cluster.GetSchemaOptions{ + BaseRequest: &vtctldatapb.GetSchemaRequest{ + TableNamesOnly: true, // Just checking things to blow up if this gets set. + }, + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }, + expected: &vtadminpb.Schema{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Keyspace: "testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 0, + RowCount: 0, + }, + }, + TableSizes: map[string]*vtadminpb.Schema_TableSize{ + "foo": { + DataLength: 100 + 200, + RowCount: 5 + 420, + ByShard: map[string]*vtadminpb.Schema_ShardTableSize{ + "-80": { + DataLength: 100, + RowCount: 5, + }, + "80-": { + DataLength: 200, + RowCount: 420, + }, + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "single GetSchema error fails the request", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, 
+ }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + VtctldClient: &testutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "-": { + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: false, + }, + }, + }, + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 100, + RowCount: 5, + }, + }, + }, + }, + }, + "zone1-0000000200": { + Error: assert.AnError, + }, + }, + }, + }, + keyspace: "testkeyspace", + opts: cluster.GetSchemaOptions{ + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "FindAllShardsInKeyspace error", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + VtctldClient: 
&testutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Error: assert.AnError, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 100, + RowCount: 5, + }, + }, + }, + }, + }, + "zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 200, + RowCount: 420, + }, + }, + }, + }, + }, + }, + }, + }, + keyspace: "testkeyspace", + opts: cluster.GetSchemaOptions{ + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "tablet filtering checks keyspace field", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: vtadminpb.Tablet_SERVING, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + VtctldClient: &testutil.VtctldClient{ + FindAllShardsInKeyspaceResults: map[string]struct { + Response 
*vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.FindAllShardsInKeyspaceResponse{ + Shards: map[string]*vtctldatapb.Shard{ + "-80": { + Name: "-80", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + "80-": { + Name: "80-", + Shard: &topodatapb.Shard{ + IsMasterServing: true, + }, + }, + }, + }, + }, + }, + GetSchemaResults: map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + }{ + "zone1-0000000100": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 100, + RowCount: 5, + }, + }, + }, + }, + }, + "zone1-0000000200": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "foo", + Schema: "CREATE TABLE foo (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 200, + RowCount: 420, + }, + }, + }, + }, + }, + "zone1-0000000300": { + Response: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_otherkeyspacekeyspcae", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "bar", + Schema: "CREATE TABLE bar (\n\tid INT(11) NOT NULL\n) ENGINE=InnoDB", + DataLength: 101, + RowCount: 202, + }, + }, + }, + }, + }, + }, + }, + }, + keyspace: "testkeyspace", + opts: cluster.GetSchemaOptions{ + TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: true, + }, + Tablets: []*vtadminpb.Tablet{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-80", + }, + State: 
vtadminpb.Tablet_SERVING, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 300, + }, + // Note this is for another keyspace, so we fail to find a tablet for testkeyspace/-80. + Keyspace: "otherkeyspace", + Shard: "80-", + }, + State: vtadminpb.Tablet_SERVING, + }, + }, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if tt.keyspace == "" { + t.SkipNow() + } + + c := testutil.BuildCluster(tt.cfg) + schema, err := c.GetSchema(ctx, tt.keyspace, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + if schema.TableDefinitions != nil { + // For simplicity, we're going to assert only on the state + // of the aggregated sizes (in schema.TableSizes), since the + // TableDefinitions size values depends on tablet iteration + // order, and that's not something we're interested in + // coupling the implementation to. + for _, td := range schema.TableDefinitions { + td.DataLength = 0 + td.RowCount = 0 + } + } + + assert.NoError(t, err) + testutil.AssertSchemaSlicesEqual(t, []*vtadminpb.Schema{tt.expected}, []*vtadminpb.Schema{schema}) + }) + } + }) +} + +func TestFindWorkflows(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cfg testutil.TestClusterConfig + keyspaces []string + opts cluster.FindWorkflowsOptions + expected *vtadminpb.ClusterWorkflows + shouldErr bool + }{ + { + name: "success", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + }, + }, + }, + }, + }, + }, + keyspaces: []string{"ks1"}, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: 
[]*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks1", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "error getting keyspaces is fatal", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: nil, + Error: assert.AnError, + }, + }, + }, + keyspaces: nil, + expected: nil, + shouldErr: true, + }, + { + name: "no keyspaces found", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{}, + Error: nil, + }, + }, + }, + keyspaces: nil, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: []*vtadminpb.Workflow{}, + }, + shouldErr: false, + }, + { + name: "when specifying keyspaces and IgnoreKeyspaces, IgnoreKeyspaces is discarded", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "ks1", + }, + { + Name: "ks2", + }, + }, + Error: nil, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, + }, + "ks2": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow_a", + }, + }, + }, + }, + }, + }, + }, + keyspaces: []string{"ks2"}, + opts: 
cluster.FindWorkflowsOptions{ + IgnoreKeyspaces: sets.NewString("ks2"), + }, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks2", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow_a", + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "ignore keyspaces", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetKeyspacesResults: struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + }{ + Keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "ks1", + }, + { + Name: "ks2", + }, + }, + Error: nil, + }, + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, + }, + "ks2": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow_a", + }, + }, + }, + }, + }, + }, + }, + keyspaces: nil, + opts: cluster.FindWorkflowsOptions{ + IgnoreKeyspaces: sets.NewString("ks2"), + }, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks1", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks1", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow2", + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "error getting workflows is fatal if all keyspaces fail", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + 
}{ + "ks1": { + Error: assert.AnError, + }, + }, + }, + }, + keyspaces: []string{"ks1"}, + expected: nil, + shouldErr: true, + }, + { + name: "error getting workflows is non-fatal if some keyspaces fail", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Error: assert.AnError, + }, + "ks2": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + }, + }, + }, + }, + }, + }, + keyspaces: []string{"ks1", "ks2"}, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks2", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow1", + }, + }, + }, + Warnings: []string{"something about ks1"}, + }, + shouldErr: false, + }, + { + name: "filtered workflows", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "include_me", + }, + { + Name: "dont_include_me", + }, + }, + }, + }, + }, + }, + }, + keyspaces: []string{"ks1"}, + opts: cluster.FindWorkflowsOptions{ + Filter: func(workflow *vtadminpb.Workflow) bool { + return strings.HasPrefix(workflow.Workflow.Name, "include_me") + }, + }, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks1", + Workflow: &vtctldatapb.Workflow{ + Name: "include_me", + }, + }, + }, + }, + shouldErr: false, + }, + } + + ctx := context.Background() + + for _, tt := 
range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + c := testutil.BuildCluster(tt.cfg) + workflows, err := c.FindWorkflows(ctx, tt.keyspaces, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + testutil.AssertClusterWorkflowsEqual(t, tt.expected, workflows) + }) + } +} + +// This test only validates the error handling on dialing database connections. +// Other cases are covered by one or both of TestFindTablets and TestFindTablet. +func TestGetTablets(t *testing.T) { + t.Parallel() + + disco := fakediscovery.New() + disco.AddTaggedGates(nil, &vtadminpb.VTGate{Hostname: "gate"}) + + db := vtsql.New(&vtsql.Config{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "one", + }, + Discovery: disco, + }) + db.DialFunc = func(cfg vitessdriver.Configuration) (*sql.DB, error) { + return nil, assert.AnError + } + + c := &cluster.Cluster{DB: db} + _, err := c.GetTablets(context.Background()) + assert.Error(t, err) +} + +func TestGetVSchema(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cfg testutil.TestClusterConfig + keyspace string + expected *vtadminpb.VSchema + shouldErr bool + }{ + { + name: "success", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + VtctldClient: &testutil.VtctldClient{ + GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{Sharded: true}, + }, + }, + }, + }, + }, + keyspace: "testkeyspace", + expected: &vtadminpb.VSchema{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + Name: "testkeyspace", + VSchema: &vschemapb.Keyspace{Sharded: true}, + }, + shouldErr: false, + }, + { + name: "error", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c0", + Name: "cluster0", + }, + VtctldClient: &testutil.VtctldClient{ + 
GetVSchemaResults: map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + }{ + "testkeyspace": { + Response: &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{Sharded: true}, + }, + }, + }, + }, + }, + keyspace: "notfound", + expected: nil, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cluster := testutil.BuildCluster(tt.cfg) + err := cluster.Vtctld.Dial(ctx) + require.NoError(t, err, "could not dial test vtctld") + + vschema, err := cluster.GetVSchema(ctx, tt.keyspace) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, vschema) + }) + } +} + +func TestGetWorkflow(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cfg testutil.TestClusterConfig + keyspace string + workflow string + opts cluster.GetWorkflowOptions + expected *vtadminpb.Workflow + shouldErr bool + }{ + { + name: "found", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "workflow1", + }, + { + Name: "workflow2", + }, + }, + }, + }, + }, + }, + }, + keyspace: "ks1", + workflow: "workflow2", + expected: &vtadminpb.Workflow{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks1", + Workflow: &vtctldatapb.Workflow{ + Name: "workflow2", + }, + }, + shouldErr: false, + }, + { + name: "error getting workflows", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + 
Error error + }{ + "ks1": { + Error: assert.AnError, + }, + }, + }, + }, + keyspace: "ks1", + workflow: "workflow2", + expected: nil, + shouldErr: true, + }, + { + name: "no workflows found", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{}, + }, + }, + }, + }, + }, + keyspace: "ks1", + workflow: "workflow2", + expected: nil, + shouldErr: true, + }, + { + name: "multiple workflows found", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "duplicate", + }, + { + Name: "duplicate", + }, + }, + }, + }, + }, + }, + }, + keyspace: "ks1", + workflow: "duplicate", + expected: nil, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + c := testutil.BuildCluster(tt.cfg) + workflow, err := c.GetWorkflow(ctx, tt.keyspace, tt.workflow, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, workflow) + }) + } +} + +func TestGetWorkflows(t *testing.T) { + t.Parallel() + + // Note: GetWorkflows is almost entirely a passthrough to FindWorkflows, so + // these test cases mostly just verify we're calling that function more or + // less correctly. 
+ + tests := []struct { + name string + cfg testutil.TestClusterConfig + keyspaces []string + opts cluster.GetWorkflowsOptions + expected *vtadminpb.ClusterWorkflows + shouldErr bool + }{ + { + name: "success", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "ks1-workflow1", + }, + }, + }, + }, + "ks2": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "ks2-workflow1", + }, + }, + }, + }, + }, + }, + }, + keyspaces: []string{"ks1", "ks2"}, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks1", + Workflow: &vtctldatapb.Workflow{ + Name: "ks1-workflow1", + }, + }, + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks2", + Workflow: &vtctldatapb.Workflow{ + Name: "ks2-workflow1", + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "partial error", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Response: &vtctldatapb.GetWorkflowsResponse{ + Workflows: []*vtctldatapb.Workflow{ + { + Name: "ks1-workflow1", + }, + }, + }, + }, + "ks2": { + Error: assert.AnError, + }, + }, + }, + }, + keyspaces: []string{"ks1", "ks2"}, + expected: &vtadminpb.ClusterWorkflows{ + Workflows: []*vtadminpb.Workflow{ + { + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + Keyspace: "ks1", + Workflow: &vtctldatapb.Workflow{ + Name: "ks1-workflow1", + }, + }, + }, + 
Warnings: []string{"something about ks2"}, + }, + shouldErr: false, + }, + { + name: "error", + cfg: testutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: "c1", + Name: "cluster1", + }, + VtctldClient: &testutil.VtctldClient{ + GetWorkflowsResults: map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + }{ + "ks1": { + Error: assert.AnError, + }, + }, + }, + }, + keyspaces: []string{"ks1"}, + expected: nil, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + c := testutil.BuildCluster(tt.cfg) + workflows, err := c.GetWorkflows(ctx, tt.keyspaces, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + testutil.AssertClusterWorkflowsEqual(t, tt.expected, workflows) + }) + } +} diff --git a/go/vt/vtadmin/cluster/config.go b/go/vt/vtadmin/cluster/config.go index 1b641f60458..bd693e5ad88 100644 --- a/go/vt/vtadmin/cluster/config.go +++ b/go/vt/vtadmin/cluster/config.go @@ -25,6 +25,7 @@ type Config struct { DiscoveryImpl string DiscoveryFlagsByImpl FlagsByImpl VtSQLFlags map[string]string + VtctldFlags map[string]string } // Cluster returns a new cluster instance from the given config. 
@@ -82,6 +83,7 @@ func (cfg Config) Merge(override Config) Config { DiscoveryImpl: cfg.DiscoveryImpl, DiscoveryFlagsByImpl: map[string]map[string]string{}, VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, } if override.ID != "" { @@ -104,6 +106,9 @@ func (cfg Config) Merge(override Config) Config { mergeStringMap(merged.VtSQLFlags, cfg.VtSQLFlags) mergeStringMap(merged.VtSQLFlags, override.VtSQLFlags) + mergeStringMap(merged.VtctldFlags, cfg.VtctldFlags) + mergeStringMap(merged.VtctldFlags, override.VtctldFlags) + return merged } diff --git a/go/vt/vtadmin/cluster/config_test.go b/go/vt/vtadmin/cluster/config_test.go index 22839b38323..5bdfaf49b65 100644 --- a/go/vt/vtadmin/cluster/config_test.go +++ b/go/vt/vtadmin/cluster/config_test.go @@ -25,6 +25,8 @@ import ( ) func TestMergeConfig(t *testing.T) { + t.Parallel() + tests := []struct { name string base Config @@ -46,6 +48,7 @@ func TestMergeConfig(t *testing.T) { DiscoveryImpl: "consul", DiscoveryFlagsByImpl: FlagsByImpl{}, VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, }, { @@ -81,11 +84,12 @@ func TestMergeConfig(t *testing.T) { "foo": "baz", }, }, - VtSQLFlags: map[string]string{}, + VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, }, { - name: "merging vtsql flags", + name: "merging vtsql/vtctld flags", base: Config{ ID: "c1", Name: "cluster1", @@ -93,6 +97,10 @@ func TestMergeConfig(t *testing.T) { "one": "one", "two": "2", }, + VtctldFlags: map[string]string{ + "a": "A", + "b": "B", + }, }, override: Config{ ID: "c1", @@ -101,6 +109,10 @@ func TestMergeConfig(t *testing.T) { "two": "two", "three": "three", }, + VtctldFlags: map[string]string{ + "a": "alpha", + "c": "C", + }, }, expected: Config{ ID: "c1", @@ -111,12 +123,21 @@ func TestMergeConfig(t *testing.T) { "two": "two", "three": "three", }, + VtctldFlags: map[string]string{ + "a": "alpha", + "b": "B", + "c": "C", + }, }, }, } for _, tt := range tests { + tt := tt + t.Run(tt.name, 
func(t *testing.T) { + t.Parallel() + actual := tt.base.Merge(tt.override) assert.Equal(t, tt.expected, actual) }) @@ -124,6 +145,8 @@ func TestMergeConfig(t *testing.T) { } func TestConfigUnmarshalYAML(t *testing.T) { + t.Parallel() + tests := []struct { name string yaml string @@ -172,7 +195,11 @@ discovery-zk-whatever: 5 } for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + cfg := Config{ DiscoveryFlagsByImpl: map[string]map[string]string{}, } diff --git a/go/vt/vtadmin/cluster/discovery/discovery.go b/go/vt/vtadmin/cluster/discovery/discovery.go index 3dbcb41f996..616c29a9f37 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery.go +++ b/go/vt/vtadmin/cluster/discovery/discovery.go @@ -33,6 +33,9 @@ var ( // ErrNoVTGates should be returned from DiscoverVTGate* methods when they // are unable to find any vtgates for the given filter/query/tags. ErrNoVTGates = errors.New("no vtgates found") + // ErrNoVtctlds should be returned from DiscoverVtctld* methods when they + // are unable to find any vtctlds for the given filter/query/tags. + ErrNoVtctlds = errors.New("no vtctlds found") ) // Discovery defines the interface that service discovery plugins must @@ -53,6 +56,21 @@ type Discovery interface { // Tags can optionally be used to filter gates. Order of the gates is not // specified by the interface, and can be implementation-specific. DiscoverVTGates(ctx context.Context, tags []string) ([]*vtadminpb.VTGate, error) + // DiscoverVtctld returns a vtctld found in the discovery service. + // Tags can optionally be used to filter the set of potential vtctlds + // further. Which vtctld in a set of found vtctlds is returned is not + // specified by the interface, and can be implementation-specific. + DiscoverVtctld(ctx context.Context, tags []string) (*vtadminpb.Vtctld, error) + // DiscoverVtctldAddr returns the address of a vtctld found in the discovery + // service. 
Tags can optionally be used to filter the set of potential + // vtctlds further. Which vtctld in a set of potential vtctld is used to + // return an address is not specified by the interface, and can be + // implementation-specific. + DiscoverVtctldAddr(ctx context.Context, tags []string) (string, error) + // DiscoverVtctlds returns a list of vtctlds found in the discovery service. + // Tags can optionally be used to filter vtctlds. Order of the vtctlds is + // not specified by the interface, and can be implementation-specific. + DiscoverVtctlds(ctx context.Context, tags []string) ([]*vtadminpb.Vtctld, error) } // Factory represents a function that can create a Discovery implementation. @@ -91,5 +109,5 @@ func New(impl string, cluster *vtadminpb.Cluster, args []string) (Discovery, err func init() { // nolint:gochecknoinits Register("consul", NewConsul) - Register("staticFile", NewStaticFile) + Register("staticfile", NewStaticFile) } diff --git a/go/vt/vtadmin/cluster/discovery/discovery_consul.go b/go/vt/vtadmin/cluster/discovery/discovery_consul.go index e67daabe10b..ddfdda27c8d 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_consul.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_consul.go @@ -19,6 +19,7 @@ package discovery import ( "bytes" "context" + "fmt" "math/rand" "strings" "text/template" @@ -48,6 +49,11 @@ type ConsulDiscovery struct { vtgateCellTag string vtgateKeyspacesToWatchTag string vtgateAddrTmpl *template.Template + + /* vtctld options */ + vtctldDatacenter string + vtctldService string + vtctldAddrTmpl *template.Template } // NewConsul returns a ConsulDiscovery for the given cluster. 
Args are a slice @@ -77,6 +83,7 @@ func NewConsul(cluster *vtadminpb.Cluster, flags *pflag.FlagSet, args []string) flags.StringVar(&disco.queryOptions.Token, "token", "", "consul ACL token to use for requests") flags.BoolVar(&disco.passingOnly, "passing-only", true, "whether to include only nodes passing healthchecks") + /* vtgate discovery config options */ flags.StringVar(&disco.vtgateService, "vtgate-service-name", "vtgate", "consul service name vtgates register as") flags.StringVar(&disco.vtgatePoolTag, "vtgate-pool-tag", "pool", "consul service tag to group vtgates by pool") flags.StringVar(&disco.vtgateCellTag, "vtgate-cell-tag", "cell", "consul service tag to group vtgates by cell") @@ -90,38 +97,67 @@ func NewConsul(cluster *vtadminpb.Cluster, flags *pflag.FlagSet, args []string) "The meta information about the cluster is provided to the template via {{ .Cluster }}. "+ "Used once during initialization.") + /* vtctld discovery config options */ + flags.StringVar(&disco.vtctldService, "vtctld-service-name", "vtctld", "consul service name vtctlds register as") + + vtctldAddrTmplStr := flags.String("vtctld-addr-tmpl", "{{ .Hostname }}", + "Go template string to produce a dialable address from a *vtadminpb.Vtctld") + vtctldDatacenterTmplStr := flags.String("vtctld-datacenter-tmpl", "", + "Go template string to generate the datacenter for vtgate consul queries. "+ + "The cluster name is provided to the template via {{ .Cluster }}. 
"+ + "Used once during initialization.") + if err := flags.Parse(args); err != nil { return nil, err } if *vtgateDatacenterTmplStr != "" { - tmpl, err := template.New("consul-vtgate-datacenter-" + cluster.Id).Parse(*vtgateDatacenterTmplStr) + disco.vtgateDatacenter, err = generateConsulDatacenter("vtgate", cluster, *vtgateDatacenterTmplStr) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to generate vtgate consul datacenter from template: %w", err) } + } - buf := bytes.NewBuffer(nil) - err = tmpl.Execute(buf, &struct { - Cluster *vtadminpb.Cluster - }{ - Cluster: cluster, - }) + disco.vtgateAddrTmpl, err = template.New("consul-vtgate-address-template-" + cluster.Id).Parse(*vtgateAddrTmplStr) + if err != nil { + return nil, fmt.Errorf("failed to parse vtgate host address template %s: %w", *vtgateAddrTmplStr, err) + } + if *vtctldDatacenterTmplStr != "" { + disco.vtctldDatacenter, err = generateConsulDatacenter("vtctld", cluster, *vtctldDatacenterTmplStr) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to generate vtctld consul datacenter from template: %w", err) } - - disco.vtgateDatacenter = buf.String() } - disco.vtgateAddrTmpl, err = template.New("consul-vtgate-address-template").Parse(*vtgateAddrTmplStr) + disco.vtctldAddrTmpl, err = template.New("consul-vtctld-address-template-" + cluster.Id).Parse(*vtctldAddrTmplStr) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to parse vtctld host address template %s: %w", *vtctldAddrTmplStr, err) } return disco, nil } +func generateConsulDatacenter(component string, cluster *vtadminpb.Cluster, tmplStr string) (string, error) { + tmpl, err := template.New("consul-" + component + "-datacenter-" + cluster.Id).Parse(tmplStr) + if err != nil { + return "", fmt.Errorf("error parsing template %s: %w", tmplStr, err) + } + + buf := bytes.NewBuffer(nil) + err = tmpl.Execute(buf, &struct { + Cluster *vtadminpb.Cluster + }{ + Cluster: cluster, + }) + + if err != nil { + return 
"", fmt.Errorf("failed to execute template: %w", err) + } + + return buf.String(), nil +} + // DiscoverVTGate is part of the Discovery interface. func (c *ConsulDiscovery) DiscoverVTGate(ctx context.Context, tags []string) (*vtadminpb.VTGate, error) { span, ctx := trace.NewSpan(ctx, "ConsulDiscovery.DiscoverVTGate") @@ -155,7 +191,7 @@ func (c *ConsulDiscovery) DiscoverVTGateAddr(ctx context.Context, tags []string) buf := bytes.NewBuffer(nil) if err := c.vtgateAddrTmpl.Execute(buf, vtgate); err != nil { - return "", err + return "", fmt.Errorf("failed to execute vtgate address template for %v: %w", vtgate, err) } return buf.String(), nil @@ -222,6 +258,79 @@ func (c *ConsulDiscovery) discoverVTGates(_ context.Context, tags []string) ([]* return vtgates, nil } +// DiscoverVtctld is part of the Discovery interface. +func (c *ConsulDiscovery) DiscoverVtctld(ctx context.Context, tags []string) (*vtadminpb.Vtctld, error) { + span, ctx := trace.NewSpan(ctx, "ConsulDiscovery.DiscoverVtctld") + defer span.Finish() + + return c.discoverVtctld(ctx, tags) +} + +func (c *ConsulDiscovery) discoverVtctld(ctx context.Context, tags []string) (*vtadminpb.Vtctld, error) { + vtctlds, err := c.discoverVtctlds(ctx, tags) + if err != nil { + return nil, err + } + + if len(vtctlds) == 0 { + return nil, ErrNoVtctlds + } + + return vtctlds[rand.Intn(len(vtctlds))], nil +} + +// DiscoverVtctldAddr is part of the Discovery interface. +func (c *ConsulDiscovery) DiscoverVtctldAddr(ctx context.Context, tags []string) (string, error) { + span, ctx := trace.NewSpan(ctx, "ConsulDiscovery.DiscoverVtctldAddr") + defer span.Finish() + + vtctld, err := c.discoverVtctld(ctx, tags) + if err != nil { + return "", err + } + + buf := bytes.NewBuffer(nil) + if err := c.vtctldAddrTmpl.Execute(buf, vtctld); err != nil { + return "", fmt.Errorf("failed to execute vtctld address template for %v: %w", vtctld, err) + } + + return buf.String(), nil +} + +// DiscoverVtctlds is part of the Discovery interface. 
+func (c *ConsulDiscovery) DiscoverVtctlds(ctx context.Context, tags []string) ([]*vtadminpb.Vtctld, error) { + span, ctx := trace.NewSpan(ctx, "ConsulDiscovery.DiscoverVtctlds") + defer span.Finish() + + return c.discoverVtctlds(ctx, tags) +} + +func (c *ConsulDiscovery) discoverVtctlds(_ context.Context, tags []string) ([]*vtadminpb.Vtctld, error) { + opts := c.getQueryOptions() + opts.Datacenter = c.vtctldDatacenter + + entries, _, err := c.client.Health().ServiceMultipleTags(c.vtctldService, tags, c.passingOnly, &opts) + if err != nil { + return nil, err + } + + vtctlds := make([]*vtadminpb.Vtctld, len(entries)) + + for i, entry := range entries { + vtctld := &vtadminpb.Vtctld{ + Cluster: &vtadminpb.Cluster{ + Id: c.cluster.Id, + Name: c.cluster.Name, + }, + Hostname: entry.Node.Node, + } + + vtctlds[i] = vtctld + } + + return vtctlds, nil +} + // getQueryOptions returns a shallow copy so we can swap in the vtgateDatacenter. // If we were to set it directly, we'd need a mutex to guard against concurrent // vtgate and (soon) vtctld queries. 
diff --git a/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go b/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go index 8d8e67722c3..a298e5906c9 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go @@ -87,6 +87,8 @@ func consulServiceEntry(name string, tags []string, meta map[string]string) *con } func TestConsulDiscoverVTGates(t *testing.T) { + t.Parallel() + tests := []struct { name string disco *ConsulDiscovery @@ -228,15 +230,21 @@ func TestConsulDiscoverVTGates(t *testing.T) { }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tt.disco.client = &fakeConsulClient{ health: &fakeConsulHealth{ entries: tt.entries, }, } - gates, err := tt.disco.DiscoverVTGates(context.Background(), tt.tags) + gates, err := tt.disco.DiscoverVTGates(ctx, tt.tags) if tt.shouldErr { assert.Error(t, err, assert.AnError) return @@ -249,6 +257,8 @@ func TestConsulDiscoverVTGates(t *testing.T) { } func TestConsulDiscoverVTGate(t *testing.T) { + t.Parallel() + tests := []struct { name string disco *ConsulDiscovery @@ -331,15 +341,21 @@ func TestConsulDiscoverVTGate(t *testing.T) { }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tt.disco.client = &fakeConsulClient{ health: &fakeConsulHealth{ entries: tt.entries, }, } - gate, err := tt.disco.DiscoverVTGate(context.Background(), tt.tags) + gate, err := tt.disco.DiscoverVTGate(ctx, tt.tags) if tt.shouldErr { assert.Error(t, err, assert.AnError) return @@ -352,6 +368,8 @@ func TestConsulDiscoverVTGate(t *testing.T) { } func TestConsulDiscoverVTGateAddr(t *testing.T) { + t.Parallel() + tests := []struct { name string disco *ConsulDiscovery @@ -421,15 +439,21 @@ func TestConsulDiscoverVTGateAddr(t *testing.T) { }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + 
t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tt.disco.client = &fakeConsulClient{ health: &fakeConsulHealth{ entries: tt.entries, }, } - addr, err := tt.disco.DiscoverVTGateAddr(context.Background(), tt.tags) + addr, err := tt.disco.DiscoverVTGateAddr(ctx, tt.tags) if tt.shouldErr { assert.Error(t, err, assert.AnError) return diff --git a/go/vt/vtadmin/cluster/discovery/discovery_static_file.go b/go/vt/vtadmin/cluster/discovery/discovery_static_file.go index fdb94e65f21..59a759e6c5d 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_static_file.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_static_file.go @@ -20,11 +20,13 @@ import ( "context" "encoding/json" "errors" + "fmt" "io/ioutil" "math/rand" "github.com/spf13/pflag" + "vitess.io/vitess/go/trace" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) @@ -52,11 +54,16 @@ type StaticFileDiscovery struct { byName map[string]*vtadminpb.VTGate byTag map[string][]*vtadminpb.VTGate } + vtctlds struct { + byName map[string]*vtadminpb.Vtctld + byTag map[string][]*vtadminpb.Vtctld + } } // StaticFileClusterConfig configures Vitess components for a single cluster. type StaticFileClusterConfig struct { VTGates []*StaticFileVTGateConfig `json:"vtgates,omitempty"` + Vtctlds []*StaticFileVtctldConfig `json:"vtctlds,omitempty"` } // StaticFileVTGateConfig contains host and tag information for a single VTGate in a cluster. @@ -65,6 +72,13 @@ type StaticFileVTGateConfig struct { Tags []string `json:"tags"` } +// StaticFileVtctldConfig contains a host and tag information for a single +// Vtctld in a cluster. +type StaticFileVtctldConfig struct { + Host *vtadminpb.Vtctld `json:"host"` + Tags []string `json:"tags"` +} + // NewStaticFile returns a StaticFileDiscovery for the given cluster. 
func NewStaticFile(cluster *vtadminpb.Cluster, flags *pflag.FlagSet, args []string) (Discovery, error) { disco := &StaticFileDiscovery{ @@ -94,7 +108,7 @@ func NewStaticFile(cluster *vtadminpb.Cluster, flags *pflag.FlagSet, args []stri func (d *StaticFileDiscovery) parseConfig(bytes []byte) error { if err := json.Unmarshal(bytes, &d.config); err != nil { - return err + return fmt.Errorf("failed to unmarshal staticfile config from json: %w", err) } d.gates.byName = make(map[string]*vtadminpb.VTGate, len(d.config.VTGates)) @@ -108,12 +122,32 @@ func (d *StaticFileDiscovery) parseConfig(bytes []byte) error { d.gates.byTag[tag] = append(d.gates.byTag[tag], gate.Host) } } + + d.vtctlds.byName = make(map[string]*vtadminpb.Vtctld, len(d.config.Vtctlds)) + d.vtctlds.byTag = make(map[string][]*vtadminpb.Vtctld) + + // Index the vtctlds by name and by tag for easier lookups + for _, vtctld := range d.config.Vtctlds { + d.vtctlds.byName[vtctld.Host.Hostname] = vtctld.Host + + for _, tag := range vtctld.Tags { + d.vtctlds.byTag[tag] = append(d.vtctlds.byTag[tag], vtctld.Host) + } + } + return nil } // DiscoverVTGate is part of the Discovery interface. func (d *StaticFileDiscovery) DiscoverVTGate(ctx context.Context, tags []string) (*vtadminpb.VTGate, error) { - gates, err := d.DiscoverVTGates(ctx, tags) + span, ctx := trace.NewSpan(ctx, "StaticFileDiscovery.DiscoverVTGate") + defer span.Finish() + + return d.discoverVTGate(ctx, tags) +} + +func (d *StaticFileDiscovery) discoverVTGate(ctx context.Context, tags []string) (*vtadminpb.VTGate, error) { + gates, err := d.discoverVTGates(ctx, tags) if err != nil { return nil, err } @@ -129,6 +163,9 @@ func (d *StaticFileDiscovery) DiscoverVTGate(ctx context.Context, tags []string) // DiscoverVTGateAddr is part of the Discovery interface. 
func (d *StaticFileDiscovery) DiscoverVTGateAddr(ctx context.Context, tags []string) (string, error) { + span, ctx := trace.NewSpan(ctx, "StaticFileDiscovery.DiscoverVTGateAddr") + defer span.Finish() + gate, err := d.DiscoverVTGate(ctx, tags) if err != nil { return "", err @@ -139,6 +176,13 @@ func (d *StaticFileDiscovery) DiscoverVTGateAddr(ctx context.Context, tags []str // DiscoverVTGates is part of the Discovery interface. func (d *StaticFileDiscovery) DiscoverVTGates(ctx context.Context, tags []string) ([]*vtadminpb.VTGate, error) { + span, ctx := trace.NewSpan(ctx, "StaticFileDiscovery.DiscoverVTGates") + defer span.Finish() + + return d.discoverVTGates(ctx, tags) +} + +func (d *StaticFileDiscovery) discoverVTGates(ctx context.Context, tags []string) ([]*vtadminpb.VTGate, error) { if len(tags) == 0 { results := []*vtadminpb.VTGate{} for _, g := range d.gates.byName { @@ -175,3 +219,85 @@ func (d *StaticFileDiscovery) DiscoverVTGates(ctx context.Context, tags []string return results, nil } + +// DiscoverVtctld is part of the Discovery interface. +func (d *StaticFileDiscovery) DiscoverVtctld(ctx context.Context, tags []string) (*vtadminpb.Vtctld, error) { + span, ctx := trace.NewSpan(ctx, "StaticFileDiscovery.DiscoverVtctld") + defer span.Finish() + + return d.discoverVtctld(ctx, tags) +} + +func (d *StaticFileDiscovery) discoverVtctld(ctx context.Context, tags []string) (*vtadminpb.Vtctld, error) { + vtctlds, err := d.discoverVtctlds(ctx, tags) + if err != nil { + return nil, err + } + + count := len(vtctlds) + if count == 0 { + return nil, ErrNoVtctlds + } + + vtctld := vtctlds[rand.Intn(len(vtctlds))] + return vtctld, nil +} + +// DiscoverVtctldAddr is part of the Discovery interface. 
+func (d *StaticFileDiscovery) DiscoverVtctldAddr(ctx context.Context, tags []string) (string, error) { + span, ctx := trace.NewSpan(ctx, "StaticFileDiscovery.DiscoverVtctldAddr") + defer span.Finish() + + vtctld, err := d.discoverVtctld(ctx, tags) + if err != nil { + return "", err + } + + return vtctld.Hostname, nil +} + +// DiscoverVtctlds is part of the Discovery interface. +func (d *StaticFileDiscovery) DiscoverVtctlds(ctx context.Context, tags []string) ([]*vtadminpb.Vtctld, error) { + span, ctx := trace.NewSpan(ctx, "StaticFileDiscovery.DiscoverVtctlds") + defer span.Finish() + + return d.discoverVtctlds(ctx, tags) +} + +func (d *StaticFileDiscovery) discoverVtctlds(ctx context.Context, tags []string) ([]*vtadminpb.Vtctld, error) { + if len(tags) == 0 { + results := []*vtadminpb.Vtctld{} + for _, v := range d.vtctlds.byName { + results = append(results, v) + } + + return results, nil + } + + set := d.vtctlds.byName + + for _, tag := range tags { + intermediate := map[string]*vtadminpb.Vtctld{} + + vtctlds, ok := d.vtctlds.byTag[tag] + if !ok { + return []*vtadminpb.Vtctld{}, nil + } + + for _, v := range vtctlds { + if _, ok := set[v.Hostname]; ok { + intermediate[v.Hostname] = v + } + } + + set = intermediate + } + + results := make([]*vtadminpb.Vtctld, 0, len(set)) + + for _, vtctld := range set { + results = append(results, vtctld) + } + + return results, nil +} diff --git a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go index 934f8ea3da8..8fa049f8540 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go @@ -28,6 +28,8 @@ import ( ) func TestDiscoverVTGate(t *testing.T) { + t.Parallel() + tests := []struct { name string contents []byte @@ -89,15 +91,21 @@ func TestDiscoverVTGate(t *testing.T) { }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + t.Run(tt.name, 
func(t *testing.T) { + t.Parallel() + disco := &StaticFileDiscovery{} err := disco.parseConfig(tt.contents) require.NoError(t, err) - gate, err := disco.DiscoverVTGate(context.Background(), tt.tags) + gate, err := disco.DiscoverVTGate(ctx, tt.tags) if tt.shouldErr { - assert.Error(t, err, assert.AnError) + assert.Error(t, err) return } @@ -108,6 +116,8 @@ func TestDiscoverVTGate(t *testing.T) { } func TestDiscoverVTGates(t *testing.T) { + t.Parallel() + tests := []struct { name string contents []byte @@ -226,20 +236,26 @@ func TestDiscoverVTGates(t *testing.T) { }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + disco := &StaticFileDiscovery{} err := disco.parseConfig(tt.contents) if tt.shouldErrConfig { - assert.Error(t, err, assert.AnError) + assert.Error(t, err) } else { require.NoError(t, err) } - gates, err := disco.DiscoverVTGates(context.Background(), tt.tags) + gates, err := disco.DiscoverVTGates(ctx, tt.tags) if tt.shouldErr { - assert.Error(t, err, assert.AnError) + assert.Error(t, err) return } @@ -248,3 +264,241 @@ func TestDiscoverVTGates(t *testing.T) { }) } } + +func TestDiscoverVtctld(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + contents []byte + expected *vtadminpb.Vtctld + tags []string + shouldErr bool + }{ + { + name: "empty config", + contents: []byte(`{}`), + expected: nil, + shouldErr: true, + }, + { + name: "one vtctld", + contents: []byte(` + { + "vtctlds": [{ + "host": { + "hostname": "127.0.0.1:12345" + } + }] + } + `), + expected: &vtadminpb.Vtctld{ + Hostname: "127.0.0.1:12345", + }, + }, + { + name: "filtered by tags (one match)", + contents: []byte(` + { + "vtctlds": [ + { + "host": { + "hostname": "127.0.0.1:11111" + }, + "tags": ["cell:cellA"] + }, + { + "host": { + "hostname": "127.0.0.1:22222" + }, + "tags": ["cell:cellB"] + }, + { + "host": { + "hostname": "127.0.0.1:33333" + }, + "tags": ["cell:cellA"] + } + ] + } + `), + 
expected: &vtadminpb.Vtctld{ + Hostname: "127.0.0.1:22222", + }, + tags: []string{"cell:cellB"}, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + disco := &StaticFileDiscovery{} + err := disco.parseConfig(tt.contents) + require.NoError(t, err) + + vtctld, err := disco.DiscoverVtctld(ctx, tt.tags) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, vtctld) + }) + } +} + +func TestDiscoverVtctlds(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + contents []byte + tags []string + expected []*vtadminpb.Vtctld + // True if the test should produce an error on the DiscoverVtctlds call + shouldErr bool + // True if the test should produce an error on the disco.parseConfig step + shouldErrConfig bool + }{ + { + name: "empty config", + contents: []byte(`{}`), + expected: []*vtadminpb.Vtctld{}, + shouldErr: false, + }, + { + name: "no tags", + contents: []byte(` + { + "vtctlds": [ + { + "host": { + "hostname": "127.0.0.1:12345" + } + }, + { + "host": { + "hostname": "127.0.0.1:67890" + } + } + ] + } + `), + expected: []*vtadminpb.Vtctld{ + {Hostname: "127.0.0.1:12345"}, + {Hostname: "127.0.0.1:67890"}, + }, + shouldErr: false, + }, + { + name: "filtered by tags", + contents: []byte(` + { + "vtctlds": [ + { + "host": { + "hostname": "127.0.0.1:11111" + }, + "tags": ["cell:cellA"] + }, + { + "host": { + "hostname": "127.0.0.1:22222" + }, + "tags": ["cell:cellB"] + }, + { + "host": { + "hostname": "127.0.0.1:33333" + }, + "tags": ["cell:cellA"] + } + ] + } + `), + tags: []string{"cell:cellA"}, + expected: []*vtadminpb.Vtctld{ + {Hostname: "127.0.0.1:11111"}, + {Hostname: "127.0.0.1:33333"}, + }, + shouldErr: false, + }, + { + name: "filtered by multiple tags", + contents: []byte(` + { + "vtctlds": [ + { + "host": { + "hostname": "127.0.0.1:11111" + }, + "tags": ["cell:cellA"] + }, + { + "host": { + 
"hostname": "127.0.0.1:22222" + }, + "tags": ["cell:cellA", "pool:poolZ"] + }, + { + "host": { + "hostname": "127.0.0.1:33333" + }, + "tags": ["pool:poolZ"] + } + ] + } + `), + tags: []string{"cell:cellA", "pool:poolZ"}, + expected: []*vtadminpb.Vtctld{ + {Hostname: "127.0.0.1:22222"}, + }, + shouldErr: false, + }, + { + name: "invalid json", + contents: []byte(` + { + "vtctlds": "malformed" + } + `), + tags: []string{}, + shouldErr: false, + shouldErrConfig: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + disco := &StaticFileDiscovery{} + + err := disco.parseConfig(tt.contents) + if tt.shouldErrConfig { + assert.Error(t, err) + } else { + require.NoError(t, err) + } + + vtctlds, err := disco.DiscoverVtctlds(ctx, tt.tags) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.ElementsMatch(t, tt.expected, vtctlds) + }) + } +} diff --git a/go/vt/vtadmin/cluster/discovery/discovery_test.go b/go/vt/vtadmin/cluster/discovery/discovery_test.go index b00071288c7..3ee67cbc9df 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_test.go @@ -17,7 +17,9 @@ limitations under the License. package discovery import ( + "fmt" "testing" + "time" "github.com/stretchr/testify/assert" @@ -25,6 +27,8 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() + tests := []struct { name string impl string @@ -46,7 +50,11 @@ func TestNew(t *testing.T) { } for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + disco, err := New(tt.impl, &vtadminpb.Cluster{Id: "testid", Name: "testcluster"}, []string{}) if tt.err != nil { assert.Error(t, err, tt.err.Error()) @@ -60,7 +68,13 @@ func TestNew(t *testing.T) { } func TestRegister(t *testing.T) { - Register("testfactory", nil) + t.Parallel() + + // Use a timestamp to allow running tests with `-count=N`. 
+ ts := time.Now().UnixNano() + factoryName := fmt.Sprintf("testfactory-%d", ts) + + Register(factoryName, nil) defer func() { err := recover() @@ -70,6 +84,6 @@ func TestRegister(t *testing.T) { }() // this one panics - Register("testfactory", nil) + Register(factoryName, nil) assert.Equal(t, 1, 2, "double register should have panicked") } diff --git a/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go b/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go index 896902d41d2..87f7dd5fdda 100644 --- a/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go +++ b/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go @@ -28,6 +28,12 @@ import ( vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) +type vtctlds struct { + byTag map[string][]*vtadminpb.Vtctld + byName map[string]*vtadminpb.Vtctld + shouldErr bool +} + type gates struct { byTag map[string][]*vtadminpb.VTGate byName map[string]*vtadminpb.VTGate @@ -36,7 +42,8 @@ type gates struct { // Fake is a fake discovery implementation for use in testing. type Fake struct { - gates *gates + gates *gates + vtctlds *vtctlds } // New returns a new fake. @@ -46,12 +53,16 @@ func New() *Fake { byTag: map[string][]*vtadminpb.VTGate{}, byName: map[string]*vtadminpb.VTGate{}, }, + vtctlds: &vtctlds{ + byTag: map[string][]*vtadminpb.Vtctld{}, + byName: map[string]*vtadminpb.Vtctld{}, + }, } } // AddTaggedGates adds the given gates to the discovery fake, associating each // gate with each tag. To tag different gates with multiple tags, call multiple -// times with the same gates but different tag slices. Gates an uniquely +// times with the same gates but different tag slices. Gates are uniquely // identified by hostname. 
func (d *Fake) AddTaggedGates(tags []string, gates ...*vtadminpb.VTGate) { for _, tag := range tags { @@ -63,6 +74,20 @@ func (d *Fake) AddTaggedGates(tags []string, gates ...*vtadminpb.VTGate) { } } +// AddTaggedVtctlds adds the given vtctlds to the discovery fake, associating +// each vtctld with each tag. To tag different vtctlds with multiple tags, call +// multiple times with the same vtctlds but different tag slices. Vtctlds are +// uniquely identified by hostname. +func (d *Fake) AddTaggedVtctlds(tags []string, vtctlds ...*vtadminpb.Vtctld) { + for _, tag := range tags { + d.vtctlds.byTag[tag] = append(d.vtctlds.byTag[tag], vtctlds...) + } + + for _, vtctld := range vtctlds { + d.vtctlds.byName[vtctld.Hostname] = vtctld + } +} + // SetGatesError instructs whether the fake should return an error on gate // discovery functions. func (d *Fake) SetGatesError(shouldErr bool) { @@ -137,3 +162,70 @@ func (d *Fake) DiscoverVTGateAddr(ctx context.Context, tags []string) (string, e return gate.Hostname, nil } + +// DiscoverVtctlds is part of the discover.Discovery interface. 
+func (d *Fake) DiscoverVtctlds(ctx context.Context, tags []string) ([]*vtadminpb.Vtctld, error) { + if d.vtctlds.shouldErr { + return nil, assert.AnError + } + + if len(tags) == 0 { + results := make([]*vtadminpb.Vtctld, 0, len(d.vtctlds.byName)) + for _, vtctld := range d.vtctlds.byName { + results = append(results, vtctld) + } + + return results, nil + } + + set := d.vtctlds.byName + + for _, tag := range tags { + intermediate := map[string]*vtadminpb.Vtctld{} + + vtctlds, ok := d.vtctlds.byTag[tag] + if !ok { + return []*vtadminpb.Vtctld{}, nil + } + + for _, v := range vtctlds { + if _, ok := set[v.Hostname]; ok { + intermediate[v.Hostname] = v + } + } + + set = intermediate + } + + results := make([]*vtadminpb.Vtctld, 0, len(set)) + + for _, vtctld := range set { + results = append(results, vtctld) + } + + return results, nil +} + +// DiscoverVtctldAddr is part of the discover.Discovery interface. +func (d *Fake) DiscoverVtctldAddr(ctx context.Context, tags []string) (string, error) { + vtctld, err := d.DiscoverVtctld(ctx, tags) + if err != nil { + return "", err + } + + return vtctld.Hostname, nil +} + +// DiscoverVtctld is part of the discover.Discovery interface. 
+func (d *Fake) DiscoverVtctld(ctx context.Context, tags []string) (*vtadminpb.Vtctld, error) { + vtctlds, err := d.DiscoverVtctlds(ctx, tags) + if err != nil { + return nil, err + } + + if len(vtctlds) == 0 { + return nil, assert.AnError + } + + return vtctlds[rand.Intn(len(vtctlds))], nil +} diff --git a/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery_test.go b/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery_test.go index 25bdd0f57ae..841f73e70de 100644 --- a/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery_test.go +++ b/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery_test.go @@ -26,6 +26,8 @@ import ( ) func TestDiscoverVTGates(t *testing.T) { + t.Parallel() + fake := New() gates := []*vtadminpb.VTGate{ { @@ -39,33 +41,35 @@ func TestDiscoverVTGates(t *testing.T) { }, } + ctx := context.Background() + fake.AddTaggedGates(nil, gates...) fake.AddTaggedGates([]string{"tag1:val1"}, gates[0], gates[1]) fake.AddTaggedGates([]string{"tag2:val2"}, gates[0], gates[2]) - actual, err := fake.DiscoverVTGates(context.Background(), nil) + actual, err := fake.DiscoverVTGates(ctx, nil) assert.NoError(t, err) assert.ElementsMatch(t, gates, actual) - actual, err = fake.DiscoverVTGates(context.Background(), []string{"tag1:val1"}) + actual, err = fake.DiscoverVTGates(ctx, []string{"tag1:val1"}) assert.NoError(t, err) assert.ElementsMatch(t, []*vtadminpb.VTGate{gates[0], gates[1]}, actual) - actual, err = fake.DiscoverVTGates(context.Background(), []string{"tag2:val2"}) + actual, err = fake.DiscoverVTGates(ctx, []string{"tag2:val2"}) assert.NoError(t, err) assert.ElementsMatch(t, []*vtadminpb.VTGate{gates[0], gates[2]}, actual) - actual, err = fake.DiscoverVTGates(context.Background(), []string{"tag1:val1", "tag2:val2"}) + actual, err = fake.DiscoverVTGates(ctx, []string{"tag1:val1", "tag2:val2"}) assert.NoError(t, err) assert.ElementsMatch(t, []*vtadminpb.VTGate{gates[0]}, actual) - actual, err = fake.DiscoverVTGates(context.Background(), 
[]string{"differentTag:val"}) + actual, err = fake.DiscoverVTGates(ctx, []string{"differentTag:val"}) assert.NoError(t, err) assert.Equal(t, []*vtadminpb.VTGate{}, actual) fake.SetGatesError(true) - actual, err = fake.DiscoverVTGates(context.Background(), nil) + actual, err = fake.DiscoverVTGates(ctx, nil) assert.Error(t, err) assert.Nil(t, actual) } diff --git a/go/vt/vtadmin/cluster/file_config_test.go b/go/vt/vtadmin/cluster/file_config_test.go index 00e3108db3c..07bf6dbddbd 100644 --- a/go/vt/vtadmin/cluster/file_config_test.go +++ b/go/vt/vtadmin/cluster/file_config_test.go @@ -25,6 +25,8 @@ import ( ) func TestFileConfigUnmarshalYAML(t *testing.T) { + t.Parallel() + tests := []struct { name string yaml string @@ -35,16 +37,19 @@ func TestFileConfigUnmarshalYAML(t *testing.T) { name: "simple", yaml: `defaults: discovery: consul - discovery-consul-vtgate-datacenter-tmpl: "dev-{{ .Cluster }}" + discovery-consul-vtctld-datacenter-tmpl: "dev-{{ .Cluster.Name }}" + discovery-consul-vtctld-service-name: vtctld-svc + discovery-consul-vtctld-addr-tmpl: "{{ .Hostname }}.example.com:15000" + discovery-consul-vtgate-datacenter-tmpl: "dev-{{ .Cluster.Name }}" discovery-consul-vtgate-service-name: vtgate-svc discovery-consul-vtgate-pool-tag: type discovery-consul-vtgate-cell-tag: zone - discovery-consul-vtgate-addr-tmpl: "{{ .Name }}.example.com:15999" + discovery-consul-vtgate-addr-tmpl: "{{ .Hostname }}.example.com:15999" clusters: c1: name: testcluster1 - discovery-consul-vtgate-datacenter-tmpl: "dev-{{ .Cluster }}-test" + discovery-consul-vtgate-datacenter-tmpl: "dev-{{ .Cluster.Name }}-test" c2: name: devcluster`, config: FileConfig{ @@ -52,11 +57,14 @@ clusters: DiscoveryImpl: "consul", DiscoveryFlagsByImpl: map[string]map[string]string{ "consul": { - "vtgate-datacenter-tmpl": "dev-{{ .Cluster }}", + "vtctld-datacenter-tmpl": "dev-{{ .Cluster.Name }}", + "vtctld-service-name": "vtctld-svc", + "vtctld-addr-tmpl": "{{ .Hostname }}.example.com:15000", + 
"vtgate-datacenter-tmpl": "dev-{{ .Cluster.Name }}", "vtgate-service-name": "vtgate-svc", "vtgate-pool-tag": "type", "vtgate-cell-tag": "zone", - "vtgate-addr-tmpl": "{{ .Name }}.example.com:15999", + "vtgate-addr-tmpl": "{{ .Hostname }}.example.com:15999", }, }, }, @@ -66,16 +74,18 @@ clusters: Name: "testcluster1", DiscoveryFlagsByImpl: map[string]map[string]string{ "consul": { - "vtgate-datacenter-tmpl": "dev-{{ .Cluster }}-test", + "vtgate-datacenter-tmpl": "dev-{{ .Cluster.Name }}-test", }, }, - VtSQLFlags: map[string]string{}, + VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, "c2": { ID: "c2", Name: "devcluster", DiscoveryFlagsByImpl: map[string]map[string]string{}, VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, }, }, @@ -84,7 +94,11 @@ clusters: } for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + cfg := FileConfig{ Defaults: Config{ DiscoveryFlagsByImpl: map[string]map[string]string{}, @@ -105,6 +119,8 @@ clusters: } func TestCombine(t *testing.T) { + t.Parallel() + tests := []struct { name string fc FileConfig @@ -153,7 +169,8 @@ func TestCombine(t *testing.T) { "vtgate-datacenter-tmpl": "dev-{{ .Cluster }}", }, }, - VtSQLFlags: map[string]string{}, + VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, { ID: "2", @@ -164,7 +181,8 @@ func TestCombine(t *testing.T) { "vtgate-datacenter-tmpl": "dev-{{ .Cluster }}-test", }, }, - VtSQLFlags: map[string]string{}, + VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, }, }, @@ -212,7 +230,8 @@ func TestCombine(t *testing.T) { "flag": "val", }, }, - VtSQLFlags: map[string]string{}, + VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, { ID: "c2", @@ -223,7 +242,8 @@ func TestCombine(t *testing.T) { "flag": "val", }, }, - VtSQLFlags: map[string]string{}, + VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, { ID: "c3", @@ -234,14 +254,19 @@ func 
TestCombine(t *testing.T) { "flag": "val", }, }, - VtSQLFlags: map[string]string{}, + VtSQLFlags: map[string]string{}, + VtctldFlags: map[string]string{}, }, }, }, } for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + actual := tt.fc.Combine(tt.defaults, tt.configs) assert.ElementsMatch(t, tt.expected, actual) }) diff --git a/go/vt/vtadmin/cluster/flags.go b/go/vt/vtadmin/cluster/flags.go index bb9b180183a..f0c600109c5 100644 --- a/go/vt/vtadmin/cluster/flags.go +++ b/go/vt/vtadmin/cluster/flags.go @@ -150,6 +150,12 @@ func parseOne(cfg *Config, name string, val string) error { cfg.VtSQLFlags[strings.TrimPrefix(name, "vtsql-")] = val return nil + } else if strings.HasPrefix(name, "vtctld-") { + if cfg.VtctldFlags == nil { + cfg.VtctldFlags = map[string]string{} + } + + cfg.VtctldFlags[strings.TrimPrefix(name, "vtctld-")] = val } match := discoveryFlagRegexp.FindStringSubmatch(name) diff --git a/go/vt/vtadmin/cluster/flags_test.go b/go/vt/vtadmin/cluster/flags_test.go index 48867ac9441..676c0243029 100644 --- a/go/vt/vtadmin/cluster/flags_test.go +++ b/go/vt/vtadmin/cluster/flags_test.go @@ -23,6 +23,8 @@ import ( ) func TestMergeFlagsByImpl(t *testing.T) { + t.Parallel() + var NilMap map[string]map[string]string tests := []struct { @@ -95,11 +97,15 @@ func TestMergeFlagsByImpl(t *testing.T) { }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - flags := FlagsByImpl(test.base) - flags.Merge(test.in) - assert.Equal(t, FlagsByImpl(test.expected), flags) + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + flags := FlagsByImpl(tt.base) + flags.Merge(tt.in) + assert.Equal(t, FlagsByImpl(tt.expected), flags) }) } } diff --git a/go/vt/vtadmin/cluster/trace.go b/go/vt/vtadmin/cluster/trace.go new file mode 100644 index 00000000000..06b16761c3c --- /dev/null +++ b/go/vt/vtadmin/cluster/trace.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "strings" + + "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtadmin/vtadminproto" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// AnnotateSpan adds the cluster_id and cluster_name to a span. +func AnnotateSpan(c *Cluster, span trace.Span) { + vtadminproto.AnnotateClusterSpan(c.ToProto(), span) + // (TODO:@ajm188) add support for discovery impls to add annotations to a + // span, like `discovery_impl` and any parameters that might be relevant. +} + +// (TODO: @ajm188) perhaps we want a ./go/vt/vtctl/vtctlproto package for this? +func annotateGetSchemaRequest(req *vtctldatapb.GetSchemaRequest, span trace.Span) { + if req.TabletAlias != nil { + span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias)) + } + + span.Annotate("exclude_tables", strings.Join(req.ExcludeTables, ",")) + span.Annotate("tables", strings.Join(req.Tables, ",")) + span.Annotate("include_views", req.IncludeViews) + span.Annotate("table_names_only", req.TableNamesOnly) + span.Annotate("table_sizes_only", req.TableSizesOnly) +} diff --git a/go/vt/vtadmin/credentials/credentials.go b/go/vt/vtadmin/credentials/credentials.go new file mode 100644 index 00000000000..54e87dec4f6 --- /dev/null +++ b/go/vt/vtadmin/credentials/credentials.go @@ -0,0 +1,72 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "text/template" + + "vitess.io/vitess/go/vt/grpcclient" +) + +// LoadFromTemplate renders a template string into a path, using the data +// provided as the template data. It then loads the contents at the resulting +// path as a JSON file containing a grpcclient.StaticAuthClientCreds, and +// returns both the parsed credentials as well as the concrete path used. +func LoadFromTemplate(tmplStr string, data interface{}) (*grpcclient.StaticAuthClientCreds, string, error) { + path, err := renderTemplate(tmplStr, data) + if err != nil { + return nil, "", err + } + + creds, err := loadCredentials(path) + if err != nil { + return nil, "", err + } + + return creds, path, nil +} + +func renderTemplate(tmplStr string, data interface{}) (string, error) { + tmpl, err := template.New("").Parse(tmplStr) + if err != nil { + return "", err + } + + buf := bytes.NewBuffer(nil) + if err := tmpl.Execute(buf, data); err != nil { + return "", err + } + + return buf.String(), nil +} + +func loadCredentials(path string) (*grpcclient.StaticAuthClientCreds, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + var creds grpcclient.StaticAuthClientCreds + if err := json.Unmarshal(data, &creds); err != nil { + return nil, err + } + + return &creds, nil +} diff --git a/go/vt/vtadmin/vtsql/credentials_test.go 
b/go/vt/vtadmin/credentials/credentials_test.go similarity index 94% rename from go/vt/vtadmin/vtsql/credentials_test.go rename to go/vt/vtadmin/credentials/credentials_test.go index 1a20c22816b..eacea70408c 100644 --- a/go/vt/vtadmin/vtsql/credentials_test.go +++ b/go/vt/vtadmin/credentials/credentials_test.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package vtsql +package credentials import ( "io/ioutil" @@ -28,6 +28,8 @@ import ( ) func Test_loadCredentials(t *testing.T) { + t.Parallel() + tests := []struct { name string contents []byte @@ -55,7 +57,11 @@ func Test_loadCredentials(t *testing.T) { } for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + path := "" if len(tt.contents) > 0 { diff --git a/go/vt/vtadmin/errors/errors.go b/go/vt/vtadmin/errors/errors.go new file mode 100644 index 00000000000..58c9cb104dd --- /dev/null +++ b/go/vt/vtadmin/errors/errors.go @@ -0,0 +1,55 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" +) + +var ( + // ErrAmbiguousSchema occurs when more than one schema is found for a given + // set of filter criteria. 
+ ErrAmbiguousSchema = errors.New("multiple schemas found") + // ErrAmbiguousTablet occurs when more than one tablet is found for a given + // set of filter criteria. + ErrAmbiguousTablet = errors.New("multiple tablets found") + // ErrAmbiguousWorkflow occurs when more than one workflow is found for a + // set of filter criteria that should ordinarily never return more than one + // workflow. + ErrAmbiguousWorkflow = errors.New("multiple workflows found") + // ErrInvalidRequest occurs when a request is invalid for any reason. + // For example, if mandatory parameters are undefined. + ErrInvalidRequest = errors.New("Invalid request") + // ErrNoSchema occurs when a schema definition cannot be found for a given + // set of filter criteria. + ErrNoSchema = errors.New("no such schema") + // ErrNoServingTablet occurs when a tablet with state SERVING cannot be + // found for a given set of filter criteria. It is a more specific form of + // ErrNoTablet + ErrNoServingTablet = fmt.Errorf("%w with state=SERVING", ErrNoTablet) + // ErrNoSrvVSchema occurs when no SrvVSchema is found for a given keyspace. + ErrNoSrvVSchema = errors.New("SrvVSchema not found") + // ErrNoTablet occurs when a tablet cannot be found for a given set of + // filter criteria. + ErrNoTablet = errors.New("no such tablet") + // ErrNoWorkflow occurs when a workflow cannot be found for a given set of + // filter criteria. + ErrNoWorkflow = errors.New("no such workflow") + // ErrUnsupportedCluster occurs when a cluster parameter is invalid. + ErrUnsupportedCluster = errors.New("unsupported cluster(s)") +) diff --git a/go/vt/vtadmin/errors/typed_error.go b/go/vt/vtadmin/errors/typed_error.go index f5174cbaeae..eec521990c7 100644 --- a/go/vt/vtadmin/errors/typed_error.go +++ b/go/vt/vtadmin/errors/typed_error.go @@ -30,6 +30,17 @@ type TypedError interface { HTTPStatus() int } +// BadRequest is returned when some request parameter is invalid. 
+type BadRequest struct { + Err error + ErrDetails interface{} +} + +func (e *BadRequest) Error() string { return e.Err.Error() } +func (e *BadRequest) Code() string { return "bad request" } +func (e *BadRequest) Details() interface{} { return e.ErrDetails } +func (e *BadRequest) HTTPStatus() int { return 400 } + // Unknown is the generic error, used when a more specific error is either // unspecified or inappropriate. type Unknown struct { diff --git a/go/vt/vtadmin/grpcserver/server.go b/go/vt/vtadmin/grpcserver/server.go index 77d23ceb663..ea3e469e150 100644 --- a/go/vt/vtadmin/grpcserver/server.go +++ b/go/vt/vtadmin/grpcserver/server.go @@ -63,8 +63,23 @@ type Options struct { // EnableTracing specifies whether to install opentracing interceptors on // the gRPC server. EnableTracing bool + // Services is a list of service names to declare as SERVING in health + // checks. Names should be fully-qualified (package_name.service_name, e.g. + // vtadmin.VTAdminServer, not VTAdminServer), and must be unique for a + // single Server instance. Users of this package are responsible for + // ensuring they do not pass a list with duplicate service names. + // + // The service name "grpc.health.v1.Health" is reserved by this package in + // order to power the healthcheck service. Attempting to pass this in the + // Services list to a grpcserver will be ignored. + // + // See https://github.com/grpc/grpc/blob/7324556353e831c57d30973db33df489c3ed3576/doc/health-checking.md + // for more details on healthchecking. + Services []string } +const healthServiceName = "grpc.health.v1.Health" // reserved health service name + // Server provides a multiplexed gRPC/HTTP server. type Server struct { name string @@ -201,9 +216,16 @@ func (s *Server) ListenAndServe() error { // nolint:funlen shutdown <- err }() - // (TODO:@amason) Figure out a good abstraction to have other services - // register themselves. 
- s.healthServer.SetServingStatus("grpc.health.v1.Health", healthpb.HealthCheckResponse_SERVING) + s.healthServer.SetServingStatus(healthServiceName, healthpb.HealthCheckResponse_SERVING) + + for _, name := range s.opts.Services { + if name == healthServiceName { + log.Warningf("Attempted to register a service under the reserved healthcheck service name %s; ignoring", healthServiceName) + continue + } + + s.healthServer.SetServingStatus(name, healthpb.HealthCheckResponse_SERVING) + } s.setServing(true) log.Infof("server %s listening on %s", s.name, s.opts.Addr) diff --git a/go/vt/vtadmin/http/clusters.go b/go/vt/vtadmin/http/clusters.go new file mode 100644 index 00000000000..ff02719679a --- /dev/null +++ b/go/vt/vtadmin/http/clusters.go @@ -0,0 +1,29 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "context" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// GetClusters implements the http wrapper for /clusters +func GetClusters(ctx context.Context, r Request, api *API) *JSONResponse { + clusters, err := api.server.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) + return NewJSONResponse(clusters, err) +} diff --git a/go/vt/vtadmin/http/keyspaces.go b/go/vt/vtadmin/http/keyspaces.go new file mode 100644 index 00000000000..a2a9f15c52a --- /dev/null +++ b/go/vt/vtadmin/http/keyspaces.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "context" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// GetKeyspaces implements the http wrapper for /keyspaces[?cluster=[&cluster=]]. +func GetKeyspaces(ctx context.Context, r Request, api *API) *JSONResponse { + keyspaces, err := api.server.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{ + ClusterIds: r.URL.Query()["cluster"], + }) + + return NewJSONResponse(keyspaces, err) +} diff --git a/go/vt/vtadmin/http/request.go b/go/vt/vtadmin/http/request.go index 6c0b7181c47..cdf4f006a17 100644 --- a/go/vt/vtadmin/http/request.go +++ b/go/vt/vtadmin/http/request.go @@ -17,9 +17,13 @@ limitations under the License. package http import ( + "fmt" "net/http" + "strconv" "github.com/gorilla/mux" + + "vitess.io/vitess/go/vt/vtadmin/errors" ) // Request wraps an *http.Request to provide some convenience functions for @@ -31,3 +35,22 @@ type Request struct{ *http.Request } func (r Request) Vars() map[string]string { return mux.Vars(r.Request) } + +// ParseQueryParamAsBool attempts to parse the query parameter of the given name +// into a boolean value. If the parameter is not set, the provided default value +// is returned. 
+func (r Request) ParseQueryParamAsBool(name string, defaultVal bool) (bool, error) { + if param := r.URL.Query().Get(name); param != "" { + val, err := strconv.ParseBool(param) + if err != nil { + return defaultVal, &errors.BadRequest{ + Err: err, + ErrDetails: fmt.Sprintf("could not parse query parameter %s (= %v) into bool value", name, param), + } + } + + return val, nil + } + + return defaultVal, nil +} diff --git a/go/vt/vtadmin/http/request_test.go b/go/vt/vtadmin/http/request_test.go new file mode 100644 index 00000000000..ba235a4877f --- /dev/null +++ b/go/vt/vtadmin/http/request_test.go @@ -0,0 +1,98 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package http + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseQueryParamAsBool(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + fragment string + param string + defaultValue bool + expected bool + shouldErr bool + }{ + { + name: "successful parse", + fragment: "?a=true&b=false", + param: "a", + defaultValue: false, + expected: true, + shouldErr: false, + }, + { + name: "no query fragment", + fragment: "", + param: "active_only", + defaultValue: false, + expected: false, + shouldErr: false, + }, + { + name: "param not set", + fragment: "?foo=bar", + param: "baz", + defaultValue: true, + expected: true, + shouldErr: false, + }, + { + name: "param not bool-like", + fragment: "?foo=bar", + param: "foo", + defaultValue: false, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + rawurl := fmt.Sprintf("http://example.com/%s", tt.fragment) + u, err := url.Parse(rawurl) + require.NoError(t, err, "could not parse %s", rawurl) + + r := Request{ + &http.Request{URL: u}, + } + + val, err := r.ParseQueryParamAsBool(tt.param, tt.defaultValue) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, val) + }) + } +} diff --git a/go/vt/vtadmin/http/schemas.go b/go/vt/vtadmin/http/schemas.go new file mode 100644 index 00000000000..d309b5411c4 --- /dev/null +++ b/go/vt/vtadmin/http/schemas.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "context" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// FindSchema implements the http wrapper for the +// /schema/{table}[?cluster=[&cluster=]] route. +func FindSchema(ctx context.Context, r Request, api *API) *JSONResponse { + vars := r.Vars() + query := r.URL.Query() + + sizeOpts, err := getTableSizeOpts(r) + if err != nil { + return NewJSONResponse(nil, err) + } + + schema, err := api.server.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ + Table: vars["table"], + ClusterIds: query["cluster"], + TableSizeOptions: sizeOpts, + }) + + return NewJSONResponse(schema, err) +} + +// GetSchema implements the http wrapper for the +// /schema/{cluster_id}/{keyspace}/{table} route. +func GetSchema(ctx context.Context, r Request, api *API) *JSONResponse { + vars := r.Vars() + + sizeOpts, err := getTableSizeOpts(r) + if err != nil { + return NewJSONResponse(nil, err) + } + + schema, err := api.server.GetSchema(ctx, &vtadminpb.GetSchemaRequest{ + ClusterId: vars["cluster_id"], + Keyspace: vars["keyspace"], + Table: vars["table"], + TableSizeOptions: sizeOpts, + }) + + return NewJSONResponse(schema, err) +} + +// GetSchemas implements the http wrapper for the /schemas[?cluster=[&cluster=] +// route. 
+func GetSchemas(ctx context.Context, r Request, api *API) *JSONResponse { + sizeOpts, err := getTableSizeOpts(r) + if err != nil { + return NewJSONResponse(nil, err) + } + + schemas, err := api.server.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{ + ClusterIds: r.URL.Query()["cluster"], + TableSizeOptions: sizeOpts, + }) + + return NewJSONResponse(schemas, err) +} + +func getTableSizeOpts(r Request) (*vtadminpb.GetSchemaTableSizeOptions, error) { + aggregateSizes, err := r.ParseQueryParamAsBool("aggregate_sizes", true) + if err != nil { + return nil, err + } + + return &vtadminpb.GetSchemaTableSizeOptions{ + AggregateSizes: aggregateSizes, + }, nil +} diff --git a/go/vt/vtadmin/http/vschemas.go b/go/vt/vtadmin/http/vschemas.go new file mode 100644 index 00000000000..09d706527c1 --- /dev/null +++ b/go/vt/vtadmin/http/vschemas.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "context" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// GetVSchema implements the http wrapper for the +// /vschema/{cluster_id}/{keyspace} route. 
+func GetVSchema(ctx context.Context, r Request, api *API) *JSONResponse { + vars := r.Vars() + + vschema, err := api.server.GetVSchema(ctx, &vtadminpb.GetVSchemaRequest{ + ClusterId: vars["cluster_id"], + Keyspace: vars["keyspace"], + }) + + return NewJSONResponse(vschema, err) +} + +// GetVSchemas implements the http wrapper for the +// /vschemas[?cluster=[&cluster=]] route. +func GetVSchemas(ctx context.Context, r Request, api *API) *JSONResponse { + vschemas, err := api.server.GetVSchemas(ctx, &vtadminpb.GetVSchemasRequest{ + ClusterIds: r.URL.Query()["cluster"], + }) + + return NewJSONResponse(vschemas, err) +} diff --git a/go/vt/vtadmin/http/vtexplain.go b/go/vt/vtadmin/http/vtexplain.go new file mode 100644 index 00000000000..53a70f3b899 --- /dev/null +++ b/go/vt/vtadmin/http/vtexplain.go @@ -0,0 +1,34 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package http + +import ( + "context" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// VTExplain implements the http wrapper for /vtexplain?cluster=&keyspace=&sql= +func VTExplain(ctx context.Context, r Request, api *API) *JSONResponse { + query := r.URL.Query() + res, err := api.server.VTExplain(ctx, &vtadminpb.VTExplainRequest{ + Cluster: query.Get("cluster"), + Keyspace: query.Get("keyspace"), + Sql: query.Get("sql"), + }) + return NewJSONResponse(res, err) +} diff --git a/go/vt/vtadmin/http/workflows.go b/go/vt/vtadmin/http/workflows.go new file mode 100644 index 00000000000..2a6fed4766e --- /dev/null +++ b/go/vt/vtadmin/http/workflows.go @@ -0,0 +1,71 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "context" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// GetWorkflow implements the http wrapper for the VTAdminServer.GetWorkflow +// method. +// +// Its route is /workflow/{cluster_id}/{keyspace}/{name}[?active_only=]. 
+func GetWorkflow(ctx context.Context, r Request, api *API) *JSONResponse { + vars := r.Vars() + + activeOnly, err := r.ParseQueryParamAsBool("active_only", false) + if err != nil { + return NewJSONResponse(nil, err) + } + + workflow, err := api.server.GetWorkflow(ctx, &vtadminpb.GetWorkflowRequest{ + ClusterId: vars["cluster_id"], + Keyspace: vars["keyspace"], + Name: vars["name"], + ActiveOnly: activeOnly, + }) + + return NewJSONResponse(workflow, err) +} + +// GetWorkflows implements the http wrapper for the VTAdminServer.GetWorkflows +// method. +// +// Its route is /workflows, with query params: +// - cluster: repeated, cluster IDs +// - active_only +// - keyspace: repeated +// - ignore_keyspace: repeated +func GetWorkflows(ctx context.Context, r Request, api *API) *JSONResponse { + query := r.URL.Query() + + activeOnly, err := r.ParseQueryParamAsBool("active_only", false) + if err != nil { + return NewJSONResponse(nil, err) + } + + workflows, err := api.server.GetWorkflows(ctx, &vtadminpb.GetWorkflowsRequest{ + ClusterIds: query["cluster"], + Keyspaces: query["keyspace"], + IgnoreKeyspaces: query["ignore_keyspace"], + ActiveOnly: activeOnly, + }) + + return NewJSONResponse(workflows, err) +} diff --git a/go/vt/vtadmin/tablets.go b/go/vt/vtadmin/tablets.go deleted file mode 100644 index 739ffac1483..00000000000 --- a/go/vt/vtadmin/tablets.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package vtadmin - -import ( - "database/sql" - "time" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtadmin/cluster" - "vitess.io/vitess/go/vt/vtadmin/vtadminproto" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" -) - -// ParseTablets converts a set of *sql.Rows into a slice of Tablets, for the -// given cluster. -func ParseTablets(rows *sql.Rows, c *cluster.Cluster) ([]*vtadminpb.Tablet, error) { - var tablets []*vtadminpb.Tablet - - for rows.Next() { - if err := rows.Err(); err != nil { - return nil, err - } - - tablet, err := parseTablet(rows, c) - if err != nil { - return nil, err - } - - tablets = append(tablets, tablet) - } - - if err := rows.Err(); err != nil { - return nil, err - } - - return tablets, nil -} - -// Fields are: -// Cell | Keyspace | Shard | TabletType (string) | ServingState (string) | Alias | Hostname | MasterTermStartTime. -func parseTablet(rows *sql.Rows, c *cluster.Cluster) (*vtadminpb.Tablet, error) { - var ( - cell string - tabletTypeStr string - servingStateStr string - aliasStr string - mtstStr string - topotablet topodatapb.Tablet - - err error - ) - - if err := rows.Scan( - &cell, - &topotablet.Keyspace, - &topotablet.Shard, - &tabletTypeStr, - &servingStateStr, - &aliasStr, - &topotablet.Hostname, - &mtstStr, - ); err != nil { - return nil, err - } - - tablet := &vtadminpb.Tablet{ - Cluster: &vtadminpb.Cluster{ - Id: c.ID, - Name: c.Name, - }, - Tablet: &topotablet, - } - - topotablet.Type, err = topoproto.ParseTabletType(tabletTypeStr) - if err != nil { - return nil, err - } - - tablet.State = vtadminproto.ParseTabletServingState(servingStateStr) - - topotablet.Alias, err = topoproto.ParseTabletAlias(aliasStr) - if err != nil { - return nil, err - } - - if topotablet.Alias.Cell != cell { - // (TODO:@amason) ??? - log.Warningf("tablet cell %s does not match alias %s. 
ignoring for now", cell, topoproto.TabletAliasString(topotablet.Alias)) - } - - if mtstStr != "" { - timeTime, err := time.Parse(time.RFC3339, mtstStr) - if err != nil { - return nil, err - } - - topotablet.MasterTermStartTime = logutil.TimeToProto(timeTime) - } - - return tablet, nil -} diff --git a/go/vt/vtadmin/testutil/cluster.go b/go/vt/vtadmin/testutil/cluster.go new file mode 100644 index 00000000000..3bf228e8390 --- /dev/null +++ b/go/vt/vtadmin/testutil/cluster.go @@ -0,0 +1,117 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "database/sql" + "fmt" + + "google.golang.org/grpc" + + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/vitessdriver" + "vitess.io/vitess/go/vt/vtadmin/cluster" + "vitess.io/vitess/go/vt/vtadmin/cluster/discovery/fakediscovery" + vtadminvtctldclient "vitess.io/vitess/go/vt/vtadmin/vtctldclient" + "vitess.io/vitess/go/vt/vtadmin/vtsql" + "vitess.io/vitess/go/vt/vtadmin/vtsql/fakevtsql" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// Dbcfg is a test utility for controlling the behavior of the cluster's DB +// at the package sql level. +type Dbcfg struct { + ShouldErr bool +} + +// TestClusterConfig controls the way that a cluster.Cluster object is +// constructed for testing vtadmin code. +type TestClusterConfig struct { + // Cluster provides the protobuf-based version of the cluster info. 
It is + // to set the ID and Name of the resulting cluster.Cluster, as well as to + // name a single, phony, vtgate entry in the cluster's discovery service. + Cluster *vtadminpb.Cluster + // VtctldClient provides the vtctldclient.VtctldClient implementation the + // cluster's vtctld proxy will use. Most unit tests will use an instance of + // the VtctldClient type provided by this package in order to mock out the + // vtctld layer. + VtctldClient vtctldclient.VtctldClient + // Tablets provides the set of tablets reachable by this cluster's vtsql.DB. + // Tablets are copied, and then mutated to have their Cluster field set to + // match the Cluster provided by this TestClusterConfig, so mutations are + // transparent to the caller. + Tablets []*vtadminpb.Tablet + // DBConfig controls the behavior of the cluster's vtsql.DB. + DBConfig Dbcfg +} + +// BuildCluster is a shared helper for building a cluster based on the given +// test configuration. +func BuildCluster(cfg TestClusterConfig) *cluster.Cluster { + disco := fakediscovery.New() + disco.AddTaggedGates(nil, &vtadminpb.VTGate{Hostname: fmt.Sprintf("%s-%s-gate", cfg.Cluster.Name, cfg.Cluster.Id)}) + disco.AddTaggedVtctlds(nil, &vtadminpb.Vtctld{Hostname: "doesn't matter"}) + + tablets := make([]*vtadminpb.Tablet, len(cfg.Tablets)) + for i, t := range cfg.Tablets { + tablet := &vtadminpb.Tablet{ + Cluster: cfg.Cluster, + Tablet: t.Tablet, + State: t.State, + } + + tablets[i] = tablet + } + + db := vtsql.New(&vtsql.Config{ + Cluster: cfg.Cluster, + Discovery: disco, + }) + db.DialFunc = func(_ vitessdriver.Configuration) (*sql.DB, error) { + return sql.OpenDB(&fakevtsql.Connector{Tablets: tablets, ShouldErr: cfg.DBConfig.ShouldErr}), nil + } + + vtctld := vtadminvtctldclient.New(&vtadminvtctldclient.Config{ + Cluster: cfg.Cluster, + Discovery: disco, + }) + vtctld.DialFunc = func(addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) { + return cfg.VtctldClient, nil + } + 
+ return &cluster.Cluster{ + ID: cfg.Cluster.Id, + Name: cfg.Cluster.Name, + Discovery: disco, + DB: db, + Vtctld: vtctld, + } +} + +// BuildClusters is a helper for building multiple clusters from a slice of +// TestClusterConfigs. +func BuildClusters(cfgs ...TestClusterConfig) []*cluster.Cluster { + clusters := make([]*cluster.Cluster, len(cfgs)) + + for i, cfg := range cfgs { + clusters[i] = BuildCluster(cfg) + } + + return clusters +} diff --git a/go/vt/vtadmin/testutil/proto_compare.go b/go/vt/vtadmin/testutil/proto_compare.go new file mode 100644 index 00000000000..496b302d432 --- /dev/null +++ b/go/vt/vtadmin/testutil/proto_compare.go @@ -0,0 +1,113 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// AssertKeyspaceSlicesEqual is a convenience function to assert that two +// []*vtadminpb.Keyspaces slices are equal, after clearing out any reserved +// proto XXX_ fields. 
+func AssertKeyspaceSlicesEqual(t *testing.T, expected []*vtadminpb.Keyspace, actual []*vtadminpb.Keyspace, msgAndArgs ...interface{}) { + t.Helper() + + for _, ks := range [][]*vtadminpb.Keyspace{expected, actual} { + for _, k := range ks { + if k.Shards != nil { + for _, ss := range k.Shards { + ss.XXX_sizecache = 0 + ss.XXX_unrecognized = nil + ss.Shard.KeyRange = nil + } + } + } + } + + assert.ElementsMatch(t, expected, actual, msgAndArgs...) +} + +// AssertSchemaSlicesEqual is a convenience function to assert that two +// []*vtadminpb.Schema slices are equal, after clearing out any reserved +// proto XXX_ fields. +func AssertSchemaSlicesEqual(t *testing.T, expected []*vtadminpb.Schema, actual []*vtadminpb.Schema, msgAndArgs ...interface{}) { + t.Helper() + + for _, ss := range [][]*vtadminpb.Schema{expected, actual} { + for _, s := range ss { + if s.TableDefinitions != nil { + for _, td := range s.TableDefinitions { + td.XXX_sizecache = 0 + td.XXX_unrecognized = nil + + if td.Fields != nil { + for _, f := range td.Fields { + f.XXX_sizecache = 0 + f.XXX_unrecognized = nil + } + } + } + } + } + } + + assert.ElementsMatch(t, expected, actual, msgAndArgs...) +} + +// AssertTabletSlicesEqual is a convenience function to assert that two +// []*vtadminpb.Tablet slices are equal, after clearing out any reserved +// proto XXX_ fields. 
+func AssertTabletSlicesEqual(t *testing.T, expected []*vtadminpb.Tablet, actual []*vtadminpb.Tablet, msgAndArgs ...interface{}) { + t.Helper() + + for _, ts := range [][]*vtadminpb.Tablet{expected, actual} { + for _, t := range ts { + t.XXX_sizecache = 0 + t.XXX_unrecognized = nil + + if t.Cluster != nil { + t.Cluster.XXX_sizecache = 0 + t.Cluster.XXX_unrecognized = nil + } + + if t.Tablet != nil { + t.Tablet.XXX_sizecache = 0 + t.Tablet.XXX_unrecognized = nil + + if t.Tablet.Alias != nil { + t.Tablet.Alias.XXX_sizecache = 0 + t.Tablet.Alias.XXX_unrecognized = nil + } + } + } + } + + assert.ElementsMatch(t, expected, actual, msgAndArgs...) +} + +// AssertTabletsEqual is a convenience function to assert that two +// *vtadminpb.Tablets are equal, after clearing out any reserved +// proto XXX_ fields. +func AssertTabletsEqual(t *testing.T, expected *vtadminpb.Tablet, actual *vtadminpb.Tablet, msgAndArgs ...interface{}) { + t.Helper() + + AssertTabletSlicesEqual(t, []*vtadminpb.Tablet{expected}, []*vtadminpb.Tablet{actual}, msgAndArgs...) +} diff --git a/go/vt/vtadmin/testutil/tablets.go b/go/vt/vtadmin/testutil/tablets.go new file mode 100644 index 00000000000..d4b6e60e024 --- /dev/null +++ b/go/vt/vtadmin/testutil/tablets.go @@ -0,0 +1,37 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testutil + +import ( + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// TopodataTabletsFromVTAdminTablets returns a slice of topodatapb.Tablet +// objects from a slice of vtadminpb.Tablet objects. It is the equivalent of +// +// map(func(t *vtadminpb.Tablet) (*topodatapb.Tablet) { return t.Tablet }, tablets) +// +func TopodataTabletsFromVTAdminTablets(tablets []*vtadminpb.Tablet) []*topodatapb.Tablet { + results := make([]*topodatapb.Tablet, len(tablets)) + + for i, tablet := range tablets { + results[i] = tablet.Tablet + } + + return results +} diff --git a/go/vt/vtadmin/testutil/vtctldclient.go b/go/vt/vtadmin/testutil/vtctldclient.go new file mode 100644 index 00000000000..e1168678921 --- /dev/null +++ b/go/vt/vtadmin/testutil/vtctldclient.go @@ -0,0 +1,130 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "context" + "fmt" + + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// VtctldClient provides a partial mock implementation of the +// vtctldclient.VtctldClient interface for use in testing. 
+type VtctldClient struct { + vtctldclient.VtctldClient + + FindAllShardsInKeyspaceResults map[string]struct { + Response *vtctldatapb.FindAllShardsInKeyspaceResponse + Error error + } + GetKeyspacesResults struct { + Keyspaces []*vtctldatapb.Keyspace + Error error + } + GetSchemaResults map[string]struct { + Response *vtctldatapb.GetSchemaResponse + Error error + } + GetVSchemaResults map[string]struct { + Response *vtctldatapb.GetVSchemaResponse + Error error + } + GetWorkflowsResults map[string]struct { + Response *vtctldatapb.GetWorkflowsResponse + Error error + } +} + +// Compile-time type assertion to make sure we haven't overriden a method +// incorrectly. +var _ vtctldclient.VtctldClient = (*VtctldClient)(nil) + +// FindAllShardsInKeyspace is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) FindAllShardsInKeyspace(ctx context.Context, req *vtctldatapb.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.FindAllShardsInKeyspaceResponse, error) { + if fake.FindAllShardsInKeyspaceResults == nil { + return nil, fmt.Errorf("%w: FindAllShardsInKeyspaceResults not set on fake vtctldclient", assert.AnError) + } + + if result, ok := fake.FindAllShardsInKeyspaceResults[req.Keyspace]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no result set for keyspace %s", assert.AnError, req.Keyspace) +} + +// GetKeyspaces is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) GetKeyspaces(ctx context.Context, req *vtctldatapb.GetKeyspacesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetKeyspacesResponse, error) { + if fake.GetKeyspacesResults.Error != nil { + return nil, fake.GetKeyspacesResults.Error + } + + return &vtctldatapb.GetKeyspacesResponse{ + Keyspaces: fake.GetKeyspacesResults.Keyspaces, + }, nil +} + +// GetSchema is part of the vtctldclient.VtctldClient interface. 
+func (fake *VtctldClient) GetSchema(ctx context.Context, req *vtctldatapb.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaResponse, error) { + if fake.GetSchemaResults == nil { + return nil, fmt.Errorf("%w: GetSchemaResults not set on fake vtctldclient", assert.AnError) + } + + if req.TabletAlias == nil { + return nil, fmt.Errorf("%w: req.TabletAlias == nil", assert.AnError) + } + + key := topoproto.TabletAliasString(req.TabletAlias) + + if result, ok := fake.GetSchemaResults[key]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no result set for tablet alias %s", assert.AnError, key) +} + +// GetVSchema is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) GetVSchema(ctx context.Context, req *vtctldatapb.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetVSchemaResponse, error) { + if fake.GetVSchemaResults == nil { + return nil, fmt.Errorf("%w: GetVSchemaResults not set on fake vtctldclient", assert.AnError) + } + + if result, ok := fake.GetVSchemaResults[req.Keyspace]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no result set for keyspace %s", assert.AnError, req.Keyspace) +} + +// GetWorkflows is part of the vtctldclient.VtctldClient interface. 
+func (fake *VtctldClient) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflowsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetWorkflowsResponse, error) { + if fake.GetWorkflowsResults == nil { + return nil, fmt.Errorf("%w: GetWorkflowsResults not set on fake vtctldclient", assert.AnError) + } + + if result, ok := fake.GetWorkflowsResults[req.Keyspace]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no result set for keyspace %s", assert.AnError, req.Keyspace) +} diff --git a/go/vt/vtadmin/testutil/workflows.go b/go/vt/vtadmin/testutil/workflows.go new file mode 100644 index 00000000000..de6e27744a4 --- /dev/null +++ b/go/vt/vtadmin/testutil/workflows.go @@ -0,0 +1,75 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// AssertClusterWorkflowsEqual is a test helper for asserting that two +// ClusterWorkflows objects are equal. +func AssertClusterWorkflowsEqual(t *testing.T, expected *vtadminpb.ClusterWorkflows, actual *vtadminpb.ClusterWorkflows, msgAndArgs ...interface{}) { + t.Helper() + + if expected == nil && actual == nil { + return + } + + require.NotNil(t, expected, msgAndArgs...) + require.NotNil(t, actual, msgAndArgs...) 
+ + if expected.Warnings != nil && actual.Warnings != nil { + assert.Equal(t, len(expected.Warnings), len(actual.Warnings), msgAndArgs...) + } + + assert.ElementsMatch(t, expected.Workflows, actual.Workflows, msgAndArgs...) +} + +// AssertGetWorkflowsResponsesEqual is a test helper for asserting that two +// GetWorkflowsResponse objects are equal. +func AssertGetWorkflowsResponsesEqual(t *testing.T, expected *vtadminpb.GetWorkflowsResponse, actual *vtadminpb.GetWorkflowsResponse, msgAndArgs ...interface{}) { + t.Helper() + + if expected == nil && actual == nil { + return + } + + require.NotNil(t, expected, msgAndArgs...) + require.NotNil(t, actual, msgAndArgs...) + + keysLeft := make([]string, 0, len(expected.WorkflowsByCluster)) + keysRight := make([]string, 0, len(actual.WorkflowsByCluster)) + + for k := range expected.WorkflowsByCluster { + keysLeft = append(keysLeft, k) + } + + for k := range actual.WorkflowsByCluster { + keysRight = append(keysRight, k) + } + + require.ElementsMatch(t, keysLeft, keysRight, msgAndArgs...) + + for _, k := range keysLeft { + AssertClusterWorkflowsEqual(t, expected.WorkflowsByCluster[k], actual.WorkflowsByCluster[k], msgAndArgs...) + } +} diff --git a/go/vt/vtadmin/vtadminproto/tablet.go b/go/vt/vtadmin/vtadminproto/tablet.go index a37aa2579b9..12af6cf2edb 100644 --- a/go/vt/vtadmin/vtadminproto/tablet.go +++ b/go/vt/vtadmin/vtadminproto/tablet.go @@ -16,7 +16,50 @@ limitations under the License. package vtadminproto -import vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +import ( + "vitess.io/vitess/go/vt/topo/topoproto" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// Tablets is a list of Tablet protobuf objects. +type Tablets []*vtadminpb.Tablet + +// AliasStringList returns a list of TabletAlias strings for each tablet in the +// list. 
+func (tablets Tablets) AliasStringList() []string { + aliases := make([]string, len(tablets)) + + for i, tablet := range tablets { + aliases[i] = topoproto.TabletAliasString(tablet.Tablet.Alias) + } + + return aliases +} + +// FilterTablets returns a subset of tablets (not exceeding maxResults) that +// satisfy the given condition. +// +// If maxResults is negative, len(tablets) is used instead. +func FilterTablets(condition func(tablet *vtadminpb.Tablet) bool, tablets []*vtadminpb.Tablet, maxResults int) []*vtadminpb.Tablet { + if maxResults < 0 { + maxResults = len(tablets) + } + + results := make([]*vtadminpb.Tablet, 0, maxResults) + + for _, tablet := range tablets { + if len(results) >= maxResults { + break + } + + if condition(tablet) { + results = append(results, tablet) + } + } + + return results +} // ParseTabletServingState returns a ServingState value from the given string. // If the string does not map to a valid value, this function returns UNKNOWN. diff --git a/go/vt/vtadmin/vtadminproto/trace.go b/go/vt/vtadmin/vtadminproto/trace.go new file mode 100644 index 00000000000..7110dd95d0c --- /dev/null +++ b/go/vt/vtadmin/vtadminproto/trace.go @@ -0,0 +1,39 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtadminproto + +import ( + "vitess.io/vitess/go/trace" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// AnnotateClusterSpan adds the cluster_id and cluster_name to a span. 
+func AnnotateClusterSpan(c *vtadminpb.Cluster, span trace.Span) { + span.Annotate("cluster_id", c.Id) + span.Annotate("cluster_name", c.Name) +} + +// AnnotateSpanWithGetSchemaTableSizeOptions adds the aggregate_table_sizes to a +// span. It is a noop if the size options object is nil. +func AnnotateSpanWithGetSchemaTableSizeOptions(opts *vtadminpb.GetSchemaTableSizeOptions, span trace.Span) { + if opts == nil { + opts = &vtadminpb.GetSchemaTableSizeOptions{} + } + + span.Annotate("aggregate_table_sizes", opts.AggregateSizes) +} diff --git a/go/vt/vtadmin/vtctldclient/config.go b/go/vt/vtadmin/vtctldclient/config.go new file mode 100644 index 00000000000..2ff3ad1d34f --- /dev/null +++ b/go/vt/vtadmin/vtctldclient/config.go @@ -0,0 +1,84 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtctldclient + +import ( + "fmt" + + "github.com/spf13/pflag" + + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/vtadmin/cluster/discovery" + "vitess.io/vitess/go/vt/vtadmin/credentials" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// Config represents the options that modify the behavior of a Proxy. +type Config struct { + Discovery discovery.Discovery + Credentials *grpcclient.StaticAuthClientCreds + + CredentialsPath string + + Cluster *vtadminpb.Cluster +} + +// Parse returns a new config with the given cluster and discovery, after +// attempting to parse the command-line pflags into that Config. 
See +// (*Config).Parse() for more details. +func Parse(cluster *vtadminpb.Cluster, disco discovery.Discovery, args []string) (*Config, error) { + cfg := &Config{ + Cluster: cluster, + Discovery: disco, + } + + err := cfg.Parse(args) + if err != nil { + return nil, err + } + + return cfg, nil +} + +// Parse reads options specified as command-line pflags (--key=value, note the +// double-dash!) into a Config. It is meant to be called from +// (*cluster.Cluster).New(). +func (c *Config) Parse(args []string) error { + fs := pflag.NewFlagSet("", pflag.ContinueOnError) + + credentialsTmplStr := fs.String("credentials-path-tmpl", "", + "Go template used to specify a path to a credentials file, which is a json file containing "+ + "a Username and Password. Templates are given the context of the vtctldclient.Config, "+ + "and primarily interoplate the cluster name and ID variables.") + + if err := fs.Parse(args); err != nil { + return err + } + + if *credentialsTmplStr != "" { + creds, path, err := credentials.LoadFromTemplate(*credentialsTmplStr, c) + if err != nil { + return fmt.Errorf("cannot load credentials from path template %s: %w", *credentialsTmplStr, err) + } + + c.CredentialsPath = path + c.Credentials = creds + } + + return nil +} diff --git a/go/vt/vtadmin/vtctldclient/config_test.go b/go/vt/vtadmin/vtctldclient/config_test.go new file mode 100644 index 00000000000..69d09c7bbba --- /dev/null +++ b/go/vt/vtadmin/vtctldclient/config_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtctldclient + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/grpcclient" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +func withTempFile(t *testing.T, tmpdir string, name string, f func(*testing.T, *os.File)) { + tmpfile, err := ioutil.TempFile(tmpdir, name) + require.NoError(t, err, "TempFile(%s, %s)", tmpdir, name) + defer os.Remove(tmpfile.Name()) + + f(t, tmpfile) +} + +func TestParse(t *testing.T) { + t.Parallel() + + t.Run("no credentials provided", func(t *testing.T) { + t.Parallel() + + cfg, err := Parse(nil, nil, []string{}) + require.NoError(t, err) + + expected := &Config{ + Cluster: nil, + Discovery: nil, + Credentials: nil, + CredentialsPath: "", + } + assert.Equal(t, expected, cfg) + }) + + t.Run("credential loading", func(t *testing.T) { + t.Parallel() + + withTempFile(t, "", "vtctldclient.config_test.testcluster.*", func(t *testing.T, credsfile *os.File) { + creds := &grpcclient.StaticAuthClientCreds{ + Username: "admin", + Password: "hunter2", + } + + data, err := json.Marshal(creds) + require.NoError(t, err, "cannot marshal credentials %+v into credsfile", creds) + + _, err = credsfile.Write(data) + require.NoError(t, err, "cannot write credentials to file") + + credsdir := filepath.Dir(credsfile.Name()) + baseParts := strings.Split(filepath.Base(credsfile.Name()), ".") + tmplParts := append(baseParts[:2], "{{ .Cluster.Name }}", baseParts[3]) + + args := []string{ + fmt.Sprintf("--credentials-path-tmpl=%s", filepath.Join(credsdir, strings.Join(tmplParts, "."))), + } + + cfg, err := Parse(&vtadminpb.Cluster{Name: "testcluster"}, nil, args) + require.NoError(t, err) + + expected := &Config{ + Cluster: &vtadminpb.Cluster{ + Name: "testcluster", + }, 
+ Discovery: nil, + Credentials: creds, + CredentialsPath: credsfile.Name(), + } + + assert.Equal(t, expected, cfg) + }) + }) +} diff --git a/go/vt/vtadmin/vtctldclient/proxy.go b/go/vt/vtadmin/vtctldclient/proxy.go new file mode 100644 index 00000000000..fce4c786762 --- /dev/null +++ b/go/vt/vtadmin/vtctldclient/proxy.go @@ -0,0 +1,160 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtctldclient + +import ( + "context" + "fmt" + + "google.golang.org/grpc" + + "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/vtadmin/cluster/discovery" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldclient" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" + vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" +) + +// Proxy defines the connection interface of a proxied vtctldclient used by +// VTAdmin clusters. +type Proxy interface { + // Dial opens a gRPC connection to a vtctld in the cluster. If the Proxy + // already has a valid connection, this is a no-op. + Dial(ctx context.Context) error + + // Hostname returns the hostname the Proxy is currently connected to. + Hostname() string + + // Close closes the underlying vtctldclient connection. This is a no-op if + // the Proxy has no current, valid connection. It is safe to call repeatedly. 
+ // Users may call Dial on a previously-closed Proxy to create a new + // connection, but that connection may not be to the same particular vtctld. + Close() error + + vtctlservicepb.VtctldClient +} + +// ClientProxy implements the Proxy interface relying on a discovery.Discovery +// implementation to handle vtctld discovery and connection management. +type ClientProxy struct { + vtctldclient.VtctldClient // embedded to provide easy implementation of the vtctlservicepb.VtctldClient interface + + cluster *vtadminpb.Cluster + creds *grpcclient.StaticAuthClientCreds + discovery discovery.Discovery + + // DialFunc is called to open a new vtctdclient connection. In production, + // this should always be grpcvtctldclient.NewWithDialOpts, but it is + // exported for testing purposes. + DialFunc func(addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) + + closed bool + host string +} + +// New returns a ClientProxy to the given cluster. When Dial-ing, it will use +// the given discovery implementation to find a vtctld to connect to, and the +// given creds to dial the underlying gRPC connection, both of which are +// provided by the Config. +// +// It does not open a connection to a vtctld; users must call Dial before first +// use. +func New(cfg *Config) *ClientProxy { + return &ClientProxy{ + cluster: cfg.Cluster, + creds: cfg.Credentials, + discovery: cfg.Discovery, + DialFunc: grpcvtctldclient.NewWithDialOpts, + } +} + +// Dial is part of the Proxy interface. +func (vtctld *ClientProxy) Dial(ctx context.Context) error { + span, ctx := trace.NewSpan(ctx, "VtctldClientProxy.Dial") + defer span.Finish() + + if vtctld.VtctldClient != nil { + if !vtctld.closed { + span.Annotate("is_noop", true) + + return nil + } + + span.Annotate("is_stale", true) + + // close before reopen. this is safe to call on an already-closed client. 
+ if err := vtctld.Close(); err != nil { + return fmt.Errorf("error closing possibly-stale connection before re-dialing: %w", err) + } + } + + addr, err := vtctld.discovery.DiscoverVtctldAddr(ctx, nil) + if err != nil { + return fmt.Errorf("error discovering vtctld to dial: %w", err) + } + + span.Annotate("vtctld_host", addr) + span.Annotate("is_using_credentials", vtctld.creds != nil) + + opts := []grpc.DialOption{ + // TODO: make configurable. right now, omitting this and attempting + // to not use TLS results in: + // grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials) + grpc.WithInsecure(), + } + + if vtctld.creds != nil { + opts = append(opts, grpc.WithPerRPCCredentials(vtctld.creds)) + } + + client, err := vtctld.DialFunc(addr, grpcclient.FailFast(false), opts...) + if err != nil { + return err + } + + vtctld.host = addr + vtctld.VtctldClient = client + vtctld.closed = false + + return nil +} + +// Hostname is part of the Proxy interface. +func (vtctld *ClientProxy) Hostname() string { + return vtctld.host +} + +// Close is part of the Proxy interface. +func (vtctld *ClientProxy) Close() error { + if vtctld.VtctldClient == nil { + vtctld.closed = true + + return nil + } + + err := vtctld.VtctldClient.Close() + if err != nil { + return err + } + + vtctld.closed = true + + return nil +} diff --git a/go/vt/vtadmin/vtsql/config.go b/go/vt/vtadmin/vtsql/config.go index 1268a0c48a3..8655bfd7c9f 100644 --- a/go/vt/vtadmin/vtsql/config.go +++ b/go/vt/vtadmin/vtsql/config.go @@ -17,14 +17,14 @@ limitations under the License. 
package vtsql import ( - "bytes" "fmt" - "text/template" + "time" "github.com/spf13/pflag" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/vtadmin/cluster/discovery" + "vitess.io/vitess/go/vt/vtadmin/credentials" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) @@ -35,6 +35,8 @@ type Config struct { DiscoveryTags []string Credentials Credentials + DialPingTimeout time.Duration + // CredentialsPath is used only to power vtadmin debug endpoints; there may // be a better way where we don't need to put this in the config, because // it's not really an "option" in normal use. @@ -66,6 +68,8 @@ func Parse(cluster *vtadminpb.Cluster, disco discovery.Discovery, args []string) func (c *Config) Parse(args []string) error { fs := pflag.NewFlagSet("", pflag.ContinueOnError) + fs.DurationVar(&c.DialPingTimeout, "dial-ping-timeout", time.Millisecond*500, + "Timeout to use when pinging an existing connection during calls to Dial.") fs.StringSliceVar(&c.DiscoveryTags, "discovery-tags", []string{}, "repeated, comma-separated list of tags to use when discovering a vtgate to connect to. 
"+ "the semantics of the tags may depend on the specific discovery implementation used") @@ -83,7 +87,7 @@ func (c *Config) Parse(args []string) error { var creds *grpcclient.StaticAuthClientCreds if *credentialsTmplStr != "" { - _creds, path, err := c.loadCredentialsFromTemplate(*credentialsTmplStr) + _creds, path, err := credentials.LoadFromTemplate(*credentialsTmplStr, c) if err != nil { return fmt.Errorf("cannot load credentials from path template %s: %w", *credentialsTmplStr, err) } @@ -107,28 +111,3 @@ func (c *Config) Parse(args []string) error { return nil } - -func (c Config) loadCredentialsFromTemplate(tmplStr string) (*grpcclient.StaticAuthClientCreds, string, error) { - path, err := c.renderTemplate(tmplStr) - if err != nil { - return nil, "", err - } - - creds, err := loadCredentials(path) - - return creds, path, err -} - -func (c Config) renderTemplate(tmplStr string) (string, error) { - tmpl, err := template.New("").Parse(tmplStr) - if err != nil { - return "", err - } - - buf := bytes.NewBuffer(nil) - if err := tmpl.Execute(buf, &c); err != nil { - return "", err - } - - return buf.String(), nil -} diff --git a/go/vt/vtadmin/vtsql/config_test.go b/go/vt/vtadmin/vtsql/config_test.go index e2ba9acdc4d..84f877f234a 100644 --- a/go/vt/vtadmin/vtsql/config_test.go +++ b/go/vt/vtadmin/vtsql/config_test.go @@ -23,6 +23,7 @@ import ( "path/filepath" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -33,6 +34,8 @@ import ( ) func TestConfigParse(t *testing.T) { + t.Parallel() + cfg := Config{} // This asserts we do not attempt to load a credentialsFlag via its Set func @@ -41,6 +44,8 @@ func TestConfigParse(t *testing.T) { assert.NoError(t, err) t.Run("", func(t *testing.T) { + t.Parallel() + f, err := ioutil.TempFile("", "vtsql-config-test-testcluster-*") // testcluster is going to appear in the template require.NoError(t, err) @@ -93,6 +98,8 @@ func TestConfigParse(t *testing.T) { }) 
t.Run("", func(t *testing.T) { + t.Parallel() + f, err := ioutil.TempFile("", "vtsql-config-test-testcluster-*") // testcluster is going to appear in the template require.NoError(t, err) @@ -137,6 +144,7 @@ func TestConfigParse(t *testing.T) { Id: "cid", Name: "testcluster", }, + DialPingTimeout: time.Millisecond * 500, DiscoveryTags: expectedTags, Credentials: expectedCreds, CredentialsPath: path, diff --git a/go/vt/vtadmin/vtsql/credentials.go b/go/vt/vtadmin/vtsql/credentials.go index 70fa31c5ac7..6e06fee4361 100644 --- a/go/vt/vtadmin/vtsql/credentials.go +++ b/go/vt/vtadmin/vtsql/credentials.go @@ -17,9 +17,6 @@ limitations under the License. package vtsql import ( - "encoding/json" - "io/ioutil" - "google.golang.org/grpc/credentials" "vitess.io/vitess/go/vt/grpcclient" @@ -54,17 +51,3 @@ func (creds *StaticAuthCredentials) GetEffectiveUsername() string { func (creds *StaticAuthCredentials) GetUsername() string { return creds.Username } - -func loadCredentials(path string) (*grpcclient.StaticAuthClientCreds, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - var creds grpcclient.StaticAuthClientCreds - if err := json.Unmarshal(data, &creds); err != nil { - return nil, err - } - - return &creds, nil -} diff --git a/go/vt/vtadmin/vtsql/vtsql.go b/go/vt/vtadmin/vtsql/vtsql.go index 82782215190..5c52dad36ef 100644 --- a/go/vt/vtadmin/vtsql/vtsql.go +++ b/go/vt/vtadmin/vtsql/vtsql.go @@ -21,13 +21,16 @@ import ( "database/sql" "errors" "fmt" + "time" "google.golang.org/grpc" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vitessdriver" "vitess.io/vitess/go/vt/vtadmin/cluster/discovery" + "vitess.io/vitess/go/vt/vtadmin/vtadminproto" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) @@ -70,7 +73,8 @@ type VTGateProxy struct { // DialFunc is called to open a new database connection. 
In production this // should always be vitessdriver.OpenWithConfiguration, but it is exported // for testing purposes. - DialFunc func(cfg vitessdriver.Configuration) (*sql.DB, error) + DialFunc func(cfg vitessdriver.Configuration) (*sql.DB, error) + dialPingTimeout time.Duration host string conn *sql.DB @@ -95,11 +99,12 @@ func New(cfg *Config) *VTGateProxy { } return &VTGateProxy{ - cluster: cfg.Cluster, - discovery: cfg.Discovery, - discoveryTags: discoveryTags, - creds: cfg.Credentials, - DialFunc: vitessdriver.OpenWithConfiguration, + cluster: cfg.Cluster, + discovery: cfg.Discovery, + discoveryTags: discoveryTags, + creds: cfg.Credentials, + DialFunc: vitessdriver.OpenWithConfiguration, + dialPingTimeout: cfg.DialPingTimeout, } } @@ -131,15 +136,31 @@ func (vtgate *VTGateProxy) Dial(ctx context.Context, target string, opts ...grpc vtgate.annotateSpan(span) if vtgate.conn != nil { - // (TODO:@amason): consider a quick Ping() check in this case, and get a - // new connection if that fails. 
- return nil + ctx, cancel := context.WithTimeout(ctx, vtgate.dialPingTimeout) + defer cancel() + + err := vtgate.PingContext(ctx) + switch err { + case nil: + log.Infof("Have valid connection to %s, reusing it.", vtgate.host) + span.Annotate("is_noop", true) + + return nil + default: + log.Warningf("Ping failed on host %s: %s; Rediscovering a vtgate to get new connection", vtgate.host, err) + + if err := vtgate.Close(); err != nil { + log.Warningf("Error when closing connection to vtgate %s: %s; Continuing anyway ...", vtgate.host, err) + } + } } + span.Annotate("is_noop", false) + if vtgate.host == "" { gate, err := vtgate.discovery.DiscoverVTGateAddr(ctx, vtgate.discoveryTags) if err != nil { - return err + return fmt.Errorf("error discovering vtgate to dial: %w", err) } vtgate.host = gate @@ -147,23 +168,24 @@ func (vtgate *VTGateProxy) Dial(ctx context.Context, target string, opts ...grpc span.Annotate("vtgate_host", gate) } + log.Infof("Dialing %s ...", vtgate.host) + conf := vitessdriver.Configuration{ Protocol: fmt.Sprintf("grpc_%s", vtgate.cluster.Id), Address: vtgate.host, Target: target, - GRPCDialOptions: opts, + GRPCDialOptions: append(opts, grpc.WithInsecure()), } if vtgate.creds != nil { conf.GRPCDialOptions = append([]grpc.DialOption{ grpc.WithPerRPCCredentials(vtgate.creds), - grpc.WithInsecure(), }, conf.GRPCDialOptions...) } db, err := vtgate.DialFunc(conf) if err != nil { - return err + return fmt.Errorf("error dialing vtgate %s: %w", vtgate.host, err) } vtgate.conn = db @@ -187,11 +209,20 @@ func (vtgate *VTGateProxy) ShowTablets(ctx context.Context) (*sql.Rows, error) { // Ping is part of the DB interface. func (vtgate *VTGateProxy) Ping() error { - return vtgate.PingContext(context.Background()) + return vtgate.pingContext(context.Background()) } // PingContext is part of the DB interface. 
func (vtgate *VTGateProxy) PingContext(ctx context.Context) error { + span, ctx := trace.NewSpan(ctx, "VTGateProxy.PingContext") + defer span.Finish() + + vtgate.annotateSpan(span) + + return vtgate.pingContext(ctx) +} + +func (vtgate *VTGateProxy) pingContext(ctx context.Context) error { if vtgate.conn == nil { return ErrConnClosed } @@ -219,8 +250,7 @@ func (vtgate *VTGateProxy) Hostname() string { } func (vtgate *VTGateProxy) annotateSpan(span trace.Span) { - span.Annotate("cluster_id", vtgate.cluster.Id) - span.Annotate("cluster_name", vtgate.cluster.Name) + vtadminproto.AnnotateClusterSpan(vtgate.cluster, span) if vtgate.host != "" { span.Annotate("vtgate_host", vtgate.host) diff --git a/go/vt/vtadmin/vtsql/vtsql_test.go b/go/vt/vtadmin/vtsql/vtsql_test.go index 4746a3fb420..c0ddd84e170 100644 --- a/go/vt/vtadmin/vtsql/vtsql_test.go +++ b/go/vt/vtadmin/vtsql/vtsql_test.go @@ -20,6 +20,7 @@ import ( "context" "database/sql" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -36,11 +37,15 @@ import ( ) func assertImmediateCaller(t *testing.T, im *querypb.VTGateCallerID, expected string) { + t.Helper() + require.NotNil(t, im, "immediate caller cannot be nil") assert.Equal(t, im.Username, expected, "immediate caller username mismatch") } func assertEffectiveCaller(t *testing.T, ef *vtrpcpb.CallerID, principal string, component string, subcomponent string) { + t.Helper() + require.NotNil(t, ef, "effective caller cannot be nil") assert.Equal(t, ef.Principal, principal, "effective caller principal mismatch") assert.Equal(t, ef.Component, component, "effective caller component mismatch") @@ -48,6 +53,8 @@ func assertEffectiveCaller(t *testing.T, ef *vtrpcpb.CallerID, principal string, } func Test_getQueryContext(t *testing.T) { + t.Parallel() + ctx := context.Background() creds := &StaticAuthCredentials{ @@ -81,6 +88,8 @@ func Test_getQueryContext(t *testing.T) { } func TestDial(t *testing.T) { + t.Helper() 
+ tests := []struct { name string disco *fakediscovery.Fake @@ -92,8 +101,9 @@ func TestDial(t *testing.T) { { name: "existing conn", proxy: &VTGateProxy{ - cluster: &vtadminpb.Cluster{}, - conn: sql.OpenDB(&fakevtsql.Connector{}), + cluster: &vtadminpb.Cluster{}, + conn: sql.OpenDB(&fakevtsql.Connector{}), + dialPingTimeout: time.Millisecond * 10, }, shouldErr: false, }, @@ -144,8 +154,14 @@ func TestDial(t *testing.T) { }, } + ctx := context.Background() + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if tt.disco != nil { if len(tt.gates) > 0 { tt.disco.AddTaggedGates(nil, tt.gates...) @@ -154,7 +170,7 @@ func TestDial(t *testing.T) { tt.proxy.discovery = tt.disco } - err := tt.proxy.Dial(context.Background(), "") + err := tt.proxy.Dial(ctx, "") if tt.shouldErr { assert.Error(t, err) return diff --git a/go/vt/vtctl/endtoend/get_schema_test.go b/go/vt/vtctl/endtoend/get_schema_test.go index ddb0c204048..1fb10fff0f0 100644 --- a/go/vt/vtctl/endtoend/get_schema_test.go +++ b/go/vt/vtctl/endtoend/get_schema_test.go @@ -2,7 +2,6 @@ package endtoend import ( "context" - "fmt" "testing" "github.com/google/uuid" @@ -14,7 +13,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl" - "vitess.io/vitess/go/vt/vttablet/faketmclient" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -23,29 +22,6 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -type fakeTabletManagerClient struct { - tmclient.TabletManagerClient - schemas map[string]*tabletmanagerdatapb.SchemaDefinition -} - -func newTMClient() *fakeTabletManagerClient { - return &fakeTabletManagerClient{ - TabletManagerClient: faketmclient.NewFakeTabletManagerClient(), - schemas: map[string]*tabletmanagerdatapb.SchemaDefinition{}, - } -} - -func (c *fakeTabletManagerClient) GetSchema(ctx 
context.Context, tablet *topodatapb.Tablet, tablets []string, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { - key := topoproto.TabletAliasString(tablet.Alias) - - schema, ok := c.schemas[key] - if !ok { - return nil, fmt.Errorf("no schemas for %s", key) - } - - return schema, nil -} - func TestGetSchema(t *testing.T) { ctx := context.Background() @@ -162,12 +138,26 @@ func TestGetSchema(t *testing.T) { }, } - tmc := newTMClient() - tmc.schemas[topoproto.TabletAliasString(tablet.Alias)] = sd + tmc := testutil.TabletManagerClient{ + GetSchemaResults: map[string]struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{ + topoproto.TabletAliasString(tablet.Alias): { + Schema: sd, + Error: nil, + }, + }, + } + + tmclient.RegisterTabletManagerClientFactory(t.Name(), func() tmclient.TabletManagerClient { + return &tmc + }) + *tmclient.TabletManagerProtocol = t.Name() logger := logutil.NewMemoryLogger() - err := vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc), []string{ + err := vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc), []string{ "GetSchema", topoproto.TabletAliasString(tablet.Alias), }) @@ -207,7 +197,7 @@ func TestGetSchema(t *testing.T) { }, } - err = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc), []string{ + err = vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc), []string{ "GetSchema", "-table_sizes_only", topoproto.TabletAliasString(tablet.Alias), @@ -224,12 +214,3 @@ func TestGetSchema(t *testing.T) { assert.Equal(t, sd, actual) } - -func init() { - // enforce we will use the right protocol (gRPC) (note the - // client is unused, but it is initialized, so it needs to exist) - *tmclient.TabletManagerProtocol = "grpc" - tmclient.RegisterTabletManagerClientFactory("grpc", func() tmclient.TabletManagerClient { - return nil - }) -} diff --git a/go/vt/vtctl/grpcvtctldclient/client.go b/go/vt/vtctl/grpcvtctldclient/client.go index af3e8e07802..a0f5a14ef77 100644 --- 
a/go/vt/vtctl/grpcvtctldclient/client.go +++ b/go/vt/vtctl/grpcvtctldclient/client.go @@ -72,6 +72,20 @@ func gRPCVtctldClientFactory(addr string) (vtctldclient.VtctldClient, error) { }, nil } +// NewWithDialOpts returns a vtctldclient.VtctldClient configured with the given +// DialOptions. It is exported for use in vtadmin. +func NewWithDialOpts(addr string, failFast grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) { + conn, err := grpcclient.Dial(addr, failFast, opts...) + if err != nil { + return nil, err + } + + return &gRPCVtctldClient{ + cc: conn, + c: vtctlservicepb.NewVtctldClient(conn), + }, nil +} + func (client *gRPCVtctldClient) Close() error { err := client.cc.Close() if err == nil { diff --git a/go/vt/vtctl/grpcvtctldclient/client_gen.go b/go/vt/vtctl/grpcvtctldclient/client_gen.go index cbe87b15d87..ee7b4d99b44 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_gen.go +++ b/go/vt/vtctl/grpcvtctldclient/client_gen.go @@ -28,6 +28,69 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) +// ChangeTabletType is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ChangeTabletType(ctx context.Context, in *vtctldatapb.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldatapb.ChangeTabletTypeResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ChangeTabletType(ctx, in, opts...) +} + +// CreateKeyspace is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) CreateKeyspace(ctx context.Context, in *vtctldatapb.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.CreateKeyspaceResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CreateKeyspace(ctx, in, opts...) +} + +// CreateShard is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) CreateShard(ctx context.Context, in *vtctldatapb.CreateShardRequest, opts ...grpc.CallOption) (*vtctldatapb.CreateShardResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CreateShard(ctx, in, opts...) +} + +// DeleteKeyspace is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) DeleteKeyspace(ctx context.Context, in *vtctldatapb.DeleteKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.DeleteKeyspaceResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.DeleteKeyspace(ctx, in, opts...) +} + +// DeleteShards is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) DeleteShards(ctx context.Context, in *vtctldatapb.DeleteShardsRequest, opts ...grpc.CallOption) (*vtctldatapb.DeleteShardsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.DeleteShards(ctx, in, opts...) +} + +// DeleteTablets is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) DeleteTablets(ctx context.Context, in *vtctldatapb.DeleteTabletsRequest, opts ...grpc.CallOption) (*vtctldatapb.DeleteTabletsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.DeleteTablets(ctx, in, opts...) +} + +// EmergencyReparentShard is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) EmergencyReparentShard(ctx context.Context, in *vtctldatapb.EmergencyReparentShardRequest, opts ...grpc.CallOption) (*vtctldatapb.EmergencyReparentShardResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.EmergencyReparentShard(ctx, in, opts...) 
+} + // FindAllShardsInKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldatapb.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.FindAllShardsInKeyspaceResponse, error) { if client.c == nil { @@ -37,6 +100,42 @@ func (client *gRPCVtctldClient) FindAllShardsInKeyspace(ctx context.Context, in return client.c.FindAllShardsInKeyspace(ctx, in, opts...) } +// GetBackups is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetBackups(ctx context.Context, in *vtctldatapb.GetBackupsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetBackupsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetBackups(ctx, in, opts...) +} + +// GetCellInfo is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetCellInfo(ctx context.Context, in *vtctldatapb.GetCellInfoRequest, opts ...grpc.CallOption) (*vtctldatapb.GetCellInfoResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetCellInfo(ctx, in, opts...) +} + +// GetCellInfoNames is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetCellInfoNames(ctx context.Context, in *vtctldatapb.GetCellInfoNamesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetCellInfoNamesResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetCellInfoNames(ctx, in, opts...) +} + +// GetCellsAliases is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) GetCellsAliases(ctx context.Context, in *vtctldatapb.GetCellsAliasesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetCellsAliasesResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetCellsAliases(ctx, in, opts...) +} + // GetKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) GetKeyspace(ctx context.Context, in *vtctldatapb.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.GetKeyspaceResponse, error) { if client.c == nil { @@ -54,3 +153,138 @@ func (client *gRPCVtctldClient) GetKeyspaces(ctx context.Context, in *vtctldatap return client.c.GetKeyspaces(ctx, in, opts...) } + +// GetSchema is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetSchema(ctx, in, opts...) +} + +// GetShard is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.GetShardRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetShard(ctx, in, opts...) +} + +// GetSrvKeyspaces is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetSrvKeyspaces(ctx context.Context, in *vtctldatapb.GetSrvKeyspacesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSrvKeyspacesResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetSrvKeyspaces(ctx, in, opts...) +} + +// GetSrvVSchema is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) GetSrvVSchema(ctx context.Context, in *vtctldatapb.GetSrvVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSrvVSchemaResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetSrvVSchema(ctx, in, opts...) +} + +// GetTablet is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetTablet(ctx context.Context, in *vtctldatapb.GetTabletRequest, opts ...grpc.CallOption) (*vtctldatapb.GetTabletResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetTablet(ctx, in, opts...) +} + +// GetTablets is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetTablets(ctx context.Context, in *vtctldatapb.GetTabletsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetTabletsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetTablets(ctx, in, opts...) +} + +// GetVSchema is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetVSchema(ctx context.Context, in *vtctldatapb.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetVSchemaResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetVSchema(ctx, in, opts...) +} + +// GetWorkflows is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetWorkflows(ctx context.Context, in *vtctldatapb.GetWorkflowsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetWorkflowsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetWorkflows(ctx, in, opts...) +} + +// InitShardPrimary is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) InitShardPrimary(ctx context.Context, in *vtctldatapb.InitShardPrimaryRequest, opts ...grpc.CallOption) (*vtctldatapb.InitShardPrimaryResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.InitShardPrimary(ctx, in, opts...) +} + +// PlannedReparentShard is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) PlannedReparentShard(ctx context.Context, in *vtctldatapb.PlannedReparentShardRequest, opts ...grpc.CallOption) (*vtctldatapb.PlannedReparentShardResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.PlannedReparentShard(ctx, in, opts...) +} + +// RemoveKeyspaceCell is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) RemoveKeyspaceCell(ctx context.Context, in *vtctldatapb.RemoveKeyspaceCellRequest, opts ...grpc.CallOption) (*vtctldatapb.RemoveKeyspaceCellResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.RemoveKeyspaceCell(ctx, in, opts...) +} + +// RemoveShardCell is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) RemoveShardCell(ctx context.Context, in *vtctldatapb.RemoveShardCellRequest, opts ...grpc.CallOption) (*vtctldatapb.RemoveShardCellResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.RemoveShardCell(ctx, in, opts...) +} + +// ReparentTablet is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ReparentTablet(ctx context.Context, in *vtctldatapb.ReparentTabletRequest, opts ...grpc.CallOption) (*vtctldatapb.ReparentTabletResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ReparentTablet(ctx, in, opts...) 
+} + +// ShardReplicationPositions is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ShardReplicationPositions(ctx context.Context, in *vtctldatapb.ShardReplicationPositionsRequest, opts ...grpc.CallOption) (*vtctldatapb.ShardReplicationPositionsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ShardReplicationPositions(ctx, in, opts...) +} + +// TabletExternallyReparented is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) TabletExternallyReparented(ctx context.Context, in *vtctldatapb.TabletExternallyReparentedRequest, opts ...grpc.CallOption) (*vtctldatapb.TabletExternallyReparentedResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.TabletExternallyReparented(ctx, in, opts...) +} diff --git a/go/vt/vtctl/grpcvtctldclient/client_test.go b/go/vt/vtctl/grpcvtctldclient/client_test.go index b4a34be38b2..d365b376b34 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_test.go +++ b/go/vt/vtctl/grpcvtctldclient/client_test.go @@ -22,62 +22,28 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/net/nettest" - "google.golang.org/grpc" - "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vtctl/vtctldclient" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/proto/vtctldata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" - vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" ) -// annoyingly, this is duplicated with theu tests in package grpcvtctldserver. -// fine for now, I suppose. 
-func addKeyspace(ctx context.Context, t *testing.T, ts *topo.Server, ks *vtctldatapb.Keyspace) { - in := *ks.Keyspace // take a copy to avoid the XXX_ fields changing - - err := ts.CreateKeyspace(ctx, ks.Name, &in) - require.NoError(t, err) -} - -func withTestServer( - t *testing.T, - server vtctlservicepb.VtctldServer, - test func(t *testing.T, client vtctldclient.VtctldClient), -) { - lis, err := nettest.NewLocalListener("tcp") - require.NoError(t, err, "cannot create nettest listener") - - defer lis.Close() - - s := grpc.NewServer() - vtctlservicepb.RegisterVtctldServer(s, server) - - go s.Serve(lis) - defer s.Stop() - - client, err := vtctldclient.New("grpc", lis.Addr().String()) - require.NoError(t, err, "cannot create vtctld client") - - test(t, client) -} - func TestFindAllShardsInKeyspace(t *testing.T) { ctx := context.Background() ts := memorytopo.NewServer("cell1") vtctld := grpcvtctldserver.NewVtctldServer(ts) - withTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { ks := &vtctldatapb.Keyspace{ Name: "testkeyspace", Keyspace: &topodatapb.Keyspace{}, } - addKeyspace(ctx, t, ts, ks) + testutil.AddKeyspace(ctx, t, ts, ks) si1, err := ts.GetOrCreateShard(ctx, ks.Name, "-80") require.NoError(t, err) @@ -115,7 +81,7 @@ func TestGetKeyspace(t *testing.T) { ts := memorytopo.NewServer("cell1") vtctld := grpcvtctldserver.NewVtctldServer(ts) - withTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { expected := &vtctldatapb.GetKeyspaceResponse{ Keyspace: &vtctldata.Keyspace{ Name: "testkeyspace", @@ -124,7 +90,7 @@ func TestGetKeyspace(t *testing.T) { }, }, } - addKeyspace(ctx, t, ts, expected.Keyspace) + testutil.AddKeyspace(ctx, t, ts, expected.Keyspace) resp, err := client.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: 
expected.Keyspace.Name}) assert.NoError(t, err) @@ -142,7 +108,7 @@ func TestGetKeyspaces(t *testing.T) { ts := memorytopo.NewServer("cell1") vtctld := grpcvtctldserver.NewVtctldServer(ts) - withTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { resp, err := client.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) assert.NoError(t, err) assert.Empty(t, resp.Keyspaces) @@ -151,7 +117,7 @@ func TestGetKeyspaces(t *testing.T) { Name: "testkeyspace", Keyspace: &topodatapb.Keyspace{}, } - addKeyspace(ctx, t, ts, expected) + testutil.AddKeyspace(ctx, t, ts, expected) resp, err = client.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) assert.NoError(t, err) diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go new file mode 100644 index 00000000000..1b685104a7e --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package endtoend + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" + "vitess.io/vitess/go/vt/vttablet/tabletservermock" + "vitess.io/vitess/go/vt/vttablet/tmclient" + "vitess.io/vitess/go/vt/wrangler" + "vitess.io/vitess/go/vt/wrangler/testlib" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func TestInitShardPrimary(t *testing.T) { + ts := memorytopo.NewServer("cell1") + tmc := tmclient.NewTabletManagerClient() + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) + + primaryDb := fakesqldb.New(t) + primaryDb.AddQuery("create database if not exists `vt_test_keyspace`", &sqltypes.Result{InsertID: 0, RowsAffected: 0}) + + tablet1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_MASTER, primaryDb) + tablet2 := testlib.NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) + tablet3 := testlib.NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) + + tablet1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "FAKE RESET ALL REPLICATION", + "CREATE DATABASE IF NOT EXISTS _vt", + "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal", + "CREATE DATABASE IF NOT EXISTS _vt", + "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal", + "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, master_alias, replication_position) VALUES", + } + + tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "FAKE RESET ALL REPLICATION", + "FAKE RESET ALL REPLICATION", + "FAKE SET SLAVE POSITION", + "FAKE SET MASTER", + "START SLAVE", + } + tablet2.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, 
tablet1.Tablet.MysqlPort) + + tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "FAKE RESET ALL REPLICATION", + "FAKE RESET ALL REPLICATION", + "FAKE SET SLAVE POSITION", + "FAKE SET MASTER", + "START SLAVE", + } + tablet3.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + + for _, tablet := range []*testlib.FakeTablet{tablet1, tablet2, tablet3} { + tablet.StartActionLoop(t, wr) + defer tablet.StopActionLoop(t) + + tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) + } + + vtctld := grpcvtctldserver.NewVtctldServer(ts) + resp, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ + Keyspace: tablet1.Tablet.Keyspace, + Shard: tablet1.Tablet.Shard, + PrimaryElectTabletAlias: tablet1.Tablet.Alias, + }) + + assert.NoError(t, err) + assert.NotNil(t, resp) +} + +func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { + ts := memorytopo.NewServer("cell1") + tmc := tmclient.NewTabletManagerClient() + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) + + primaryDb := fakesqldb.New(t) + primaryDb.AddQuery("create database if not exists `vt_test_keyspace`", &sqltypes.Result{InsertID: 0, RowsAffected: 0}) + + tablet1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, primaryDb) + tablet2 := testlib.NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) + tablet3 := testlib.NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) + + tablet1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "FAKE RESET ALL REPLICATION", + "CREATE DATABASE IF NOT EXISTS _vt", + "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal", + "CREATE DATABASE IF NOT EXISTS _vt", + "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal", + "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, master_alias, replication_position) VALUES", + } + + 
tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "FAKE RESET ALL REPLICATION", + "FAKE SET SLAVE POSITION", + "FAKE SET MASTER", + "START SLAVE", + } + tablet2.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + + tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ + "FAKE RESET ALL REPLICATION", + "FAKE SET SLAVE POSITION", + "FAKE SET MASTER", + "START SLAVE", + } + tablet3.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + + for _, tablet := range []*testlib.FakeTablet{tablet1, tablet2, tablet3} { + tablet.StartActionLoop(t, wr) + defer tablet.StopActionLoop(t) + + tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) + } + + vtctld := grpcvtctldserver.NewVtctldServer(ts) + _, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ + Keyspace: tablet1.Tablet.Keyspace, + Shard: tablet1.Tablet.Shard, + PrimaryElectTabletAlias: tablet1.Tablet.Alias, + }) + + assert.Error(t, err) + + resp, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ + Keyspace: tablet1.Tablet.Keyspace, + Shard: tablet1.Tablet.Shard, + PrimaryElectTabletAlias: tablet1.Tablet.Alias, + Force: true, + }) + assert.NoError(t, err) + assert.NotNil(t, resp) + tablet1PostInit, err := ts.GetTablet(context.Background(), tablet1.Tablet.Alias) + require.NoError(t, err) + assert.Equal(t, topodatapb.TabletType_MASTER, tablet1PostInit.Type) +} diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index 4af7816e859..7bbf73d4c1b 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -18,23 +18,364 @@ package grpcvtctldserver import ( "context" + "errors" + "fmt" + "path/filepath" + "sync" + "time" "google.golang.org/grpc" + "k8s.io/apimachinery/pkg/util/sets" + 
"vitess.io/vitess/go/event" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + "vitess.io/vitess/go/vt/mysqlctl/mysqlctlproto" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +const ( + initShardMasterOperation = "InitShardMaster" // (TODO:@amason) Can I rename this to Primary? ) // VtctldServer implements the Vtctld RPC service protocol. type VtctldServer struct { - ts *topo.Server + ts *topo.Server + tmc tmclient.TabletManagerClient + ws *workflow.Server } // NewVtctldServer returns a new VtctldServer for the given topo server. func NewVtctldServer(ts *topo.Server) *VtctldServer { - return &VtctldServer{ts: ts} + tmc := tmclient.NewTabletManagerClient() + + return &VtctldServer{ + ts: ts, + tmc: tmc, + ws: workflow.NewServer(ts, tmc), + } +} + +// ChangeTabletType is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.ChangeTabletTypeRequest) (*vtctldatapb.ChangeTabletTypeResponse, error) { + tablet, err := s.ts.GetTablet(ctx, req.TabletAlias) + if err != nil { + return nil, err + } + + if !topo.IsTrivialTypeChange(tablet.Type, req.DbType) { + return nil, fmt.Errorf("tablet %v type change %v -> %v is not an allowed transition for ChangeTabletType", req.TabletAlias, tablet.Type, req.DbType) + } + + if req.DryRun { + afterTablet := *tablet.Tablet + afterTablet.Type = req.DbType + + return &vtctldatapb.ChangeTabletTypeResponse{ + BeforeTablet: tablet.Tablet, + AfterTablet: &afterTablet, + WasDryRun: true, + }, nil + } + + err = s.tmc.ChangeType(ctx, tablet.Tablet, req.DbType) + if err != nil { + return nil, err + } + + var changedTablet *topodatapb.Tablet + + changedTabletInfo, err := s.ts.GetTablet(ctx, req.TabletAlias) + if err != nil { + log.Warningf("error while reading the tablet we just changed back out of the topo: %v", err) + } else { + changedTablet = changedTabletInfo.Tablet + } + + return &vtctldatapb.ChangeTabletTypeResponse{ + BeforeTablet: tablet.Tablet, + AfterTablet: changedTablet, + WasDryRun: false, + }, nil +} + +// CreateKeyspace is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.CreateKeyspaceRequest) (*vtctldatapb.CreateKeyspaceResponse, error) { + switch req.Type { + case topodatapb.KeyspaceType_NORMAL: + case topodatapb.KeyspaceType_SNAPSHOT: + if req.BaseKeyspace == "" { + return nil, errors.New("BaseKeyspace is required for SNAPSHOT keyspaces") + } + + if req.SnapshotTime == nil { + return nil, errors.New("SnapshotTime is required for SNAPSHOT keyspaces") + } + default: + return nil, fmt.Errorf("unknown keyspace type %v", req.Type) + } + + ki := &topodatapb.Keyspace{ + KeyspaceType: req.Type, + ShardingColumnName: req.ShardingColumnName, + ShardingColumnType: req.ShardingColumnType, + + ServedFroms: req.ServedFroms, + + BaseKeyspace: req.BaseKeyspace, + SnapshotTime: req.SnapshotTime, + } + + err := s.ts.CreateKeyspace(ctx, req.Name, ki) + if req.Force && topo.IsErrType(err, topo.NodeExists) { + log.Infof("keyspace %v already exists (ignoring error with Force=true)", req.Name) + err = nil + + // Get the actual keyspace out of the topo; it may differ in structure, + // and we want to return the authoritative version as the "created" one + // to the client. 
+ var ks *topo.KeyspaceInfo + ks, _ = s.ts.GetKeyspace(ctx, req.Name) + ki = ks.Keyspace + } + + if err != nil { + return nil, err + } + + if !req.AllowEmptyVSchema { + if err := s.ts.EnsureVSchema(ctx, req.Name); err != nil { + return nil, err + } + } + + if req.Type == topodatapb.KeyspaceType_SNAPSHOT { + vs, err := s.ts.GetVSchema(ctx, req.BaseKeyspace) + if err != nil { + log.Infof("error from GetVSchema(%v) = %v", req.BaseKeyspace, err) + if topo.IsErrType(err, topo.NoNode) { + log.Infof("base keyspace %v does not exist; continuing with bare, unsharded vschema", req.BaseKeyspace) + vs = &vschemapb.Keyspace{ + Sharded: false, + Tables: map[string]*vschemapb.Table{}, + Vindexes: map[string]*vschemapb.Vindex{}, + } + } else { + return nil, err + } + } + + // SNAPSHOT keyspaces are excluded from global routing. + vs.RequireExplicitRouting = true + + if err := s.ts.SaveVSchema(ctx, req.Name, vs); err != nil { + return nil, fmt.Errorf("SaveVSchema(%v) = %w", vs, err) + } + } + + cells := []string{} + err = s.ts.RebuildSrvVSchema(ctx, cells) + if err != nil { + return nil, fmt.Errorf("RebuildSrvVSchema(%v) = %w", cells, err) + } + + return &vtctldatapb.CreateKeyspaceResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: req.Name, + Keyspace: ki, + }, + }, nil +} + +// CreateShard is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) CreateShard(ctx context.Context, req *vtctldatapb.CreateShardRequest) (*vtctldatapb.CreateShardResponse, error) { + if req.IncludeParent { + log.Infof("Creating empty keyspace for %s", req.Keyspace) + if err := s.ts.CreateKeyspace(ctx, req.Keyspace, &topodatapb.Keyspace{}); err != nil { + if req.Force && topo.IsErrType(err, topo.NodeExists) { + log.Infof("keyspace %v already exists; ignoring error because Force = true", req.Keyspace) + } else { + return nil, err + } + } + } + + shardExists := false + + if err := s.ts.CreateShard(ctx, req.Keyspace, req.ShardName); err != nil { + if req.Force && topo.IsErrType(err, topo.NodeExists) { + log.Infof("shard %v/%v already exists; ignoring error because Force = true", req.Keyspace, req.ShardName) + shardExists = true + } else { + return nil, err + } + } + + // Fetch what we just created out of the topo. Errors should never happen + // here, but we'll check them anyway. + + ks, err := s.ts.GetKeyspace(ctx, req.Keyspace) + if err != nil { + return nil, err + } + + shard, err := s.ts.GetShard(ctx, req.Keyspace, req.ShardName) + if err != nil { + return nil, err + } + + return &vtctldatapb.CreateShardResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: req.Keyspace, + Keyspace: ks.Keyspace, + }, + Shard: &vtctldatapb.Shard{ + Keyspace: req.Keyspace, + Name: req.ShardName, + Shard: shard.Shard, + }, + ShardAlreadyExists: shardExists, + }, nil +} + +// DeleteKeyspace is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.DeleteKeyspaceRequest) (*vtctldatapb.DeleteKeyspaceResponse, error) { + shards, err := s.ts.GetShardNames(ctx, req.Keyspace) + if err != nil { + return nil, err + } + + if len(shards) > 0 { + if !req.Recursive { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "keyspace %v still has %d shards; use Recursive=true or remove them manually", req.Keyspace, len(shards)) + } + + log.Infof("Deleting all %d shards (and their tablets) in keyspace %v", len(shards), req.Keyspace) + recursive := true + evenIfServing := true + + for _, shard := range shards { + log.Infof("Recursively deleting shard %v/%v", req.Keyspace, shard) + if err := deleteShard(ctx, s.ts, req.Keyspace, shard, recursive, evenIfServing); err != nil { + return nil, fmt.Errorf("cannot delete shard %v/%v: %w", req.Keyspace, shard, err) + } + } + } + + cells, err := s.ts.GetKnownCells(ctx) + if err != nil { + return nil, err + } + + for _, cell := range cells { + if err := s.ts.DeleteKeyspaceReplication(ctx, cell, req.Keyspace); err != nil && !topo.IsErrType(err, topo.NoNode) { + log.Warningf("Cannot delete KeyspaceReplication in cell %v for %v: %v", cell, req.Keyspace, err) + } + + if err := s.ts.DeleteSrvKeyspace(ctx, cell, req.Keyspace); err != nil && !topo.IsErrType(err, topo.NoNode) { + log.Warningf("Cannot delete SrvKeyspace in cell %v for %v: %v", cell, req.Keyspace, err) + } + } + + if err := s.ts.DeleteKeyspace(ctx, req.Keyspace); err != nil { + return nil, err + } + + return &vtctldatapb.DeleteKeyspaceResponse{}, nil +} + +// DeleteShards is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) DeleteShards(ctx context.Context, req *vtctldatapb.DeleteShardsRequest) (*vtctldatapb.DeleteShardsResponse, error) { + for _, shard := range req.Shards { + if err := deleteShard(ctx, s.ts, shard.Keyspace, shard.Name, req.Recursive, req.EvenIfServing); err != nil { + return nil, err + } + } + + return &vtctldatapb.DeleteShardsResponse{}, nil +} + +// DeleteTablets is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) DeleteTablets(ctx context.Context, req *vtctldatapb.DeleteTabletsRequest) (*vtctldatapb.DeleteTabletsResponse, error) { + for _, alias := range req.TabletAliases { + if err := deleteTablet(ctx, s.ts, alias, req.AllowPrimary); err != nil { + return nil, err + } + } + + return &vtctldatapb.DeleteTabletsResponse{}, nil +} + +// EmergencyReparentShard is part of the vtctldservicepb.VtctldServer interface. +func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldatapb.EmergencyReparentShardRequest) (*vtctldatapb.EmergencyReparentShardResponse, error) { + waitReplicasTimeout, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout) + if err != nil { + return nil, err + } else if !ok { + waitReplicasTimeout = time.Second * 30 + } + + m := sync.RWMutex{} + logstream := []*logutilpb.Event{} + logger := logutil.NewCallbackLogger(func(e *logutilpb.Event) { + m.Lock() + defer m.Unlock() + + logstream = append(logstream, e) + }) + + ev, err := reparentutil.NewEmergencyReparenter(s.ts, s.tmc, logger).ReparentShard(ctx, + req.Keyspace, + req.Shard, + reparentutil.EmergencyReparentOptions{ + NewPrimaryAlias: req.NewPrimary, + IgnoreReplicas: sets.NewString(topoproto.TabletAliasList(req.IgnoreReplicas).ToStringSlice()...), + WaitReplicasTimeout: waitReplicasTimeout, + }, + ) + + resp := &vtctldatapb.EmergencyReparentShardResponse{ + Keyspace: req.Keyspace, + Shard: req.Shard, + } + + if ev != nil { + resp.Keyspace = ev.ShardInfo.Keyspace() + resp.Shard = ev.ShardInfo.ShardName() + + if 
!topoproto.TabletAliasIsZero(ev.NewMaster.Alias) { + resp.PromotedPrimary = ev.NewMaster.Alias + } + } + + m.RLock() + defer m.RUnlock() + + resp.Events = make([]*logutilpb.Event, len(logstream)) + copy(resp.Events, logstream) + + return resp, err } // FindAllShardsInKeyspace is part of the vtctlservicepb.VtctldServer interface. @@ -58,6 +399,70 @@ func (s *VtctldServer) FindAllShardsInKeyspace(ctx context.Context, req *vtctlda }, nil } +// GetBackups is part of the vtctldservicepb.VtctldServer interface. +func (s *VtctldServer) GetBackups(ctx context.Context, req *vtctldatapb.GetBackupsRequest) (*vtctldatapb.GetBackupsResponse, error) { + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return nil, err + } + + defer bs.Close() + + bucket := filepath.Join(req.Keyspace, req.Shard) + bhs, err := bs.ListBackups(ctx, bucket) + if err != nil { + return nil, err + } + + resp := &vtctldatapb.GetBackupsResponse{ + Backups: make([]*mysqlctlpb.BackupInfo, len(bhs)), + } + + for i, bh := range bhs { + resp.Backups[i] = mysqlctlproto.BackupHandleToProto(bh) + } + + return resp, nil +} + +// GetCellInfoNames is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetCellInfoNames(ctx context.Context, req *vtctldatapb.GetCellInfoNamesRequest) (*vtctldatapb.GetCellInfoNamesResponse, error) { + names, err := s.ts.GetCellInfoNames(ctx) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetCellInfoNamesResponse{Names: names}, nil +} + +// GetCellInfo is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetCellInfo(ctx context.Context, req *vtctldatapb.GetCellInfoRequest) (*vtctldatapb.GetCellInfoResponse, error) { + if req.Cell == "" { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell field is required") + } + + // We use a strong read, because users using this command want the latest + // data, and this is user-generated, not used in any automated process. 
+ strongRead := true + ci, err := s.ts.GetCellInfo(ctx, req.Cell, strongRead) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetCellInfoResponse{CellInfo: ci}, nil +} + +// GetCellsAliases is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetCellsAliases(ctx context.Context, req *vtctldatapb.GetCellsAliasesRequest) (*vtctldatapb.GetCellsAliasesResponse, error) { + strongRead := true + aliases, err := s.ts.GetCellsAliases(ctx, strongRead) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetCellsAliasesResponse{Aliases: aliases}, nil +} + // GetKeyspace is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) GetKeyspace(ctx context.Context, req *vtctldatapb.GetKeyspaceRequest) (*vtctldatapb.GetKeyspaceResponse, error) { keyspace, err := s.ts.GetKeyspace(ctx, req.Keyspace) @@ -94,6 +499,853 @@ func (s *VtctldServer) GetKeyspaces(ctx context.Context, req *vtctldatapb.GetKey return &vtctldatapb.GetKeyspacesResponse{Keyspaces: keyspaces}, nil } +// GetSchema is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) GetSchema(ctx context.Context, req *vtctldatapb.GetSchemaRequest) (*vtctldatapb.GetSchemaResponse, error) { + tablet, err := s.ts.GetTablet(ctx, req.TabletAlias) + if err != nil { + return nil, fmt.Errorf("GetTablet(%v) failed: %w", req.TabletAlias, err) + } + + sd, err := s.tmc.GetSchema(ctx, tablet.Tablet, req.Tables, req.ExcludeTables, req.IncludeViews) + if err != nil { + return nil, fmt.Errorf("GetSchema(%v, %v, %v, %v) failed: %w", tablet.Tablet, req.Tables, req.ExcludeTables, req.IncludeViews, err) + } + + if req.TableNamesOnly { + nameTds := make([]*tabletmanagerdatapb.TableDefinition, len(sd.TableDefinitions)) + + for i, td := range sd.TableDefinitions { + nameTds[i] = &tabletmanagerdatapb.TableDefinition{ + Name: td.Name, + } + } + + sd.TableDefinitions = nameTds + } else if req.TableSizesOnly { + sizeTds := make([]*tabletmanagerdatapb.TableDefinition, len(sd.TableDefinitions)) + + for i, td := range sd.TableDefinitions { + sizeTds[i] = &tabletmanagerdatapb.TableDefinition{ + Name: td.Name, + Type: td.Type, + RowCount: td.RowCount, + DataLength: td.DataLength, + } + } + + sd.TableDefinitions = sizeTds + } + + return &vtctldatapb.GetSchemaResponse{ + Schema: sd, + }, nil +} + +// GetShard is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetShard(ctx context.Context, req *vtctldatapb.GetShardRequest) (*vtctldatapb.GetShardResponse, error) { + shard, err := s.ts.GetShard(ctx, req.Keyspace, req.ShardName) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetShardResponse{ + Shard: &vtctldatapb.Shard{ + Keyspace: req.Keyspace, + Name: req.ShardName, + Shard: shard.Shard, + }, + }, nil +} + +// GetSrvKeyspaces is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) GetSrvKeyspaces(ctx context.Context, req *vtctldatapb.GetSrvKeyspacesRequest) (*vtctldatapb.GetSrvKeyspacesResponse, error) { + cells := req.Cells + + if len(cells) == 0 { + var err error + + cells, err = s.ts.GetCellInfoNames(ctx) + if err != nil { + return nil, err + } + } + + srvKeyspaces := make(map[string]*topodatapb.SrvKeyspace, len(cells)) + + for _, cell := range cells { + srvKeyspace, err := s.ts.GetSrvKeyspace(ctx, cell, req.Keyspace) + + if err != nil { + if !topo.IsErrType(err, topo.NoNode) { + return nil, err + } + + log.Infof("no srvkeyspace for keyspace %s in cell %s", req.Keyspace, cell) + + srvKeyspace = nil + } + + srvKeyspaces[cell] = srvKeyspace + } + + return &vtctldatapb.GetSrvKeyspacesResponse{ + SrvKeyspaces: srvKeyspaces, + }, nil +} + +// GetSrvVSchema is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetSrvVSchema(ctx context.Context, req *vtctldatapb.GetSrvVSchemaRequest) (*vtctldatapb.GetSrvVSchemaResponse, error) { + vschema, err := s.ts.GetSrvVSchema(ctx, req.Cell) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetSrvVSchemaResponse{ + SrvVSchema: vschema, + }, nil +} + +// GetTablet is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetTablet(ctx context.Context, req *vtctldatapb.GetTabletRequest) (*vtctldatapb.GetTabletResponse, error) { + ti, err := s.ts.GetTablet(ctx, req.TabletAlias) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetTabletResponse{ + Tablet: ti.Tablet, + }, nil +} + +// GetTablets is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTabletsRequest) (*vtctldatapb.GetTabletsResponse, error) { + // It is possible that an old primary has not yet updated its type in the + // topo. In that case, report its type as UNKNOWN. It used to be MASTER but + // is no longer the serving primary. 
+ adjustTypeForStalePrimary := func(ti *topo.TabletInfo, mtst time.Time) { + if ti.Type == topodatapb.TabletType_MASTER && ti.GetMasterTermStartTime().Before(mtst) { + ti.Tablet.Type = topodatapb.TabletType_UNKNOWN + } + } + + // Create a context for our per-cell RPCs, with a timeout upper-bounded at + // the RemoteOperationTimeout. + // + // Per-cell goroutines may also cancel this context if they fail and the + // request specified Strict=true to allow us to fail faster. + ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer cancel() + + var ( + tabletMap map[string]*topo.TabletInfo + err error + ) + + switch { + case len(req.TabletAliases) > 0: + tabletMap, err = s.ts.GetTabletMap(ctx, req.TabletAliases) + if err != nil { + err = fmt.Errorf("GetTabletMap(%v) failed: %w", req.TabletAliases, err) + } + case req.Keyspace != "" && req.Shard != "": + tabletMap, err = s.ts.GetTabletMapForShard(ctx, req.Keyspace, req.Shard) + if err != nil { + err = fmt.Errorf("GetTabletMapForShard(%s, %s) failed: %w", req.Keyspace, req.Shard, err) + } + default: + // goto the req.Cells branch + tabletMap = nil + } + + if err != nil { + switch { + case topo.IsErrType(err, topo.PartialResult): + if req.Strict { + return nil, err + } + + log.Warningf("GetTablets encountered non-fatal error %s; continuing because Strict=false", err) + default: + return nil, err + } + } + + if tabletMap != nil { + var trueMasterTimestamp time.Time + for _, ti := range tabletMap { + if ti.Type == topodatapb.TabletType_MASTER { + masterTimestamp := ti.GetMasterTermStartTime() + if masterTimestamp.After(trueMasterTimestamp) { + trueMasterTimestamp = masterTimestamp + } + } + } + + tablets := make([]*topodatapb.Tablet, 0, len(tabletMap)) + for _, ti := range tabletMap { + adjustTypeForStalePrimary(ti, trueMasterTimestamp) + tablets = append(tablets, ti.Tablet) + } + + return &vtctldatapb.GetTabletsResponse{Tablets: tablets}, nil + } + + cells := req.Cells + if len(cells) == 0 { + c, 
err := s.ts.GetKnownCells(ctx) + if err != nil { + return nil, err + } + + cells = c + } + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + allTablets []*topo.TabletInfo + ) + + for _, cell := range cells { + wg.Add(1) + + go func(cell string) { + defer wg.Done() + + tablets, err := topotools.GetAllTablets(ctx, s.ts, cell) + if err != nil { + if req.Strict { + log.Infof("GetTablets got an error from cell %s: %s. Running in strict mode, so canceling other cell RPCs", cell, err) + cancel() + } + + rec.RecordError(fmt.Errorf("GetAllTablets(cell = %s) failed: %w", cell, err)) + + return + } + + m.Lock() + defer m.Unlock() + allTablets = append(allTablets, tablets...) + }(cell) + } + + wg.Wait() + + if rec.HasErrors() { + if req.Strict || len(rec.Errors) == len(cells) { + return nil, rec.Error() + } + } + + // Collect true master term start times, and optionally filter out any + // tablets by keyspace according to the request. + masterTermStartTimes := map[string]time.Time{} + filteredTablets := make([]*topo.TabletInfo, 0, len(allTablets)) + + for _, tablet := range allTablets { + if req.Keyspace != "" && tablet.Keyspace != req.Keyspace { + continue + } + + key := tablet.Keyspace + "." + tablet.Shard + if v, ok := masterTermStartTimes[key]; ok { + if tablet.GetMasterTermStartTime().After(v) { + masterTermStartTimes[key] = tablet.GetMasterTermStartTime() + } + } else { + masterTermStartTimes[key] = tablet.GetMasterTermStartTime() + } + + filteredTablets = append(filteredTablets, tablet) + } + + adjustedTablets := make([]*topodatapb.Tablet, len(filteredTablets)) + + // collect the tablets with adjusted master term start times. they've + // already been filtered by the above loop, so no keyspace filtering + // here. + for i, ti := range filteredTablets { + key := ti.Keyspace + "." 
+ ti.Shard + adjustTypeForStalePrimary(ti, masterTermStartTimes[key]) + + adjustedTablets[i] = ti.Tablet + } + + return &vtctldatapb.GetTabletsResponse{ + Tablets: adjustedTablets, + }, nil +} + +// GetVSchema is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetVSchema(ctx context.Context, req *vtctldatapb.GetVSchemaRequest) (*vtctldatapb.GetVSchemaResponse, error) { + vschema, err := s.ts.GetVSchema(ctx, req.Keyspace) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetVSchemaResponse{ + VSchema: vschema, + }, nil +} + +// GetWorkflows is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflowsRequest) (*vtctldatapb.GetWorkflowsResponse, error) { + return s.ws.GetWorkflows(ctx, req) +} + +// InitShardPrimary is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) InitShardPrimary(ctx context.Context, req *vtctldatapb.InitShardPrimaryRequest) (*vtctldatapb.InitShardPrimaryResponse, error) { + if req.Keyspace == "" { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "keyspace field is required") + } + + if req.Shard == "" { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "shard field is required") + } + + waitReplicasTimeout, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout) + if err != nil { + return nil, err + } else if !ok { + waitReplicasTimeout = time.Second * 30 + } + + ctx, unlock, err := s.ts.LockShard(ctx, req.Keyspace, req.Shard, fmt.Sprintf("InitShardPrimary(%v)", topoproto.TabletAliasString(req.PrimaryElectTabletAlias))) + if err != nil { + return nil, err + } + defer unlock(&err) + + m := sync.RWMutex{} + ev := &events.Reparent{} + logstream := []*logutilpb.Event{} + + resp := &vtctldatapb.InitShardPrimaryResponse{} + err = s.InitShardPrimaryLocked(ctx, ev, req, waitReplicasTimeout, tmclient.NewTabletManagerClient(), logutil.NewCallbackLogger(func(e *logutilpb.Event) { + 
m.Lock() + defer m.Unlock() + + logstream = append(logstream, e) + })) + if err != nil { + event.DispatchUpdate(ev, "failed InitShardPrimary: "+err.Error()) + } else { + event.DispatchUpdate(ev, "finished InitShardPrimary") + } + + m.RLock() + defer m.RUnlock() + + resp.Events = make([]*logutilpb.Event, len(logstream)) + copy(resp.Events, logstream) + + return resp, err +} + +// InitShardPrimaryLocked is the main work of doing an InitShardPrimary. It +// should only called by callers that have already locked the shard in the topo. +// It is only public so that it can be used in wrangler and legacy vtctl server. +func (s *VtctldServer) InitShardPrimaryLocked( + ctx context.Context, + ev *events.Reparent, + req *vtctldatapb.InitShardPrimaryRequest, + waitReplicasTimeout time.Duration, + tmc tmclient.TabletManagerClient, + logger logutil.Logger, +) error { + // (TODO:@amason) The code below this point is a verbatim copy of + // initShardMasterLocked in package wrangler, modulo the following: + // - s/keyspace/req.Keyspace + // - s/shard/req.Shard + // - s/masterElectTabletAlias/req.PrimaryElectTabletAlias + // - s/wr.logger/logger + // - s/wr.tmc/tmc + // - s/wr.ts/s.ts + // + // It is also sufficiently complex and critical code that I feel it's unwise + // to port and refactor in one change; so, this comment serves both as an + // acknowledgement of that, as well as a TODO marker for us to revisit this. + shardInfo, err := s.ts.GetShard(ctx, req.Keyspace, req.Shard) + if err != nil { + return err + } + ev.ShardInfo = *shardInfo + + event.DispatchUpdate(ev, "reading tablet map") + tabletMap, err := s.ts.GetTabletMapForShard(ctx, req.Keyspace, req.Shard) + if err != nil { + return err + } + + // Check the master elect is in tabletMap. 
+ masterElectTabletAliasStr := topoproto.TabletAliasString(req.PrimaryElectTabletAlias) + masterElectTabletInfo, ok := tabletMap[masterElectTabletAliasStr] + if !ok { + return fmt.Errorf("master-elect tablet %v is not in the shard", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + } + ev.NewMaster = *masterElectTabletInfo.Tablet + + // Check the master is the only master is the shard, or -force was used. + _, masterTabletMap := topotools.SortedTabletMap(tabletMap) + if !topoproto.TabletAliasEqual(shardInfo.MasterAlias, req.PrimaryElectTabletAlias) { + if !req.Force { + return fmt.Errorf("master-elect tablet %v is not the shard master, use -force to proceed anyway", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + } + + logger.Warningf("master-elect tablet %v is not the shard master, proceeding anyway as -force was used", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + } + if _, ok := masterTabletMap[masterElectTabletAliasStr]; !ok { + if !req.Force { + return fmt.Errorf("master-elect tablet %v is not a master in the shard, use -force to proceed anyway", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + } + logger.Warningf("master-elect tablet %v is not a master in the shard, proceeding anyway as -force was used", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + } + haveOtherMaster := false + for alias := range masterTabletMap { + if masterElectTabletAliasStr != alias { + haveOtherMaster = true + } + } + if haveOtherMaster { + if !req.Force { + return fmt.Errorf("master-elect tablet %v is not the only master in the shard, use -force to proceed anyway", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + } + logger.Warningf("master-elect tablet %v is not the only master in the shard, proceeding anyway as -force was used", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + } + + // First phase: reset replication on all tablets. If anyone fails, + // we stop. 
It is probably because it is unreachable, and may leave + // an unstable database process in the mix, with a database daemon + // at a wrong replication spot. + + // Create a context for the following RPCs that respects waitReplicasTimeout + resetCtx, resetCancel := context.WithTimeout(ctx, waitReplicasTimeout) + defer resetCancel() + + event.DispatchUpdate(ev, "resetting replication on all tablets") + wg := sync.WaitGroup{} + rec := concurrency.AllErrorRecorder{} + for alias, tabletInfo := range tabletMap { + wg.Add(1) + go func(alias string, tabletInfo *topo.TabletInfo) { + defer wg.Done() + logger.Infof("resetting replication on tablet %v", alias) + if err := tmc.ResetReplication(resetCtx, tabletInfo.Tablet); err != nil { + rec.RecordError(fmt.Errorf("tablet %v ResetReplication failed (either fix it, or Scrap it): %v", alias, err)) + } + }(alias, tabletInfo) + } + wg.Wait() + if err := rec.Error(); err != nil { + // if any of the replicas failed + return err + } + + // Check we still have the topology lock. + if err := topo.CheckShardLocked(ctx, req.Keyspace, req.Shard); err != nil { + return fmt.Errorf("lost topology lock, aborting: %v", err) + } + + // Tell the new master to break its replicas, return its replication + // position + logger.Infof("initializing master on %v", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) + event.DispatchUpdate(ev, "initializing master") + rp, err := tmc.InitMaster(ctx, masterElectTabletInfo.Tablet) + if err != nil { + return err + } + + // Check we stil have the topology lock. + if err := topo.CheckShardLocked(ctx, req.Keyspace, req.Shard); err != nil { + return fmt.Errorf("lost topology lock, aborting: %v", err) + } + + // Create a cancelable context for the following RPCs. + // If error conditions happen, we can cancel all outgoing RPCs. 
+ replCtx, replCancel := context.WithTimeout(ctx, waitReplicasTimeout) + defer replCancel() + + // Now tell the new master to insert the reparent_journal row, + // and tell everybody else to become a replica of the new master, + // and wait for the row in the reparent_journal table. + // We start all these in parallel, to handle the semi-sync + // case: for the master to be able to commit its row in the + // reparent_journal table, it needs connected replicas. + event.DispatchUpdate(ev, "reparenting all tablets") + now := time.Now().UnixNano() + wgMaster := sync.WaitGroup{} + wgReplicas := sync.WaitGroup{} + var masterErr error + for alias, tabletInfo := range tabletMap { + if alias == masterElectTabletAliasStr { + wgMaster.Add(1) + go func(alias string, tabletInfo *topo.TabletInfo) { + defer wgMaster.Done() + logger.Infof("populating reparent journal on new master %v", alias) + masterErr = tmc.PopulateReparentJournal(replCtx, tabletInfo.Tablet, now, + initShardMasterOperation, + req.PrimaryElectTabletAlias, rp) + }(alias, tabletInfo) + } else { + wgReplicas.Add(1) + go func(alias string, tabletInfo *topo.TabletInfo) { + defer wgReplicas.Done() + logger.Infof("initializing replica %v", alias) + if err := tmc.InitReplica(replCtx, tabletInfo.Tablet, req.PrimaryElectTabletAlias, rp, now); err != nil { + rec.RecordError(fmt.Errorf("tablet %v InitReplica failed: %v", alias, err)) + } + }(alias, tabletInfo) + } + } + + // After the master is done, we can update the shard record + // (note with semi-sync, it also means at least one replica is done). + wgMaster.Wait() + if masterErr != nil { + // The master failed, there is no way the + // replicas will work. So we cancel them all. 
+ logger.Warningf("master failed to PopulateReparentJournal, canceling replicas") + replCancel() + wgReplicas.Wait() + return fmt.Errorf("failed to PopulateReparentJournal on master: %v", masterErr) + } + if !topoproto.TabletAliasEqual(shardInfo.MasterAlias, req.PrimaryElectTabletAlias) { + if _, err := s.ts.UpdateShardFields(ctx, req.Keyspace, req.Shard, func(si *topo.ShardInfo) error { + si.MasterAlias = req.PrimaryElectTabletAlias + return nil + }); err != nil { + wgReplicas.Wait() + return fmt.Errorf("failed to update shard master record: %v", err) + } + } + + // Wait for the replicas to complete. If some of them fail, we + // don't want to rebuild the shard serving graph (the failure + // will most likely be a timeout, and our context will be + // expired, so the rebuild will fail anyway) + wgReplicas.Wait() + if err := rec.Error(); err != nil { + return err + } + + // Create database if necessary on the master. replicas will get it too through + // replication. Since the user called InitShardMaster, they've told us to + // assume that whatever data is on all the replicas is what they intended. + // If the database doesn't exist, it means the user intends for these tablets + // to begin serving with no data (i.e. first time initialization). + createDB := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", sqlescape.EscapeID(topoproto.TabletDbName(masterElectTabletInfo.Tablet))) + if _, err := tmc.ExecuteFetchAsDba(ctx, masterElectTabletInfo.Tablet, false, []byte(createDB), 1, false, true); err != nil { + return fmt.Errorf("failed to create database: %v", err) + } + // Refresh the state to force the tabletserver to reconnect after db has been created. + if err := tmc.RefreshState(ctx, masterElectTabletInfo.Tablet); err != nil { + log.Warningf("RefreshState failed: %v", err) + } + + return nil +} + +// PlannedReparentShard is part of the vtctldservicepb.VtctldServer interface. 
+func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatapb.PlannedReparentShardRequest) (*vtctldatapb.PlannedReparentShardResponse, error) { + waitReplicasTimeout, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout) + if err != nil { + return nil, err + } else if !ok { + waitReplicasTimeout = time.Second * 30 + } + + m := sync.RWMutex{} + logstream := []*logutilpb.Event{} + logger := logutil.NewCallbackLogger(func(e *logutilpb.Event) { + m.Lock() + defer m.Unlock() + + logstream = append(logstream, e) + }) + + ev, err := reparentutil.NewPlannedReparenter(s.ts, s.tmc, logger).ReparentShard(ctx, + req.Keyspace, + req.Shard, + reparentutil.PlannedReparentOptions{ + AvoidPrimaryAlias: req.AvoidPrimary, + NewPrimaryAlias: req.NewPrimary, + WaitReplicasTimeout: waitReplicasTimeout, + }, + ) + + resp := &vtctldatapb.PlannedReparentShardResponse{ + Keyspace: req.Keyspace, + Shard: req.Shard, + } + + if ev != nil { + resp.Keyspace = ev.ShardInfo.Keyspace() + resp.Shard = ev.ShardInfo.ShardName() + + if !topoproto.TabletAliasIsZero(ev.NewMaster.Alias) { + resp.PromotedPrimary = ev.NewMaster.Alias + } + } + + m.RLock() + defer m.RUnlock() + + resp.Events = make([]*logutilpb.Event, len(logstream)) + copy(resp.Events, logstream) + + return resp, err +} + +// RemoveKeyspaceCell is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) RemoveKeyspaceCell(ctx context.Context, req *vtctldatapb.RemoveKeyspaceCellRequest) (*vtctldatapb.RemoveKeyspaceCellResponse, error) { + shards, err := s.ts.GetShardNames(ctx, req.Keyspace) + if err != nil { + return nil, err + } + + // Remove all the shards, serially. Stop immediately if any fail. 
+ for _, shard := range shards { + log.Infof("Removing cell %v from shard %v/%v", req.Cell, req.Keyspace, shard) + if err := removeShardCell(ctx, s.ts, req.Cell, req.Keyspace, shard, req.Recursive, req.Force); err != nil { + return nil, fmt.Errorf("cannot remove cell %v from shard %v/%v: %w", req.Cell, req.Keyspace, shard, err) + } + } + + // Last, remove the SrvKeyspace object. + log.Infof("Removing cell %v keyspace %v SrvKeyspace object", req.Cell, req.Keyspace) + if err := s.ts.DeleteSrvKeyspace(ctx, req.Cell, req.Keyspace); err != nil { + return nil, fmt.Errorf("cannot delete SrvKeyspace from cell %v for keyspace %v: %w", req.Cell, req.Keyspace, err) + } + + return &vtctldatapb.RemoveKeyspaceCellResponse{}, nil +} + +// RemoveShardCell is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) RemoveShardCell(ctx context.Context, req *vtctldatapb.RemoveShardCellRequest) (*vtctldatapb.RemoveShardCellResponse, error) { + if err := removeShardCell(ctx, s.ts, req.Cell, req.Keyspace, req.ShardName, req.Recursive, req.Force); err != nil { + return nil, err + } + + return &vtctldatapb.RemoveShardCellResponse{}, nil +} + +// ReparentTablet is part of the vtctldservicepb.VtctldServer interface. 
+func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.ReparentTabletRequest) (*vtctldatapb.ReparentTabletResponse, error) { + if req.Tablet == nil { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "tablet alias must not be nil") + } + + tablet, err := s.ts.GetTablet(ctx, req.Tablet) + if err != nil { + return nil, err + } + + shard, err := s.ts.GetShard(ctx, tablet.Keyspace, tablet.Shard) + if err != nil { + return nil, err + } + + if !shard.HasMaster() { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no master tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + } + + shardPrimary, err := s.ts.GetTablet(ctx, shard.MasterAlias) + if err != nil { + return nil, fmt.Errorf("cannot lookup primary tablet %v for shard %v/%v: %w", topoproto.TabletAliasString(shard.MasterAlias), tablet.Keyspace, tablet.Shard, err) + } + + if shardPrimary.Type != topodatapb.TabletType_MASTER { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard master %v", topoproto.TabletAliasString(shard.MasterAlias)) + } + + if shardPrimary.Keyspace != tablet.Keyspace || shardPrimary.Shard != tablet.Shard { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "master %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.MasterAlias), topoproto.TabletAliasString(req.Tablet), tablet.Keyspace, tablet.Shard) + } + + if err := s.tmc.SetMaster(ctx, tablet.Tablet, shard.MasterAlias, 0, "", false); err != nil { + return nil, err + } + + return &vtctldatapb.ReparentTabletResponse{ + Keyspace: tablet.Keyspace, + Shard: tablet.Shard, + Primary: shard.MasterAlias, + }, nil +} + +// ShardReplicationPositions is part of the vtctldservicepb.VtctldServer interface. 
+func (s *VtctldServer) ShardReplicationPositions(ctx context.Context, req *vtctldatapb.ShardReplicationPositionsRequest) (*vtctldatapb.ShardReplicationPositionsResponse, error) { + tabletInfoMap, err := s.ts.GetTabletMapForShard(ctx, req.Keyspace, req.Shard) + if err != nil { + return nil, fmt.Errorf("GetTabletMapForShard(%s, %s) failed: %w", req.Keyspace, req.Shard, err) + } + + log.Infof("Gathering tablet replication status for: %v", tabletInfoMap) + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results = make(map[string]*replicationdatapb.Status, len(tabletInfoMap)) + tabletMap = make(map[string]*topodatapb.Tablet, len(tabletInfoMap)) + ) + + // For each tablet, we're going to create an individual context, using + // *topo.RemoteOperationTimeout as the maximum timeout (but we'll respect + // any stricter timeout in the parent context). If an individual tablet + // times out fetching its replication position, we won't fail the overall + // request. Instead, we'll log a warning and record a nil entry in the + // result map; that way, the caller can tell the difference between a tablet + // that timed out vs a tablet that didn't get queried at all. + + for alias, tabletInfo := range tabletInfoMap { + switch { + case tabletInfo.Type == topodatapb.TabletType_MASTER: + wg.Add(1) + + go func(ctx context.Context, alias string, tablet *topodatapb.Tablet) { + defer wg.Done() + + ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer cancel() + + var status *replicationdatapb.Status + + pos, err := s.tmc.MasterPosition(ctx, tablet) + if err != nil { + switch ctx.Err() { + case context.Canceled: + log.Warningf("context canceled before obtaining master position from %s: %s", alias, err) + case context.DeadlineExceeded: + log.Warningf("context deadline exceeded before obtaining master position from %s: %s", alias, err) + default: + // The RPC was not timed out or canceled. 
We treat this + // as a fatal error for the overall request. + rec.RecordError(fmt.Errorf("MasterPosition(%s) failed: %w", alias, err)) + return + } + } else { + // No error, record a valid status for this tablet. + status = &replicationdatapb.Status{ + Position: pos, + } + } + + m.Lock() + defer m.Unlock() + + results[alias] = status + tabletMap[alias] = tablet + }(ctx, alias, tabletInfo.Tablet) + case tabletInfo.IsReplicaType(): + wg.Add(1) + + go func(ctx context.Context, alias string, tablet *topodatapb.Tablet) { + defer wg.Done() + + ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer cancel() + + status, err := s.tmc.ReplicationStatus(ctx, tablet) + if err != nil { + switch ctx.Err() { + case context.Canceled: + log.Warningf("context canceled before obtaining replication position from %s: %s", alias, err) + case context.DeadlineExceeded: + log.Warningf("context deadline exceeded before obtaining replication position from %s: %s", alias, err) + default: + // The RPC was not timed out or canceled. We treat this + // as a fatal error for the overall request. + rec.RecordError(fmt.Errorf("ReplicationStatus(%s) failed: %s", alias, err)) + return + } + + status = nil // Don't record any position for this tablet. + } + + m.Lock() + defer m.Unlock() + + results[alias] = status + tabletMap[alias] = tablet + }(ctx, alias, tabletInfo.Tablet) + } + } + + wg.Wait() + + if rec.HasErrors() { + return nil, rec.Error() + } + + return &vtctldatapb.ShardReplicationPositionsResponse{ + ReplicationStatuses: results, + TabletMap: tabletMap, + }, nil +} + +// TabletExternallyReparented is part of the vtctldservicepb.VtctldServer interface. 
+func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtctldatapb.TabletExternallyReparentedRequest) (*vtctldatapb.TabletExternallyReparentedResponse, error) { + if req.Tablet == nil { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "TabletExternallyReparentedRequest.Tablet must not be nil") + } + + tablet, err := s.ts.GetTablet(ctx, req.Tablet) + if err != nil { + log.Warningf("TabletExternallyReparented: failed to read tablet record for %v: %v", topoproto.TabletAliasString(req.Tablet), err) + return nil, err + } + + shard, err := s.ts.GetShard(ctx, tablet.Keyspace, tablet.Shard) + if err != nil { + log.Warningf("TabletExternallyReparented: failed to read global shard record for %v/%v: %v", tablet.Keyspace, tablet.Shard, err) + return nil, err + } + + resp := &vtctldatapb.TabletExternallyReparentedResponse{ + Keyspace: shard.Keyspace(), + Shard: shard.ShardName(), + NewPrimary: req.Tablet, + OldPrimary: shard.MasterAlias, + } + + // If the externally reparented (new primary) tablet is already MASTER in + // the topo, this is a no-op. + if tablet.Type == topodatapb.TabletType_MASTER { + return resp, nil + } + + log.Infof("TabletExternallyReparented: executing tablet type change %v -> MASTER on %v", tablet.Type, topoproto.TabletAliasString(req.Tablet)) + ev := &events.Reparent{ + ShardInfo: *shard, + NewMaster: *tablet.Tablet, + OldMaster: topodatapb.Tablet{ + Alias: shard.MasterAlias, + Type: topodatapb.TabletType_MASTER, + }, + } + + defer func() { + // Ensure we dispatch an update with any failure. 
+ if err != nil { + event.DispatchUpdate(ev, "failed: "+err.Error()) + } + }() + + event.DispatchUpdate(ev, "starting external reparent") + + if err := s.tmc.ChangeType(ctx, tablet.Tablet, topodatapb.TabletType_MASTER); err != nil { + log.Warningf("ChangeType(%v, MASTER): %v", topoproto.TabletAliasString(req.Tablet), err) + return nil, err + } + + event.DispatchUpdate(ev, "finished") + + return resp, nil +} + // StartServer registers a VtctldServer for RPCs on the given gRPC server. func StartServer(s *grpc.Server, ts *topo.Server) { vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts)) diff --git a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go new file mode 100644 index 00000000000..8e1d17587a2 --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go @@ -0,0 +1,596 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package grpcvtctldserver + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" + "vitess.io/vitess/go/vt/proto/vttime" +) + +func TestEmergencyReparentShardSlow(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + + req *vtctldatapb.EmergencyReparentShardRequest + expected *vtctldatapb.EmergencyReparentShardResponse + expectEventsToOccur bool + shouldErr bool + }{ + { + // Note: this test case and the one below combine to assert that a + // nil WaitReplicasTimeout in the request results in a default 30 + // second WaitReplicasTimeout. + // + // They are also very slow, because they require waiting 29 seconds + // and 30 seconds, respectively. Fortunately, we can run them + // concurrently, so the total time is only around 30 seconds, but + // that's still a long time for a unit test! 
+ name: "nil WaitReplicasTimeout and request takes 29 seconds is ok", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + PopulateReparentJournalDelays: map[string]time.Duration{ + "zone1-0000000200": time.Second * 29, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": {}, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + Error: mysql.ErrNotReplica, + }, + "zone1-0000000101": { + Error: assert.AnError, + }, + "zone1-0000000200": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + 
}, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5": nil, + }, + "zone1-0000000200": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5": nil, + }, + }, + }, + req: &vtctldatapb.EmergencyReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: nil, + }, + expected: &vtctldatapb.EmergencyReparentShardResponse{ + Keyspace: "testkeyspace", + Shard: "-", + PromotedPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + expectEventsToOccur: true, + shouldErr: false, + }, + { + name: "nil WaitReplicasTimeout and request takes 31 seconds is error", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + PopulateReparentJournalDelays: map[string]time.Duration{ + "zone1-0000000200": time.Second * 31, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": {}, + }, + SetMasterResults: map[string]error{ + 
"zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + Error: mysql.ErrNotReplica, + }, + "zone1-0000000101": { + Error: assert.AnError, + }, + "zone1-0000000200": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5": nil, + }, + "zone1-0000000200": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5": nil, + }, + }, + }, + req: &vtctldatapb.EmergencyReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: nil, + }, + expectEventsToOccur: true, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if tt.req == nil { + t.Skip("tt.EmergencyReparentShardRequest = nil implies test not ready to run") + } + + testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + ForceSetShardMaster: true, + SkipShardCreation: false, + }, tt.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) + + // We defer this because we want to check in both error and non- + // error cases, but after the main set of assertions for those + // cases. 
+ defer func() { + if !tt.expectEventsToOccur { + testutil.AssertNoLogutilEventsOccurred(t, resp, "expected no events to occur during ERS") + + return + } + + testutil.AssertLogutilEventsOccurred(t, resp, "expected events to occur during ERS") + }() + + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + testutil.AssertEmergencyReparentShardResponsesEqual(t, *tt.expected, *resp) + }) + } +} + +func TestPlannedReparentShardSlow(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + + req *vtctldatapb.PlannedReparentShardRequest + expected *vtctldatapb.PlannedReparentShardResponse + expectEventsToOccur bool + shouldErr bool + }{ + { + // Note: this test case and the one below combine to assert that a + // nil WaitReplicasTimeout in the request results in a default 30 + // second WaitReplicasTimeout. + name: "nil WaitReplicasTimeout and request takes 29 seconds is ok", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "primary-demotion position", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": 
{ + Position: "doesn't matter", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, + }, + PromoteReplicaPostDelays: map[string]time.Duration{ + "zone1-0000000200": time.Second * 28, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Result: "promotion position", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, // waiting for master-position during promotion + // reparent SetMaster calls + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "primary-demotion position": nil, + }, + }, + }, + req: &vtctldatapb.PlannedReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: nil, + }, + expected: &vtctldatapb.PlannedReparentShardResponse{ + Keyspace: "testkeyspace", + Shard: "-", + PromotedPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + expectEventsToOccur: true, + shouldErr: false, + }, + { + name: "nil WaitReplicasTimeout and request takes 31 seconds is error", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + 
"zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "primary-demotion position", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "doesn't matter", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, + }, + PromoteReplicaPostDelays: map[string]time.Duration{ + "zone1-0000000200": time.Second * 30, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Result: "promotion position", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, // waiting for master-position during promotion + // reparent SetMaster calls + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "primary-demotion position": nil, + }, + }, + }, + req: &vtctldatapb.PlannedReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: nil, + }, + expected: &vtctldatapb.PlannedReparentShardResponse{ + Keyspace: "testkeyspace", + Shard: "-", + PromotedPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + expectEventsToOccur: true, + shouldErr: false, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + ForceSetShardMaster: true, + SkipShardCreation: false, + }, tt.tablets...) 
+ + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + resp, err := vtctld.PlannedReparentShard(ctx, tt.req) + + // We defer this because we want to check in both error and non- + // error cases, but after the main set of assertions for those + // cases. + defer func() { + if !tt.expectEventsToOccur { + testutil.AssertNoLogutilEventsOccurred(t, resp, "expected no events to occur during ERS") + + return + } + + testutil.AssertLogutilEventsOccurred(t, resp, "expected events to occur during ERS") + }() + + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + testutil.AssertPlannedReparentShardResponsesEqual(t, *tt.expected, *resp) + }) + } +} diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index aa3ecf2b1b4..15e59059638 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -19,126 +19,5405 @@ package grpcvtctldserver import ( "context" "errors" + "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + "vitess.io/vitess/go/vt/vttablet/tmclient" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" + querypb "vitess.io/vitess/go/vt/proto/query" + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + 
vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" + "vitess.io/vitess/go/vt/proto/vttime" ) +func init() { + *backupstorage.BackupStorageImplementation = testutil.BackupStorageImplementation + + // For tests that don't actually care about mocking the tmclient (i.e. they + // call NewVtctldServer to initialize the unit under test), this needs to be + // set. + // + // Tests that do care about the tmclient should use + // testutil.NewVtctldServerWithTabletManagerClient to initialize their + // VtctldServer. + *tmclient.TabletManagerProtocol = "grpcvtctldserver.test" + tmclient.RegisterTabletManagerClientFactory("grpcvtctldserver.test", func() tmclient.TabletManagerClient { + return nil + }) +} + +func TestChangeTabletType(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cells []string + tablets []*topodatapb.Tablet + req *vtctldatapb.ChangeTabletTypeRequest + expected *vtctldatapb.ChangeTabletTypeResponse + shouldErr bool + }{ + { + name: "success", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_RDONLY, + }, + expected: &vtctldatapb.ChangeTabletTypeResponse{ + BeforeTablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + AfterTablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, + WasDryRun: false, + }, + shouldErr: false, + }, + { + name: "dry run", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: 
&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_RDONLY, + DryRun: true, + }, + expected: &vtctldatapb.ChangeTabletTypeResponse{ + BeforeTablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + AfterTablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, + WasDryRun: true, + }, + shouldErr: false, + }, + { + name: "tablet not found", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_RDONLY, + }, + expected: nil, + shouldErr: true, + }, + { + name: "master promotions not allowed", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_MASTER, + }, + expected: nil, + shouldErr: true, + }, + { + name: "master demotions not allowed", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_REPLICA, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer(tt.cells...) 
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ + TopoServer: ts, + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + + testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) + + resp, err := vtctld.ChangeTabletType(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + + // If we are testing a dry-run, then the tablet in the actual + // topo should match the BeforeTablet in the response. Otherwise, + // the tablet in the actual topo should match the AfterTablet in + // the response. + expectedRealType := resp.AfterTablet.Type + msg := "ChangeTabletType did not cause topo update" + if tt.req.DryRun { + expectedRealType = resp.BeforeTablet.Type + msg = "dryrun type change resulted in real type change" + } + + tablet, err := ts.GetTablet(ctx, tt.req.TabletAlias) + assert.NoError(t, err, + "could not load tablet %s from topo after type change %v -> %v [dryrun=%t]", + topoproto.TabletAliasString(tt.req.TabletAlias), + resp.BeforeTablet.Type, + resp.AfterTablet.Type, + resp.WasDryRun, + ) + assert.Equal(t, expectedRealType, tablet.Type, msg) + }) + } + + t.Run("tabletmanager failure", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer("zone1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ + TopoServer: nil, + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + + testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, nil) + + _, err := vtctld.ChangeTabletType(ctx, &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_RDONLY, + }) + assert.Error(t, err) + }) +} + +func 
TestCreateKeyspace(t *testing.T) { + t.Parallel() + + cells := []string{"zone1", "zone2", "zone3"} + tests := []struct { + name string + topo map[string]*topodatapb.Keyspace + vschemas map[string]*vschemapb.Keyspace + req *vtctldatapb.CreateKeyspaceRequest + expected *vtctldatapb.CreateKeyspaceResponse + shouldErr bool + vschemaShouldExist bool + expectedVSchema *vschemapb.Keyspace + }{ + { + name: "normal keyspace", + topo: nil, + req: &vtctldatapb.CreateKeyspaceRequest{ + Name: "testkeyspace", + Type: topodatapb.KeyspaceType_NORMAL, + }, + expected: &vtctldatapb.CreateKeyspaceResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_NORMAL, + }, + }, + }, + vschemaShouldExist: true, + expectedVSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + shouldErr: false, + }, + { + name: "snapshot keyspace", + topo: map[string]*topodatapb.Keyspace{ + "testkeyspace": { + KeyspaceType: topodatapb.KeyspaceType_NORMAL, + }, + }, + vschemas: map[string]*vschemapb.Keyspace{ + "testkeyspace": { + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "h1": { + Type: "hash", + }, + }, + }, + }, + req: &vtctldatapb.CreateKeyspaceRequest{ + Name: "testsnapshot", + Type: topodatapb.KeyspaceType_SNAPSHOT, + BaseKeyspace: "testkeyspace", + SnapshotTime: &vttime.Time{ + Seconds: 1, + }, + }, + expected: &vtctldatapb.CreateKeyspaceResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testsnapshot", + Keyspace: &topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, + BaseKeyspace: "testkeyspace", + SnapshotTime: &vttime.Time{ + Seconds: 1, + }, + }, + }, + }, + vschemaShouldExist: true, + expectedVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "h1": { + Type: "hash", + }, + }, + RequireExplicitRouting: true, + }, + shouldErr: false, + }, + { + name: "snapshot keyspace with no base keyspace specified", + topo: nil, + req: 
&vtctldatapb.CreateKeyspaceRequest{ + Name: "testsnapshot", + Type: topodatapb.KeyspaceType_SNAPSHOT, + SnapshotTime: &vttime.Time{}, + }, + expected: nil, + shouldErr: true, + }, + { + name: "snapshot keyspace with no snapshot time", + topo: nil, + req: &vtctldatapb.CreateKeyspaceRequest{ + Name: "testsnapshot", + Type: topodatapb.KeyspaceType_SNAPSHOT, + BaseKeyspace: "testkeyspace", + }, + expected: nil, + shouldErr: true, + }, + { + name: "snapshot keyspace with nonexistent base keyspace", + topo: nil, + req: &vtctldatapb.CreateKeyspaceRequest{ + Name: "testsnapshot", + Type: topodatapb.KeyspaceType_SNAPSHOT, + BaseKeyspace: "testkeyspace", + SnapshotTime: &vttime.Time{Seconds: 100}, + }, + expected: &vtctldatapb.CreateKeyspaceResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testsnapshot", + Keyspace: &topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, + BaseKeyspace: "testkeyspace", + SnapshotTime: &vttime.Time{Seconds: 100}, + }, + }, + }, + vschemaShouldExist: true, + expectedVSchema: &vschemapb.Keyspace{ + Sharded: false, + RequireExplicitRouting: true, + }, + shouldErr: false, + }, + { + name: "invalid keyspace type", + topo: nil, + req: &vtctldatapb.CreateKeyspaceRequest{ + Name: "badkeyspacetype", + Type: 10000000, + }, + expected: nil, + shouldErr: true, + }, + { + name: "keyspace exists/no force", + topo: map[string]*topodatapb.Keyspace{ + "testkeyspace": { + KeyspaceType: topodatapb.KeyspaceType_NORMAL, + ShardingColumnName: "col1", + ShardingColumnType: topodatapb.KeyspaceIdType_UINT64, + }, + }, + req: &vtctldatapb.CreateKeyspaceRequest{ + Name: "testkeyspace", + Type: topodatapb.KeyspaceType_NORMAL, + Force: false, + }, + expected: nil, + shouldErr: true, + }, + { + name: "keyspace exists/force", + topo: map[string]*topodatapb.Keyspace{ + "testkeyspace": { + KeyspaceType: topodatapb.KeyspaceType_NORMAL, + ShardingColumnName: "col1", + ShardingColumnType: topodatapb.KeyspaceIdType_UINT64, + }, + }, + req: 
&vtctldatapb.CreateKeyspaceRequest{ + Name: "testkeyspace", + Type: topodatapb.KeyspaceType_NORMAL, + Force: true, + }, + expected: &vtctldatapb.CreateKeyspaceResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_NORMAL, + ShardingColumnName: "col1", + ShardingColumnType: topodatapb.KeyspaceIdType_UINT64, + }, + }, + }, + vschemaShouldExist: true, + expectedVSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + shouldErr: false, + }, + { + name: "allow empty vschema", + topo: nil, + req: &vtctldatapb.CreateKeyspaceRequest{ + Name: "testkeyspace", + Type: topodatapb.KeyspaceType_NORMAL, + AllowEmptyVSchema: true, + }, + expected: &vtctldatapb.CreateKeyspaceResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_NORMAL, + }, + }, + }, + vschemaShouldExist: false, + expectedVSchema: nil, + shouldErr: false, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if tt.req == nil { + t.Skip("test not yet implemented") + } + + ctx := context.Background() + ts := memorytopo.NewServer(cells...) 
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + for name, ks := range tt.topo { + testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ + Name: name, + Keyspace: ks, + }) + } + + for name, vs := range tt.vschemas { + require.NoError(t, ts.SaveVSchema(ctx, name, vs), "error in SaveVSchema(%v, %+v)", name, vs) + } + + // Create the keyspace and make some assertions + resp, err := vtctld.CreateKeyspace(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + testutil.AssertKeyspacesEqual(t, tt.expected.Keyspace, resp.Keyspace, "%+v\n%+v\n", tt.expected.Keyspace, resp.Keyspace) + + // Fetch the newly-created keyspace out of the topo and assert on it + ks, err := ts.GetKeyspace(ctx, tt.req.Name) + assert.NoError(t, err, "cannot get keyspace %v after creating", tt.req.Name) + require.NotNil(t, ks.Keyspace) + + actualKs := &vtctldatapb.Keyspace{ + Name: tt.req.Name, + Keyspace: ks.Keyspace, + } + testutil.AssertKeyspacesEqual( + t, + tt.expected.Keyspace, + actualKs, + "created keyspace %v does not match requested keyspace (name = %v) %v", + actualKs, + tt.expected.Keyspace, + ) + + // Finally, check the VSchema + vs, err := ts.GetVSchema(ctx, tt.req.Name) + if !tt.vschemaShouldExist { + assert.True(t, topo.IsErrType(err, topo.NoNode), "vschema should not exist, but got other error = %v", err) + return + } + assert.NoError(t, err) + assert.Equal(t, tt.expectedVSchema, vs) + }) + } +} + +func TestCreateShard(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + keyspaces []*vtctldatapb.Keyspace + shards []*vtctldatapb.Shard + topoErr error + req *vtctldatapb.CreateShardRequest + expected *vtctldatapb.CreateShardResponse + shouldErr bool + }{ + { + name: "success", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: nil, + topoErr: 
nil, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + expected: &vtctldatapb.CreateShardResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + Shard: &vtctldatapb.Shard{ + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, + }, + ShardAlreadyExists: false, + }, + shouldErr: false, + }, + { + name: "include parent", + keyspaces: nil, + shards: nil, + topoErr: nil, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + IncludeParent: true, + }, + expected: &vtctldatapb.CreateShardResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + Shard: &vtctldatapb.Shard{ + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, + }, + ShardAlreadyExists: false, + }, + shouldErr: false, + }, + { + name: "keyspace does not exist", + keyspaces: nil, + shards: nil, + topoErr: nil, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + expected: nil, + shouldErr: true, + }, + { + name: "include parent/keyspace exists/no force", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: nil, + topoErr: nil, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + IncludeParent: true, + }, + expected: nil, + shouldErr: true, + }, + { + name: "include parent/keyspace exists/force", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: nil, + topoErr: nil, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + IncludeParent: true, + Force: true, + }, + expected: &vtctldatapb.CreateShardResponse{ + Keyspace: 
&vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + Shard: &vtctldatapb.Shard{ + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, + }, + ShardAlreadyExists: false, + }, + shouldErr: false, + }, + { + name: "shard exists/no force", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoErr: nil, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + expected: nil, + shouldErr: true, + }, + { + name: "shard exists/force", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoErr: nil, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Force: true, + }, + expected: &vtctldatapb.CreateShardResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + Shard: &vtctldatapb.Shard{ + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, + }, + ShardAlreadyExists: true, + }, + shouldErr: false, + }, + { + name: "topo is down", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: nil, + topoErr: assert.AnError, + req: &vtctldatapb.CreateShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if tt.req == nil { + t.Skip("focusing on other tests") + } + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory("zone1") 
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + for _, ks := range tt.keyspaces { + testutil.AddKeyspace(ctx, t, ts, ks) + } + + testutil.AddShards(ctx, t, ts, tt.shards...) + + if tt.topoErr != nil { + topofactory.SetError(tt.topoErr) + } + + resp, err := vtctld.CreateShard(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestDeleteKeyspace(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + keyspaces []*vtctldatapb.Keyspace + shards []*vtctldatapb.Shard + srvKeyspaces map[string]map[string]*topodatapb.SrvKeyspace + topoErr error + req *vtctldatapb.DeleteKeyspaceRequest + expected *vtctldatapb.DeleteKeyspaceResponse + expectedRemainingKeyspaces []string + expectedRemainingShards map[string][]string + shouldErr bool + }{ + { + name: "success", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: nil, + srvKeyspaces: nil, + topoErr: nil, + req: &vtctldatapb.DeleteKeyspaceRequest{ + Keyspace: "testkeyspace", + }, + expected: &vtctldatapb.DeleteKeyspaceResponse{}, + expectedRemainingKeyspaces: []string{}, + expectedRemainingShards: map[string][]string{}, + shouldErr: false, + }, + { + name: "keyspace does not exist", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "otherkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: nil, + srvKeyspaces: nil, + topoErr: nil, + req: &vtctldatapb.DeleteKeyspaceRequest{ + Keyspace: "testkeyspace", + }, + expected: nil, + expectedRemainingKeyspaces: []string{"otherkeyspace"}, + expectedRemainingShards: map[string][]string{ + "otherkeyspace": nil, + }, + shouldErr: true, + }, + { + name: "keyspace has shards/Recursive=false", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: 
&topodatapb.Keyspace{}, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-80", + }, + { + Keyspace: "testkeyspace", + Name: "80-", + }, + }, + srvKeyspaces: nil, + topoErr: nil, + req: &vtctldatapb.DeleteKeyspaceRequest{ + Keyspace: "testkeyspace", + }, + expected: nil, + expectedRemainingKeyspaces: []string{"testkeyspace"}, + expectedRemainingShards: map[string][]string{ + "testkeyspace": {"-80", "80-"}, + }, + shouldErr: true, + }, + { + name: "keyspace has shards/Recursive=true", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-80", + }, + { + Keyspace: "testkeyspace", + Name: "80-", + }, + }, + srvKeyspaces: nil, + topoErr: nil, + req: &vtctldatapb.DeleteKeyspaceRequest{ + Keyspace: "testkeyspace", + Recursive: true, + }, + expected: &vtctldatapb.DeleteKeyspaceResponse{}, + expectedRemainingKeyspaces: []string{}, + expectedRemainingShards: map[string][]string{}, + shouldErr: false, + }, + // Not sure how to force this case because we always pass + // (Recursive=true, EvenIfServing=true) so anything short of "topo + // server is down" won't fail, and "topo server is down" will cause us + // to error before we even reach this point in the code, so, ¯\_(ツ)_/¯. 
+ // { + // name: "recursive/cannot delete shard", + // }, + { + name: "topo error", + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + shards: nil, + srvKeyspaces: nil, + topoErr: assert.AnError, + req: &vtctldatapb.DeleteKeyspaceRequest{ + Keyspace: "testkeyspace", + }, + expected: nil, + expectedRemainingKeyspaces: []string{"testkeyspace"}, + expectedRemainingShards: map[string][]string{ + "testkeyspace": nil, + }, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cells := []string{"zone1", "zone2", "zone3"} + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory(cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) + testutil.AddShards(ctx, t, ts, tt.shards...) 
+ testutil.UpdateSrvKeyspaces(ctx, t, ts, tt.srvKeyspaces) + + if tt.topoErr != nil { + topofactory.SetError(tt.topoErr) + } + + defer func() { + if tt.expectedRemainingKeyspaces == nil { + return + } + + topofactory.SetError(nil) + + keyspaces, err := ts.GetKeyspaces(ctx) + require.NoError(t, err, "cannot get keyspaces names after DeleteKeyspace call") + assert.ElementsMatch(t, tt.expectedRemainingKeyspaces, keyspaces) + + if tt.expectedRemainingShards == nil { + return + } + + remainingShards := make(map[string][]string, len(keyspaces)) + + for _, ks := range keyspaces { + shards, err := ts.GetShardNames(ctx, ks) + require.NoError(t, err, "cannot get shard names for keyspace %s", ks) + + remainingShards[ks] = shards + } + + assert.Equal(t, tt.expectedRemainingShards, remainingShards) + }() + + resp, err := vtctld.DeleteKeyspace(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestDeleteShards(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + shards []*vtctldatapb.Shard + tablets []*topodatapb.Tablet + replicationGraphs []*topo.ShardReplicationInfo + srvKeyspaces map[string]map[string]*topodatapb.SrvKeyspace + topoErr error + req *vtctldatapb.DeleteShardsRequest + expected *vtctldatapb.DeleteShardsResponse + expectedRemainingShards []*vtctldatapb.Shard + shouldErr bool + }{ + { + name: "success", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: nil, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + }, + expected: &vtctldatapb.DeleteShardsResponse{}, + expectedRemainingShards: []*vtctldatapb.Shard{}, + shouldErr: false, + }, + { + name: "shard not found", + shards: nil, + tablets: nil, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: 
"testkeyspace", + Name: "-", + }, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "multiple shards", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + { + Keyspace: "otherkeyspace", + Name: "-80", + }, + { + Keyspace: "otherkeyspace", + Name: "80-", + }, + }, + tablets: nil, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + { + Keyspace: "otherkeyspace", + Name: "-80", + }, + }, + }, + expected: &vtctldatapb.DeleteShardsResponse{}, + expectedRemainingShards: []*vtctldatapb.Shard{ + { + Keyspace: "otherkeyspace", + Name: "80-", + }, + }, + shouldErr: false, + }, + { + name: "topo is down", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: nil, + topoErr: assert.AnError, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + }, + expected: nil, + expectedRemainingShards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + shouldErr: true, + }, + { + name: "shard is serving/EvenIfServing=false", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: nil, + srvKeyspaces: map[string]map[string]*topodatapb.SrvKeyspace{ + "zone1": { + "testkeyspace": &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_MASTER, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "-", + KeyRange: &topodatapb.KeyRange{}, + }, + }, + }, + }, + }, + }, + }, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + }, + expected: nil, + expectedRemainingShards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + shouldErr: true, + }, + { + name: "shard is 
serving/EvenIfServing=true", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: nil, + srvKeyspaces: map[string]map[string]*topodatapb.SrvKeyspace{ + "zone1": { + "testkeyspace": &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_MASTER, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "-", + KeyRange: &topodatapb.KeyRange{}, + }, + }, + }, + }, + }, + }, + }, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + EvenIfServing: true, + }, + expected: &vtctldatapb.DeleteShardsResponse{}, + expectedRemainingShards: []*vtctldatapb.Shard{}, + shouldErr: false, + }, + { + name: "ShardReplication in topo", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: nil, + replicationGraphs: []*topo.ShardReplicationInfo{ + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, "zone1", "testkeyspace", "-"), + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, + }, + }, "zone2", "testkeyspace", "-"), + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + }, + }, + }, "zone3", "testkeyspace", "-"), + }, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + }, + expected: &vtctldatapb.DeleteShardsResponse{}, + expectedRemainingShards: []*vtctldatapb.Shard{}, + shouldErr: false, + }, + { + name: "shard has 
tablets/Recursive=false", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + }, + expected: nil, + expectedRemainingShards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + shouldErr: true, + }, + { + name: "shard has tablets/Recursive=true", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + Recursive: true, + }, + expected: &vtctldatapb.DeleteShardsResponse{}, + expectedRemainingShards: []*vtctldatapb.Shard{}, + shouldErr: false, + }, + { + name: "tablets in topo belonging to other shard", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-80", + }, + { + Keyspace: "testkeyspace", + Name: "80-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "80-", + }, + }, + topoErr: nil, + req: &vtctldatapb.DeleteShardsRequest{ + Shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-80", + }, + }, + }, + expected: &vtctldatapb.DeleteShardsResponse{}, + expectedRemainingShards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "80-", + }, + }, + shouldErr: false, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cells := []string{"zone1", "zone2", "zone3"} + + ctx := 
context.Background() + ts, topofactory := memorytopo.NewServerAndFactory(cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + testutil.AddShards(ctx, t, ts, tt.shards...) + testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) + testutil.SetupReplicationGraphs(ctx, t, ts, tt.replicationGraphs...) + testutil.UpdateSrvKeyspaces(ctx, t, ts, tt.srvKeyspaces) + + if tt.topoErr != nil { + topofactory.SetError(tt.topoErr) + } + + if tt.expectedRemainingShards != nil { + defer func() { + topofactory.SetError(nil) + + actualShards := []*vtctldatapb.Shard{} + + keyspaces, err := ts.GetKeyspaces(ctx) + require.NoError(t, err, "cannot get keyspace names to check remaining shards") + + for _, ks := range keyspaces { + shards, err := ts.GetShardNames(ctx, ks) + require.NoError(t, err, "cannot get shard names for keyspace %s", ks) + + for _, shard := range shards { + actualShards = append(actualShards, &vtctldatapb.Shard{ + Keyspace: ks, + Name: shard, + }) + } + } + + assert.ElementsMatch(t, tt.expectedRemainingShards, actualShards) + }() + } + + resp, err := vtctld.DeleteShards(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestDeleteTablets(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + shardFieldUpdates map[string]func(*topo.ShardInfo) error + lockedShards []*vtctldatapb.Shard + topoError error + req *vtctldatapb.DeleteTabletsRequest + expected *vtctldatapb.DeleteTabletsResponse + expectedRemainingTablets []*topodatapb.Tablet + shouldErr bool + }{ + { + name: "single replica", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + lockedShards: nil, + 
topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 100, + }, + }, + }, + expected: &vtctldatapb.DeleteTabletsResponse{}, + expectedRemainingTablets: []*topodatapb.Tablet{}, + shouldErr: false, + }, + { + name: "single primary/no AllowPrimary", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + Nanoseconds: 10, + }, + }, + }, + lockedShards: nil, + topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 100, + }, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "single primary/with AllowPrimary", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + Nanoseconds: 10, + }, + }, + }, + lockedShards: nil, + topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 100, + }, + }, + AllowPrimary: true, + }, + expected: &vtctldatapb.DeleteTabletsResponse{}, + expectedRemainingTablets: []*topodatapb.Tablet{}, + shouldErr: false, + }, + { + name: "multiple tablets", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + 
lockedShards: nil, + topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 100, + }, + { + Cell: "zone1", + Uid: 102, + }, + }, + }, + expected: &vtctldatapb.DeleteTabletsResponse{}, + expectedRemainingTablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + shouldErr: false, + }, + { + name: "stale primary record", + tablets: []*topodatapb.Tablet{ + { + // The stale primary we're going to delete. + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + Nanoseconds: 10, + }, + }, + { + // The real shard primary, which we'll update in the shard + // record below. + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1001, + Nanoseconds: 101, + }, + }, + }, + shardFieldUpdates: map[string]func(*topo.ShardInfo) error{ + "testkeyspace/-": func(si *topo.ShardInfo) error { + si.MasterAlias = &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + } + si.MasterTermStartTime = &vttime.Time{ + Seconds: 1001, + Nanoseconds: 101, + } + + return nil + }, + }, + lockedShards: nil, + topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 100, + }, + }, + }, + expected: &vtctldatapb.DeleteTabletsResponse{}, + expectedRemainingTablets: []*topodatapb.Tablet{ + { + // The true shard primary still exists (phew!) 
+ Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1001, + Nanoseconds: 101, + }, + }, + }, + shouldErr: false, + }, + { + name: "tablet not found", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + lockedShards: nil, + topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 200, + }, + }, + }, + expected: nil, + expectedRemainingTablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + shouldErr: true, + }, + { + name: "shard is locked", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + Nanoseconds: 10, + }, + }, + }, + lockedShards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 100, + }, + }, + AllowPrimary: true, + }, + expected: nil, + expectedRemainingTablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + Nanoseconds: 10, + }, + }, + }, + shouldErr: true, + }, + { + name: "another shard is locked", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, 
+ Keyspace: "testkeyspace", + Shard: "-80", + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + Nanoseconds: 10, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "80-", + MasterTermStartTime: &vttime.Time{ + Seconds: 200, + Nanoseconds: 20, + }, + }, + }, + lockedShards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "80-", + }, + }, + topoError: nil, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + // testkeyspace/-80 + Cell: "zone1", + Uid: 100, + }, + }, + AllowPrimary: true, + }, + expected: &vtctldatapb.DeleteTabletsResponse{}, + expectedRemainingTablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "80-", + MasterTermStartTime: &vttime.Time{ + Seconds: 200, + Nanoseconds: 20, + }, + }, + }, + shouldErr: false, + }, + { + name: "topo server is down", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + lockedShards: nil, + topoError: assert.AnError, + req: &vtctldatapb.DeleteTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 200, + }, + }, + }, + expected: nil, + expectedRemainingTablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if tt.req == nil { + t.Skip("focusing on other tests") + } + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory("zone1") + vtctld := 
testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + // Setup tablets and shards + testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) + + for key, updateFn := range tt.shardFieldUpdates { + ks, shard, err := topoproto.ParseKeyspaceShard(key) + require.NoError(t, err, "bad keyspace/shard provided in shardFieldUpdates: %s", key) + + _, err = ts.UpdateShardFields(ctx, ks, shard, updateFn) + require.NoError(t, err, "failed to update shard fields for %s", key) + } + + // Set locks + for _, shard := range tt.lockedShards { + lctx, unlock, lerr := ts.LockShard(ctx, shard.Keyspace, shard.Name, "testing locked shard") + require.NoError(t, lerr, "cannot lock shard %s/%s", shard.Keyspace, shard.Name) + // unlock at the end of the test, we don't care about this error + // value anymore + defer unlock(&lerr) + + // we do, however, care that the lock context gets propogated + // both to additional calls to lock, and to the actual RPC call. 
+ ctx = lctx + } + + // Set errors + if tt.topoError != nil { + topofactory.SetError(tt.topoError) + } + + checkRemainingTablets := func() { + topofactory.SetError(nil) + + resp, err := vtctld.GetTablets(ctx, &vtctldatapb.GetTabletsRequest{}) + assert.NoError(t, err, "cannot look up tablets from topo after issuing DeleteTablets request") + + assert.ElementsMatch(t, tt.expectedRemainingTablets, resp.Tablets) + } + + // Run the test + resp, err := vtctld.DeleteTablets(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + if tt.expectedRemainingTablets != nil { + checkRemainingTablets() + } + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + checkRemainingTablets() + }) + } +} + +func TestEmergencyReparentShard(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + + req *vtctldatapb.EmergencyReparentShardRequest + expected *vtctldatapb.EmergencyReparentShardResponse + expectEventsToOccur bool + shouldErr bool + }{ + { + name: "successful reparent", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + 
PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": {}, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + Error: mysql.ErrNotReplica, + }, + "zone1-0000000101": { + Error: assert.AnError, + }, + "zone1-0000000200": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5": nil, + }, + "zone1-0000000200": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5": nil, + }, + }, + }, + req: &vtctldatapb.EmergencyReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: protoutil.DurationToProto(time.Millisecond * 10), + }, + expected: &vtctldatapb.EmergencyReparentShardResponse{ + Keyspace: "testkeyspace", + Shard: "-", + PromotedPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + expectEventsToOccur: true, + shouldErr: false, + }, + { + // Note: this is testing the error-handling done in + // (*VtctldServer).EmergencyReparentShard, not the logic of an ERS. + // That logic is tested in reparentutil, and not here. Therefore, + // the simplest way to trigger a failure is to attempt an ERS on a + // shard that does not exist. 
+ name: "failed reparent", + ts: memorytopo.NewServer("zone1"), + tablets: nil, + + req: &vtctldatapb.EmergencyReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }, + expectEventsToOccur: false, + shouldErr: true, + }, + { + name: "invalid WaitReplicasTimeout", + req: &vtctldatapb.EmergencyReparentShardRequest{ + WaitReplicasTimeout: &vttime.Duration{ + Seconds: -1, + Nanos: 1, + }, + }, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + ForceSetShardMaster: true, + SkipShardCreation: false, + }, tt.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) + + // We defer this because we want to check in both error and non- + // error cases, but after the main set of assertions for those + // cases. 
+ defer func() { + if !tt.expectEventsToOccur { + testutil.AssertNoLogutilEventsOccurred(t, resp, "expected no events to occur during ERS") + + return + } + + testutil.AssertLogutilEventsOccurred(t, resp, "expected events to occur during ERS") + }() + + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + testutil.AssertEmergencyReparentShardResponsesEqual(t, *tt.expected, *resp) + }) + } +} + func TestFindAllShardsInKeyspace(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer("cell1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + ks := &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + } + testutil.AddKeyspace(ctx, t, ts, ks) + + si1, err := ts.GetOrCreateShard(ctx, ks.Name, "-80") + require.NoError(t, err) + si2, err := ts.GetOrCreateShard(ctx, ks.Name, "80-") + require.NoError(t, err) + + resp, err := vtctld.FindAllShardsInKeyspace(ctx, &vtctldatapb.FindAllShardsInKeyspaceRequest{Keyspace: ks.Name}) + assert.NoError(t, err) + assert.NotNil(t, resp) + + expected := map[string]*vtctldatapb.Shard{ + "-80": { + Keyspace: ks.Name, + Name: "-80", + Shard: si1.Shard, + }, + "80-": { + Keyspace: ks.Name, + Name: "80-", + Shard: si2.Shard, + }, + } + + assert.Equal(t, expected, resp.Shards) + + _, err = vtctld.FindAllShardsInKeyspace(ctx, &vtctldatapb.FindAllShardsInKeyspaceRequest{Keyspace: "nothing"}) + assert.Error(t, err) +} + +func TestGetBackups(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer() + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + testutil.BackupStorage.Backups = map[string][]string{ + "testkeyspace/-": {"backup1", "backup2"}, + } + + expected := &vtctldatapb.GetBackupsResponse{ + Backups: 
[]*mysqlctlpb.BackupInfo{ + { + Directory: "testkeyspace/-", + Name: "backup1", + }, + { + Directory: "testkeyspace/-", + Name: "backup2", + }, + }, + } + + resp, err := vtctld.GetBackups(ctx, &vtctldatapb.GetBackupsRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }) + assert.NoError(t, err) + assert.Equal(t, expected, resp) + + t.Run("no backupstorage", func(t *testing.T) { + *backupstorage.BackupStorageImplementation = "doesnotexist" + defer func() { *backupstorage.BackupStorageImplementation = testutil.BackupStorageImplementation }() + + _, err := vtctld.GetBackups(ctx, &vtctldatapb.GetBackupsRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }) + assert.Error(t, err) + }) + + t.Run("listbackups error", func(t *testing.T) { + testutil.BackupStorage.ListBackupsError = assert.AnError + defer func() { testutil.BackupStorage.ListBackupsError = nil }() + + _, err := vtctld.GetBackups(ctx, &vtctldatapb.GetBackupsRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }) + assert.Error(t, err) + }) +} + +func TestGetKeyspace(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer("cell1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + expected := &vtctldatapb.GetKeyspaceResponse{ + Keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{ + ShardingColumnName: "col1", + }, + }, + } + testutil.AddKeyspace(ctx, t, ts, expected.Keyspace) + + ks, err := vtctld.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: expected.Keyspace.Name}) + assert.NoError(t, err) + assert.Equal(t, expected, ks) + + _, err = vtctld.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: "notfound"}) + assert.Error(t, err) +} + +func TestGetCellInfoNames(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer("cell1", "cell2", "cell3") + vtctld := 
testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) + assert.NoError(t, err) + assert.ElementsMatch(t, []string{"cell1", "cell2", "cell3"}, resp.Names) + + ts = memorytopo.NewServer() + vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err = vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) + assert.NoError(t, err) + assert.Empty(t, resp.Names) + + ts, topofactory := memorytopo.NewServerAndFactory("cell1") + vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + topofactory.SetError(assert.AnError) + _, err = vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) + assert.Error(t, err) +} + +func TestGetCellInfo(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer() + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + expected := &topodatapb.CellInfo{ + ServerAddress: "example.com", + Root: "vitess", + } + input := *expected // shallow copy + require.NoError(t, ts.CreateCellInfo(ctx, "cell1", &input)) + + resp, err := vtctld.GetCellInfo(ctx, &vtctldatapb.GetCellInfoRequest{Cell: "cell1"}) + assert.NoError(t, err) + assert.Equal(t, expected, resp.CellInfo) + + _, err = vtctld.GetCellInfo(ctx, &vtctldatapb.GetCellInfoRequest{Cell: "does_not_exist"}) + assert.Error(t, err) + + _, err = vtctld.GetCellInfo(ctx, &vtctldatapb.GetCellInfoRequest{}) + assert.Error(t, err) +} + +func TestGetCellsAliases(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer("c11", 
"c12", "c13", "c21", "c22") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + alias1 := &topodatapb.CellsAlias{ + Cells: []string{"c11", "c12", "c13"}, + } + alias2 := &topodatapb.CellsAlias{ + Cells: []string{"c21", "c22"}, + } + + for i, alias := range []*topodatapb.CellsAlias{alias1, alias2} { + input := *alias // shallow copy + name := fmt.Sprintf("a%d", i+1) + + require.NoError(t, ts.CreateCellsAlias(ctx, name, &input), "cannot create cells alias %d (idx = %d) = %+v", i+1, i, &input) + } + + expected := map[string]*topodatapb.CellsAlias{ + "a1": alias1, + "a2": alias2, + } + + resp, err := vtctld.GetCellsAliases(ctx, &vtctldatapb.GetCellsAliasesRequest{}) + assert.NoError(t, err) + assert.Equal(t, expected, resp.Aliases) + + ts, topofactory := memorytopo.NewServerAndFactory() + vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + topofactory.SetError(assert.AnError) + _, err = vtctld.GetCellsAliases(ctx, &vtctldatapb.GetCellsAliasesRequest{}) + assert.Error(t, err) +} + +func TestGetKeyspaces(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory("cell1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) + assert.NoError(t, err) + assert.Empty(t, resp.Keyspaces) + + expected := []*vtctldatapb.Keyspace{ + { + Name: "ks1", + Keyspace: &topodatapb.Keyspace{ + ShardingColumnName: "ks1_col1", + }, + }, + { + Name: "ks2", + Keyspace: &topodatapb.Keyspace{ + ShardingColumnName: "ks2_col1", + }, + }, + { + Name: "ks3", + Keyspace: &topodatapb.Keyspace{ + ShardingColumnName: "ks3_col1", + }, + }, + } + for _, ks 
:= range expected { + testutil.AddKeyspace(ctx, t, ts, ks) + } + + resp, err = vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) + assert.NoError(t, err) + assert.Equal(t, expected, resp.Keyspaces) + + topofactory.SetError(errors.New("error from toposerver")) + + _, err = vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) + assert.Error(t, err) +} + +func TestGetTablet(t *testing.T) { + t.Parallel() + ctx := context.Background() ts := memorytopo.NewServer("cell1") - vtctld := NewVtctldServer(ts) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) - ks := &vtctldatapb.Keyspace{ - Name: "testkeyspace", - Keyspace: &topodatapb.Keyspace{}, + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Hostname: "localhost", + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + } + + testutil.AddTablet(ctx, t, ts, tablet, nil) + + resp, err := vtctld.GetTablet(ctx, &vtctldatapb.GetTabletRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + }) + assert.NoError(t, err) + assert.Equal(t, resp.Tablet, tablet) + + // not found + _, err = vtctld.GetTablet(ctx, &vtctldatapb.GetTabletRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + }) + assert.Error(t, err) +} + +func TestGetSchema(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("zone1") + tmc := testutil.TabletManagerClient{ + GetSchemaResults: map[string]struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{}, + } + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + validAlias := &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + } + testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ + Alias: validAlias, + }, 
nil) + otherAlias := &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + } + testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ + Alias: otherAlias, + }, nil) + + // we need to run this on each test case or they will pollute each other + setupSchema := func() { + tmc.GetSchemaResults[topoproto.TabletAliasString(validAlias)] = struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + }{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 ( + id int(11) not null, + PRIMARY KEY (id) +);`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + Error: nil, + } + } + + tests := []*struct { + name string + req *vtctldatapb.GetSchemaRequest + expected *vtctldatapb.GetSchemaResponse + shouldErr bool + }{ + { + name: "normal path", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: validAlias, + }, + expected: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Schema: `CREATE TABLE t1 ( + id int(11) not null, + PRIMARY KEY (id) +);`, + Type: "BASE", + Columns: []string{"id"}, + DataLength: 100, + RowCount: 50, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + }, + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "table names only", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: validAlias, + TableNamesOnly: true, + }, + expected: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + }, + }, + }, + }, + shouldErr: false, + 
}, + { + name: "table sizes only", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: validAlias, + TableSizesOnly: true, + }, + expected: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Type: "BASE", + DataLength: 100, + RowCount: 50, + }, + }, + }, + }, + shouldErr: false, + }, + { + name: "table names take precedence over table sizes", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: validAlias, + TableNamesOnly: true, + TableSizesOnly: true, + }, + expected: &vtctldatapb.GetSchemaResponse{ + Schema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE vt_testkeyspace", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + }, + }, + }, + }, + shouldErr: false, + }, + // error cases + { + name: "no tablet", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "notfound", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "no schema", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: otherAlias, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setupSchema() + + resp, err := vtctld.GetSchema(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestGetShard(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + topo []*vtctldatapb.Shard + topoError error + req *vtctldatapb.GetShardRequest + expected *vtctldatapb.GetShardResponse + shouldErr bool + }{ + { + name: "success", + topo: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: nil, + req: &vtctldatapb.GetShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + expected: &vtctldatapb.GetShardResponse{ + 
Shard: &vtctldatapb.Shard{ + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, + }, + }, + shouldErr: false, + }, + { + name: "shard not found", + topo: nil, + topoError: nil, + req: &vtctldatapb.GetShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + shouldErr: true, + }, + { + name: "unavailable topo server", + topo: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: assert.AnError, + req: &vtctldatapb.GetShardRequest{}, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cells := []string{"zone1", "zone2", "zone3"} + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory(cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + testutil.AddShards(ctx, t, ts, tt.topo...) 
+ + if tt.topoError != nil { + topofactory.SetError(tt.topoError) + } + + resp, err := vtctld.GetShard(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestGetSrvKeyspaces(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cells []string + srvKeyspaces []*testutil.SrvKeyspace + topoErr error + req *vtctldatapb.GetSrvKeyspacesRequest + expected *vtctldatapb.GetSrvKeyspacesResponse + shouldErr bool + }{ + { + name: "success", + cells: []string{"zone1", "zone2"}, + srvKeyspaces: []*testutil.SrvKeyspace{ + { + Cell: "zone1", + Keyspace: "testkeyspace", + SrvKeyspace: &topodatapb.SrvKeyspace{ + ShardingColumnName: "zone1-sharding-col", + }, + }, + { + Cell: "zone2", + Keyspace: "testkeyspace", + SrvKeyspace: &topodatapb.SrvKeyspace{ + ShardingColumnName: "zone2-sharding-col", + }, + }, + }, + req: &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: "testkeyspace", + }, + expected: &vtctldatapb.GetSrvKeyspacesResponse{ + SrvKeyspaces: map[string]*topodatapb.SrvKeyspace{ + "zone1": { + ShardingColumnName: "zone1-sharding-col", + }, + "zone2": { + ShardingColumnName: "zone2-sharding-col", + }, + }, + }, + shouldErr: false, + }, + { + name: "filtering by cell", + cells: []string{"zone1", "zone2"}, + srvKeyspaces: []*testutil.SrvKeyspace{ + { + Cell: "zone1", + Keyspace: "testkeyspace", + SrvKeyspace: &topodatapb.SrvKeyspace{ + ShardingColumnName: "zone1-sharding-col", + }, + }, + { + Cell: "zone2", + Keyspace: "testkeyspace", + SrvKeyspace: &topodatapb.SrvKeyspace{ + ShardingColumnName: "zone2-sharding-col", + }, + }, + }, + req: &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: "testkeyspace", + Cells: []string{"zone1"}, + }, + expected: &vtctldatapb.GetSrvKeyspacesResponse{ + SrvKeyspaces: map[string]*topodatapb.SrvKeyspace{ + "zone1": { + ShardingColumnName: "zone1-sharding-col", + }, + }, + }, + shouldErr: false, + }, + { + name: "no srvkeyspace for single cell", + cells: 
[]string{"zone1", "zone2"}, + srvKeyspaces: []*testutil.SrvKeyspace{ + { + Cell: "zone1", + Keyspace: "testkeyspace", + SrvKeyspace: &topodatapb.SrvKeyspace{ + ShardingColumnName: "zone1-sharding-col", + }, + }, + }, + req: &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: "testkeyspace", + }, + expected: &vtctldatapb.GetSrvKeyspacesResponse{ + SrvKeyspaces: map[string]*topodatapb.SrvKeyspace{ + "zone1": { + ShardingColumnName: "zone1-sharding-col", + }, + "zone2": nil, + }, + }, + shouldErr: false, + }, + { + name: "error getting cell names", + cells: []string{"zone1"}, + srvKeyspaces: []*testutil.SrvKeyspace{ + { + Cell: "zone1", + Keyspace: "testkeyspace", + SrvKeyspace: &topodatapb.SrvKeyspace{ + ShardingColumnName: "zone1-sharding-col", + }, + }, + }, + topoErr: assert.AnError, + req: &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: "testkeyspace", + }, + shouldErr: true, + }, + { + name: "error getting srvkeyspace", + cells: []string{"zone1"}, + srvKeyspaces: []*testutil.SrvKeyspace{ + { + Cell: "zone1", + Keyspace: "testkeyspace", + SrvKeyspace: &topodatapb.SrvKeyspace{ + ShardingColumnName: "zone1-sharding-col", + }, + }, + }, + topoErr: assert.AnError, + req: &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: "testkeyspace", + Cells: []string{"zone1"}, + }, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if tt.req == nil { + t.SkipNow() + } + + ts, topofactory := memorytopo.NewServerAndFactory(tt.cells...) + + testutil.AddSrvKeyspaces(t, ts, tt.srvKeyspaces...) 
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + if tt.topoErr != nil { + topofactory.SetError(tt.topoErr) + } + + resp, err := vtctld.GetSrvKeyspaces(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestGetSrvVSchema(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory("zone1", "zone2") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + zone1SrvVSchema := &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "testkeyspace": { + Sharded: true, + RequireExplicitRouting: false, + }, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, + }, + } + zone2SrvVSchema := &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "testkeyspace": { + Sharded: true, + RequireExplicitRouting: false, + }, + "unsharded": { + Sharded: false, + RequireExplicitRouting: false, + }, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, + }, + } + + err := ts.UpdateSrvVSchema(ctx, "zone1", zone1SrvVSchema) + require.NoError(t, err, "cannot add zone1 srv vschema") + err = ts.UpdateSrvVSchema(ctx, "zone2", zone2SrvVSchema) + require.NoError(t, err, "cannot add zone2 srv vschema") + + expected := &vschemapb.SrvVSchema{ // have to copy our structs because of proto marshal artifacts + Keyspaces: map[string]*vschemapb.Keyspace{ + "testkeyspace": { + Sharded: true, + RequireExplicitRouting: false, + }, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, + }, + } + resp, err := vtctld.GetSrvVSchema(ctx, &vtctldatapb.GetSrvVSchemaRequest{Cell: "zone1"}) + assert.NoError(t, err) + assert.Equal(t, 
expected.Keyspaces, resp.SrvVSchema.Keyspaces, "GetSrvVSchema(zone1) mismatch") + assert.ElementsMatch(t, expected.RoutingRules.Rules, resp.SrvVSchema.RoutingRules.Rules, "GetSrvVSchema(zone1) rules mismatch") + + expected = &vschemapb.SrvVSchema{ // have to copy our structs because of proto marshal artifacts + Keyspaces: map[string]*vschemapb.Keyspace{ + "testkeyspace": { + Sharded: true, + RequireExplicitRouting: false, + }, + "unsharded": { + Sharded: false, + RequireExplicitRouting: false, + }, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, + }, + } + resp, err = vtctld.GetSrvVSchema(ctx, &vtctldatapb.GetSrvVSchemaRequest{Cell: "zone2"}) + assert.NoError(t, err) + assert.Equal(t, expected.Keyspaces, resp.SrvVSchema.Keyspaces, "GetSrvVSchema(zone2) mismatch %+v %+v", zone2SrvVSchema.Keyspaces["testkeyspace"], resp.SrvVSchema.Keyspaces["testkeyspace"]) + assert.ElementsMatch(t, expected.RoutingRules.Rules, resp.SrvVSchema.RoutingRules.Rules, "GetSrvVSchema(zone2) rules mismatch") + + resp, err = vtctld.GetSrvVSchema(ctx, &vtctldatapb.GetSrvVSchemaRequest{Cell: "dne"}) + assert.Error(t, err, "GetSrvVSchema(dne)") + assert.Nil(t, resp, "GetSrvVSchema(dne)") + + topofactory.SetError(assert.AnError) + _, err = vtctld.GetSrvVSchema(ctx, &vtctldatapb.GetSrvVSchemaRequest{Cell: "zone1"}) + assert.Error(t, err) +} + +func TestGetTablets(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cells []string + tablets []*topodatapb.Tablet + req *vtctldatapb.GetTabletsRequest + expected []*topodatapb.Tablet + shouldErr bool + }{ + { + name: "no tablets", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{}, + req: &vtctldatapb.GetTabletsRequest{}, + expected: []*topodatapb.Tablet{}, + shouldErr: false, + }, + { + name: "keyspace and shard filter", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-80", + 
}, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Keyspace: "ks1", + Shard: "80-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 102, + }, + Keyspace: "ks2", + Shard: "-", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Keyspace: "ks1", + Shard: "80-", + }, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Keyspace: "ks1", + Shard: "80-", + }, + }, + shouldErr: false, + }, + { + name: "keyspace filter", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Keyspace: "ks1", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 102, + }, + Keyspace: "otherkeyspace", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Keyspace: "ks1", + }, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Keyspace: "ks1", + }, + }, + shouldErr: false, + }, + { + name: "keyspace and shard filter - stale primary", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-80", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Keyspace: "ks1", + Shard: "80-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 102, + }, + Keyspace: "ks2", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 103, + }, + Keyspace: "ks2", + Shard: "-", + Hostname: "stale.primary", + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: 
logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Keyspace: "ks2", + Shard: "-", + }, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 102, + }, + Keyspace: "ks2", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 103, + }, + Keyspace: "ks2", + Shard: "-", + Hostname: "stale.primary", + Type: topodatapb.TabletType_UNKNOWN, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + }, + }, + shouldErr: false, + }, + { + name: "stale primary", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + Hostname: "slightly less stale", + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Hostname: "stale primary", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 103, + }, + Hostname: "true primary", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), + }, + }, + req: &vtctldatapb.GetTabletsRequest{}, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + Hostname: "slightly less stale", + Type: topodatapb.TabletType_UNKNOWN, + MasterTermStartTime: 
logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Hostname: "stale primary", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_UNKNOWN, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 103, + }, + Hostname: "true primary", + Keyspace: "ks1", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), + }, + }, + shouldErr: false, + }, + { + name: "keyspace and shard filter - error", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{}, + req: &vtctldatapb.GetTabletsRequest{ + Keyspace: "ks1", + Shard: "-", + }, + expected: []*topodatapb.Tablet{}, + shouldErr: true, + }, + { + name: "cells filter", + cells: []string{"cell1", "cell2", "cell3"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + Uid: 200, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell3", + Uid: 300, + }, + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Cells: []string{"cell1", "cell3"}, + }, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell3", + Uid: 300, + }, + }, + }, + shouldErr: false, + }, + { + name: "cells filter with single error is nonfatal", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Cells: []string{"cell1", "doesnotexist"}, + }, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: 
"ks1", + Shard: "-", + }, + }, + shouldErr: false, + }, + { + name: "cells filter with single error is fatal in strict mode", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Cells: []string{"cell1", "doesnotexist"}, + Strict: true, + }, + shouldErr: true, + }, + { + name: "in nonstrict mode if all cells fail the request fails", + cells: []string{"cell1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Cells: []string{"doesnotexist", "alsodoesnotexist"}, + }, + shouldErr: true, + }, + { + name: "tablet alias filtering", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 100, + }, + { + // This tablet doesn't exist, but doesn't cause a failure. + Cell: "zone404", + Uid: 404, + }, + }, + // The below filters are ignored, because TabletAliases always + // takes precedence. 
+ Keyspace: "another_keyspace", + Shard: "-80", + Cells: []string{"zone404"}, + }, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + shouldErr: false, + }, + { + name: "tablet alias filter with none found", + tablets: []*topodatapb.Tablet{}, + req: &vtctldatapb.GetTabletsRequest{ + TabletAliases: []*topodatapb.TabletAlias{ + { + Cell: "zone1", + Uid: 101, + }, + }, + }, + expected: []*topodatapb.Tablet{}, + shouldErr: false, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer(tt.cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) + + resp, err := vtctld.GetTablets(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.ElementsMatch(t, tt.expected, resp.Tablets) + }) + } +} + +func TestGetVSchema(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer("zone1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + t.Run("found", func(t *testing.T) { + t.Parallel() + + err := ts.SaveVSchema(ctx, "testkeyspace", &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "v1": { + Type: "hash", + }, + }, + }) + require.NoError(t, err) + + expected := &vtctldatapb.GetVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "v1": { + Type: "hash", + }, + }, + }, + } + + resp, err := vtctld.GetVSchema(ctx, &vtctldatapb.GetVSchemaRequest{ + Keyspace: "testkeyspace", + }) + assert.NoError(t, err) + assert.Equal(t, 
expected, resp) + }) + + t.Run("not found", func(t *testing.T) { + t.Parallel() + + _, err := vtctld.GetVSchema(ctx, &vtctldatapb.GetVSchemaRequest{ + Keyspace: "doesnotexist", + }) + assert.Error(t, err) + }) +} + +func TestPlannedReparentShard(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + + req *vtctldatapb.PlannedReparentShardRequest + expected *vtctldatapb.PlannedReparentShardResponse + expectEventsToOccur bool + shouldErr bool + }{ + { + name: "successful reparent", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "primary-demotion position", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "doesn't matter", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Result: "promotion position", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, // waiting for master-position during promotion + // reparent 
SetMaster calls + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "primary-demotion position": nil, + }, + }, + }, + req: &vtctldatapb.PlannedReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: protoutil.DurationToProto(time.Millisecond * 10), + }, + expected: &vtctldatapb.PlannedReparentShardResponse{ + Keyspace: "testkeyspace", + Shard: "-", + PromotedPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + expectEventsToOccur: true, + shouldErr: false, + }, + { + // Note: this is testing the error-handling done in + // (*VtctldServer).PlannedReparentShard, not the logic of an PRS. + // That logic is tested in reparentutil, and not here. Therefore, + // the simplest way to trigger a failure is to attempt an PRS on a + // shard that does not exist. + name: "failed reparent", + ts: memorytopo.NewServer("zone1"), + tablets: nil, + req: &vtctldatapb.PlannedReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }, + expectEventsToOccur: false, + shouldErr: true, + }, + { + name: "invalid WaitReplicasTimeout", + req: &vtctldatapb.PlannedReparentShardRequest{ + WaitReplicasTimeout: &vttime.Duration{ + Seconds: -1, + Nanos: 1, + }, + }, + shouldErr: true, + }, + } + + ctx := context.Background() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + ForceSetShardMaster: true, + SkipShardCreation: false, + }, tt.tablets...) 
+ + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + resp, err := vtctld.PlannedReparentShard(ctx, tt.req) + + // We defer this because we want to check in both error and non- + // error cases, but after the main set of assertions for those + // cases. + defer func() { + if !tt.expectEventsToOccur { + testutil.AssertNoLogutilEventsOccurred(t, resp, "expected no events to occur during ERS") + + return + } + + testutil.AssertLogutilEventsOccurred(t, resp, "expected events to occur during ERS") + }() + + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + testutil.AssertPlannedReparentShardResponsesEqual(t, *tt.expected, *resp) + }) + } +} + +func TestRemoveKeyspaceCell(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + keyspace *vtctldatapb.Keyspace + shards []*vtctldatapb.Shard + topoError error + topoIsLocked bool + srvKeyspaceDoesNotExist bool + req *vtctldatapb.RemoveKeyspaceCellRequest + expected *vtctldatapb.RemoveKeyspaceCellResponse + shouldErr bool + }{ + { + name: "success", + keyspace: nil, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: nil, + topoIsLocked: false, + srvKeyspaceDoesNotExist: false, + req: &vtctldatapb.RemoveKeyspaceCellRequest{ + Keyspace: "testkeyspace", + Cell: "zone1", + }, + expected: &vtctldatapb.RemoveKeyspaceCellResponse{}, + shouldErr: false, + }, + { + name: "success/empty keyspace", + keyspace: &vtctldatapb.Keyspace{ + Name: "testkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + shards: nil, + topoError: nil, + topoIsLocked: false, + srvKeyspaceDoesNotExist: false, + req: &vtctldatapb.RemoveKeyspaceCellRequest{ + Keyspace: "testkeyspace", + Cell: "zone1", + }, + expected: &vtctldatapb.RemoveKeyspaceCellResponse{}, + shouldErr: false, + }, + { + name: "keyspace not found", + keyspace: &vtctldatapb.Keyspace{ + 
Name: "otherkeyspace", + Keyspace: &topodatapb.Keyspace{}, + }, + shards: nil, + topoError: nil, + topoIsLocked: false, + srvKeyspaceDoesNotExist: false, + req: &vtctldatapb.RemoveKeyspaceCellRequest{ + Keyspace: "testkeyspace", + Cell: "zone1", + }, + expected: nil, + shouldErr: true, + }, + { + name: "topo is down", + keyspace: nil, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: assert.AnError, + topoIsLocked: false, + srvKeyspaceDoesNotExist: false, + req: &vtctldatapb.RemoveKeyspaceCellRequest{ + Keyspace: "testkeyspace", + Cell: "zone1", + }, + expected: nil, + shouldErr: true, + }, + { + name: "topo is locked", + keyspace: nil, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: nil, + topoIsLocked: true, + srvKeyspaceDoesNotExist: false, + req: &vtctldatapb.RemoveKeyspaceCellRequest{ + Keyspace: "testkeyspace", + Cell: "zone1", + }, + expected: nil, + shouldErr: true, + }, + { + name: "srvkeyspace already deleted", + keyspace: nil, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: nil, + topoIsLocked: false, + srvKeyspaceDoesNotExist: true, + req: &vtctldatapb.RemoveKeyspaceCellRequest{ + Keyspace: "testkeyspace", + Cell: "zone1", + }, + expected: nil, + shouldErr: true, + }, } - addKeyspace(ctx, t, ts, ks) - si1, err := ts.GetOrCreateShard(ctx, ks.Name, "-80") - require.NoError(t, err) - si2, err := ts.GetOrCreateShard(ctx, ks.Name, "80-") - require.NoError(t, err) + for _, tt := range tests { + tt := tt - resp, err := vtctld.FindAllShardsInKeyspace(ctx, &vtctldatapb.FindAllShardsInKeyspaceRequest{Keyspace: ks.Name}) - assert.NoError(t, err) - assert.NotNil(t, resp) + t.Run(tt.name, func(t *testing.T) { + t.Parallel() - expected := map[string]*vtctldatapb.Shard{ - "-80": { - Keyspace: ks.Name, - Name: "-80", - Shard: si1.Shard, + cells := []string{"zone1", "zone2", "zone3"} + + ctx := context.Background() + 
ts, topofactory := memorytopo.NewServerAndFactory(cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + // Setup topo + if tt.keyspace != nil { + testutil.AddKeyspace(ctx, t, ts, tt.keyspace) + } + + testutil.AddShards(ctx, t, ts, tt.shards...) + + // For certain tests, we don't actually create the SrvKeyspace + // object. + if !tt.srvKeyspaceDoesNotExist { + updateSrvKeyspace := func(keyspace string) { + for _, cell := range cells { + err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, &topodatapb.SrvKeyspace{}) + require.NoError(t, err, "could not create empty SrvKeyspace for keyspace %s in cell %s", tt.req.Keyspace, cell) + } + } + + updateSrvKeyspace(tt.req.Keyspace) + if tt.keyspace != nil { + updateSrvKeyspace(tt.keyspace.Name) + } + } + + // Set errors and locks + if tt.topoError != nil { + topofactory.SetError(tt.topoError) + } + + if tt.topoIsLocked { + lctx, unlock, err := ts.LockKeyspace(ctx, tt.req.Keyspace, "testing locked keyspace") + require.NoError(t, err, "cannot lock keyspace %s", tt.req.Keyspace) + defer unlock(&err) + + ctx = lctx + } + + resp, err := vtctld.RemoveKeyspaceCell(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestRemoveShardCell(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + servingCells []string + shards []*vtctldatapb.Shard + replicationGraphs []*topo.ShardReplicationInfo + topoError error + topoIsLocked bool + req *vtctldatapb.RemoveShardCellRequest + expected *vtctldatapb.RemoveShardCellResponse + shouldErr bool + }{ + { + name: "success", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + replicationGraphs: []*topo.ShardReplicationInfo{ + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + 
TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, "zone1", "testkeyspace", "-"), + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, + }, + }, "zone2", "testkeyspace", "-"), + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + }, + }, + }, "zone3", "testkeyspace", "-"), + }, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "zone2", + Recursive: true, + }, + expected: &vtctldatapb.RemoveShardCellResponse{}, + shouldErr: false, }, - "80-": { - Keyspace: ks.Name, - Name: "80-", - Shard: si2.Shard, + { + name: "success/no tablets", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "zone2", + }, + expected: &vtctldatapb.RemoveShardCellResponse{}, + shouldErr: false, + }, + { + name: "nonexistent shard", + shards: nil, + replicationGraphs: nil, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "zone2", + }, + expected: nil, + shouldErr: true, + }, + { + name: "cell does not exist", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "fourthzone", + }, + expected: nil, + shouldErr: true, + }, + { + name: "cell not in serving list", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + servingCells: []string{"zone1"}, + replicationGraphs: nil, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "zone2", + }, + expected: nil, + 
shouldErr: true, + }, + { + name: "tablets/non-recursive", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + replicationGraphs: []*topo.ShardReplicationInfo{ + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, "zone1", "testkeyspace", "-"), + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, + }, + }, "zone2", "testkeyspace", "-"), + topo.NewShardReplicationInfo(&topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + { + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + }, + }, + }, "zone3", "testkeyspace", "-"), + }, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "zone2", + Recursive: false, // non-recursive + replication graph = failure + }, + expected: nil, + shouldErr: true, + }, + { + name: "topo server down", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + replicationGraphs: nil, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "zone2", + }, + topoError: assert.AnError, + topoIsLocked: false, + expected: nil, + shouldErr: true, + }, + // Not sure how to set up this test case. + // { + // name: "topo server down for replication check/no force", + // }, + // Not sure how to set up this test case. 
+ // { + // name: "topo server down for replication check/force", + // }, + { + name: "cannot lock keyspace", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + replicationGraphs: nil, + req: &vtctldatapb.RemoveShardCellRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + Cell: "zone2", + }, + topoError: nil, + topoIsLocked: true, + expected: nil, + shouldErr: true, }, + // Not sure how to set up this test case. + // { + // name: "cannot delete srvkeyspace partition", + // }, } - assert.Equal(t, expected, resp.Shards) + for _, tt := range tests { + tt := tt - _, err = vtctld.FindAllShardsInKeyspace(ctx, &vtctldatapb.FindAllShardsInKeyspaceRequest{Keyspace: "nothing"}) - assert.Error(t, err) + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cells := []string{"zone1", "zone2", "zone3"} + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory(cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + // Setup shard topos and replication graphs. + testutil.AddShards(ctx, t, ts, tt.shards...) + testutil.SetupReplicationGraphs(ctx, t, ts, tt.replicationGraphs...) + + // Set up srvkeyspace partitions; a little gross. 
+ servingCells := tt.servingCells + if servingCells == nil { // we expect an explicit empty list to have a shard with no serving cells + servingCells = cells + } + + for _, shard := range tt.shards { + lctx, unlock, lerr := ts.LockKeyspace(ctx, shard.Keyspace, "initializing serving graph for test") + require.NoError(t, lerr, "cannot lock keyspace %s to initialize serving graph", shard.Keyspace) + + for _, cell := range servingCells { + + err := ts.UpdateSrvKeyspace(lctx, cell, shard.Keyspace, &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_REPLICA, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: shard.Name, + }, + }, + }, + }, + }) + require.NoError(t, err, "cannot update srvkeyspace for %s/%s in cell %v", shard.Keyspace, shard.Name, cell) + } + + unlock(&lerr) + } + + // Set errors and locks. + if tt.topoError != nil { + topofactory.SetError(tt.topoError) + } + + if tt.topoIsLocked { + lctx, unlock, err := ts.LockKeyspace(ctx, tt.req.Keyspace, "testing locked keyspace") + require.NoError(t, err, "cannot lock keyspace %s", tt.req.Keyspace) + defer unlock(&err) + + // Need to use the lock ctx in the RPC call so we fail when + // attempting to lock the keyspace rather than waiting forever + // for the lock. Explicitly setting a deadline would be another + // way to achieve this. + ctx = lctx + } + + // Make the RPC and assert things about it. 
+ resp, err := vtctld.RemoveShardCell(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } } -func TestGetKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") - vtctld := NewVtctldServer(ts) +func TestReparentTablet(t *testing.T) { + t.Parallel() - expected := &vtctldatapb.GetKeyspaceResponse{ - Keyspace: &vtctldatapb.Keyspace{ - Name: "testkeyspace", - Keyspace: &topodatapb.Keyspace{ - ShardingColumnName: "col1", + tests := []struct { + name string + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + shards []*vtctldatapb.Shard + topoErr error + req *vtctldatapb.ReparentTabletRequest + expected *vtctldatapb.ReparentTabletResponse + shouldErr bool + }{ + { + name: "success", + tmc: &testutil.TabletManagerClient{ + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + IsMasterServing: true, + }, + }, + }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: &vtctldatapb.ReparentTabletResponse{ + Keyspace: "testkeyspace", + Shard: "-", + Primary: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, + shouldErr: false, + }, + { + name: "tablet is nil", + tablets: 
[]*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + IsMasterServing: true, + }, + }, + }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: nil, + }, + expected: nil, + shouldErr: true, + }, + { + name: "tablet not in topo", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + IsMasterServing: true, + }, + }, + }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "shard not in topo", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: nil, + req: 
&vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "shard has no primary", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + IsMasterServing: false, + }, + }, + }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "shard primary not in topo", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1010, + }, + IsMasterServing: true, + }, + }, + }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "shard primary is not type MASTER", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + shards: 
[]*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1010, + }, + IsMasterServing: true, + }, + }, + }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "shard primary is not actually in shard", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "otherkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1010, + }, + IsMasterServing: true, + }, + }, + }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "tmc.SetMaster failure", + tmc: &testutil.TabletManagerClient{ + SetMasterResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: 
&topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + IsMasterServing: true, + }, + }, }, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "topo is down", + tmc: &testutil.TabletManagerClient{ + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + IsMasterServing: true, + }, + }, + }, + topoErr: assert.AnError, + req: &vtctldatapb.ReparentTabletRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, }, } - addKeyspace(ctx, t, ts, expected.Keyspace) - ks, err := vtctld.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: expected.Keyspace.Name}) - assert.NoError(t, err) - assert.Equal(t, expected, ks) + for _, tt := range tests { + tt := tt - _, err = vtctld.GetKeyspace(ctx, &vtctldatapb.GetKeyspaceRequest{Keyspace: "notfound"}) - assert.Error(t, err) -} + t.Run(tt.name, func(t *testing.T) { + t.Parallel() -func addKeyspace(ctx context.Context, t *testing.T, ts *topo.Server, ks *vtctldatapb.Keyspace) { - in := *ks.Keyspace // take a copy to avoid the XXX_ fields changing + if tt.req == nil { + t.Skip("focused on other test 
cases right now") + } - err := ts.CreateKeyspace(ctx, ks.Name, &in) - require.NoError(t, err) -} + cells := []string{"zone1", "zone2", "zone3"} -func TestGetKeyspaces(t *testing.T) { - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("cell1") - vtctld := NewVtctldServer(ts) + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory(cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) - resp, err := vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) - assert.NoError(t, err) - assert.Empty(t, resp.Keyspaces) + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + SkipShardCreation: true, + }, tt.tablets...) + testutil.AddShards(ctx, t, ts, tt.shards...) - expected := []*vtctldatapb.Keyspace{ + if tt.topoErr != nil { + topofactory.SetError(tt.topoErr) + } + + resp, err := vtctld.ReparentTablet(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } +} + +func TestShardReplicationPositions(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tablets []*topodatapb.Tablet + tmc tmclient.TabletManagerClient + ctxTimeout time.Duration + req *vtctldatapb.ShardReplicationPositionsRequest + expected *vtctldatapb.ShardReplicationPositionsResponse + shouldErr bool + }{ { - Name: "ks1", - Keyspace: &topodatapb.Keyspace{ - ShardingColumnName: "ks1_col1", + name: "success", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: 
&testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "primary_tablet_position", + }, + }, + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ + "zone1-0000000101": { + Position: &replicationdatapb.Status{ + Position: "replica_tablet_position", + }, + }, + }, }, + req: &vtctldatapb.ShardReplicationPositionsRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }, + expected: &vtctldatapb.ShardReplicationPositionsResponse{ + ReplicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000100": { + Position: "primary_tablet_position", + }, + "zone1-0000000101": { + Position: "replica_tablet_position", + }, + }, + TabletMap: map[string]*topodatapb.Tablet{ + "zone1-0000000100": { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + "zone1-0000000101": { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + shouldErr: false, }, { - Name: "ks2", - Keyspace: &topodatapb.Keyspace{ - ShardingColumnName: "ks2_col1", + name: "timeouts are nonfatal", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + MasterPositionDelays: map[string]time.Duration{ + "zone1-0000000100": time.Millisecond * 100, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "primary_tablet_position", + 
}, + }, + ReplicationStatusDelays: map[string]time.Duration{ + "zone1-0000000101": time.Millisecond * 100, + }, + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ + "zone1-0000000101": { + Position: &replicationdatapb.Status{ + Position: "replica_tablet_position", + }, + }, + }, + }, + ctxTimeout: time.Millisecond * 10, + req: &vtctldatapb.ShardReplicationPositionsRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }, + expected: &vtctldatapb.ShardReplicationPositionsResponse{ + ReplicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + TabletMap: map[string]*topodatapb.Tablet{ + "zone1-0000000100": { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + "zone1-0000000101": { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + }, + }, }, + shouldErr: false, }, { - Name: "ks3", - Keyspace: &topodatapb.Keyspace{ - ShardingColumnName: "ks3_col1", + name: "other rpc errors are fatal", + ts: memorytopo.NewServer("zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + }, + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ + "zone1-0000000101": { + Position: &replicationdatapb.Status{ + Position: "replica_tablet_position", + }, + }, + }, 
+ }, + req: &vtctldatapb.ShardReplicationPositionsRequest{ + Keyspace: "testkeyspace", + Shard: "-", + }, + expected: nil, + shouldErr: true, + }, + { + name: "nonexistent shard", + ts: memorytopo.NewServer("zone1"), + req: &vtctldatapb.ShardReplicationPositionsRequest{ + Keyspace: "testkeyspace", + Shard: "-", }, + expected: nil, + shouldErr: true, }, } - for _, ks := range expected { - addKeyspace(ctx, t, ts, ks) + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + SkipShardCreation: false, + }, tt.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + if tt.ctxTimeout > 0 { + _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) + defer cancel() + + ctx = _ctx + } + + resp, err := vtctld.ShardReplicationPositions(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) } +} - resp, err = vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) - assert.NoError(t, err) - assert.Equal(t, expected, resp.Keyspaces) +func TestTabletExternallyReparented(t *testing.T) { + t.Parallel() - topofactory.SetError(errors.New("error from toposerver")) + tests := []struct { + name string + topo []*topodatapb.Tablet + topoErr error + tmcHasNoTopo bool + req *vtctldatapb.TabletExternallyReparentedRequest + expected *vtctldatapb.TabletExternallyReparentedResponse + shouldErr bool + expectedTopo []*topodatapb.Tablet + }{ + { + name: "success", + topo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: 
&topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + topoErr: nil, + req: &vtctldatapb.TabletExternallyReparentedRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, + expected: &vtctldatapb.TabletExternallyReparentedResponse{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + OldPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + shouldErr: false, + // NOTE: this seems weird, right? Why is the old primary still a + // MASTER, and why is the new primary's term start 0,0? Well, our + // test client implementation is a little incomplete. See + // ./testutil/test_tmclient.go for reference. + expectedTopo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_UNKNOWN, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{}, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "tablet is nil", + topo: nil, + topoErr: nil, + req: &vtctldatapb.TabletExternallyReparentedRequest{ + Tablet: nil, + }, + expected: nil, + shouldErr: true, + expectedTopo: nil, + }, + { + name: "topo is down", + topo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + 
MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + topoErr: assert.AnError, + req: &vtctldatapb.TabletExternallyReparentedRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, + expected: nil, + shouldErr: true, + expectedTopo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "tablet is already primary", + topo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + topoErr: nil, + req: &vtctldatapb.TabletExternallyReparentedRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: &vtctldatapb.TabletExternallyReparentedResponse{ + Keyspace: "testkeyspace", + 
Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + OldPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + shouldErr: false, + expectedTopo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "cannot change tablet type", + topo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + topoErr: nil, + tmcHasNoTopo: true, + req: &vtctldatapb.TabletExternallyReparentedRequest{ + Tablet: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, + expected: nil, + shouldErr: true, + expectedTopo: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: 
&topodatapb.TabletAlias{ + Cell: "zone3", + Uid: 300, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + } - _, err = vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) - assert.Error(t, err) + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cells := []string{"zone1", "zone2", "zone3"} + + ctx := context.Background() + ts, topofactory := memorytopo.NewServerAndFactory(cells...) + tmc := testutil.TabletManagerClient{ + TopoServer: ts, + } + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + if tt.tmcHasNoTopo { + // For certain test cases, we want specifically just the + // ChangeType call to fail, which is why we rely on a separate + // bool rather than using tt.topoErr. + tmc.TopoServer = nil + } + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + }, tt.topo...) + + if tt.topoErr != nil { + topofactory.SetError(tt.topoErr) + } + + if tt.expectedTopo != nil { + // assert on expectedTopo state when we've fininished the rest + // of the test. 
+ defer func() { + topofactory.SetError(nil) + + ctx, cancel := context.WithTimeout(ctx, time.Millisecond*10) + defer cancel() + + resp, err := vtctld.GetTablets(ctx, &vtctldatapb.GetTabletsRequest{}) + require.NoError(t, err, "cannot get all tablets in the topo") + assert.ElementsMatch(t, tt.expectedTopo, resp.Tablets) + }() + } + + resp, err := vtctld.TabletExternallyReparented(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, resp) + }) + } } diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go b/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go new file mode 100644 index 00000000000..b17a1b4aeaf --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go @@ -0,0 +1,112 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// AssertEmergencyReparentShardResponsesEqual asserts that two +// vtctldatapb.EmergencyReparentShardResponse objects are equal, ignoring their +// respective Events field in the comparison. 
+func AssertEmergencyReparentShardResponsesEqual(t *testing.T, expected vtctldatapb.EmergencyReparentShardResponse, actual vtctldatapb.EmergencyReparentShardResponse, msgAndArgs ...interface{}) { + t.Helper() + + // We take both the expected and actual values by value, rather than by + // reference, so this mutation is safe to do and will not interfere with + // other assertions performed in the calling function. + expected.Events = nil + actual.Events = nil + + assert.Equal(t, expected, actual, msgAndArgs...) +} + +// AssertPlannedReparentShardResponsesEqual asserts that two +// vtctldatapb.PlannedReparentShardResponse objects are equal, ignoring their +// respective Events field in the comparison. +func AssertPlannedReparentShardResponsesEqual(t *testing.T, expected vtctldatapb.PlannedReparentShardResponse, actual vtctldatapb.PlannedReparentShardResponse, msgAndArgs ...interface{}) { + t.Helper() + + expected.Events = nil + actual.Events = nil + + assert.Equal(t, expected, actual, msgAndArgs...) +} + +// AssertKeyspacesEqual is a convenience function to assert that two +// vtctldatapb.Keyspace objects are equal, after clearing out any reserved +// proto XXX_ fields. +func AssertKeyspacesEqual(t *testing.T, expected *vtctldatapb.Keyspace, actual *vtctldatapb.Keyspace, msgAndArgs ...interface{}) { + t.Helper() + + for _, ks := range []*vtctldatapb.Keyspace{expected, actual} { + if ks.Keyspace != nil { + ks.Keyspace.XXX_sizecache = 0 + ks.Keyspace.XXX_unrecognized = nil + } + + if ks.Keyspace.SnapshotTime != nil { + ks.Keyspace.SnapshotTime.XXX_sizecache = 0 + ks.Keyspace.SnapshotTime.XXX_unrecognized = nil + } + } + + assert.Equal(t, expected, actual, msgAndArgs...) +} + +// AssertLogutilEventsOccurred asserts that for something containing a slice of +// logutilpb.Event, that the container is non-nil, and the event slice is +// non-zero. 
+// +// This test function is generalized with an anonymous interface that any +// protobuf type containing a slice of logutilpb.Event elements called Events, +// which is the convention in protobuf types in the Vitess codebase, already +// implements. +func AssertLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) { + t.Helper() + + if container == nil { + assert.Fail(t, "Events container must not be nil", msgAndArgs...) + + return + } + + assert.Greater(t, len(container.GetEvents()), 0, msgAndArgs...) +} + +// AssertNoLogutilEventsOccurred asserts that for something containing a slice +// of logutilpb.Event, that the container is either nil, or that the event slice +// is exactly zero length. +// +// This test function is generalized with an anonymous interface that any +// protobuf type containing a slice of logutilpb.Event elements called Events, +// which is the convention in protobuf types in the Vitess codebase, already +// implements. +func AssertNoLogutilEventsOccurred(t *testing.T, container interface{ GetEvents() []*logutilpb.Event }, msgAndArgs ...interface{}) { + t.Helper() + + if container == nil { + return + } + + assert.Equal(t, len(container.GetEvents()), 0, msgAndArgs...) +} diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/srv_keyspace.go b/go/vt/vtctl/grpcvtctldserver/testutil/srv_keyspace.go new file mode 100644 index 00000000000..378dbe3ce7b --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/testutil/srv_keyspace.go @@ -0,0 +1,49 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// SrvKeyspace groups a topodatapb.SrvKeyspace together with a keyspace and +// cell. +type SrvKeyspace struct { + Keyspace string + Cell string + SrvKeyspace *topodatapb.SrvKeyspace +} + +// AddSrvKeyspaces adds one or more SrvKeyspace objects to the topology. It +// fails the calling test if any of the objects fail to update. +func AddSrvKeyspaces(t *testing.T, ts *topo.Server, srvKeyspaces ...*SrvKeyspace) { + t.Helper() + + ctx := context.Background() + + for _, sk := range srvKeyspaces { + err := ts.UpdateSrvKeyspace(ctx, sk.Cell, sk.Keyspace, sk.SrvKeyspace) + require.NoError(t, err, "UpdateSrvKeyspace(cell = %v, keyspace = %v, srv_keyspace = %v", sk.Cell, sk.Keyspace, sk.SrvKeyspace) + } +} diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_backupstorage.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_backupstorage.go new file mode 100644 index 00000000000..a871cbfdbf7 --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_backupstorage.go @@ -0,0 +1,92 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "context" + "sort" + + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" +) + +type backupStorage struct { + backupstorage.BackupStorage + + // Backups is a mapping of directory to list of backup names stored in that + // directory. + Backups map[string][]string + // ListBackupsError is returned from ListBackups when it is non-nil. + ListBackupsError error +} + +// ListBackups is part of the backupstorage.BackupStorage interface. +func (bs *backupStorage) ListBackups(ctx context.Context, dir string) ([]backupstorage.BackupHandle, error) { + if bs.ListBackupsError != nil { + return nil, bs.ListBackupsError + } + + handles := []backupstorage.BackupHandle{} + + for k, v := range bs.Backups { + if k == dir { + for _, name := range v { + handles = append(handles, &backupHandle{directory: k, name: name}) + } + } + } + + sort.Sort(handlesByName(handles)) + + return handles, nil +} + +// Close is part of the backupstorage.BackupStorage interface. +func (bs *backupStorage) Close() error { return nil } + +// backupHandle implements a subset of the backupstorage.backupHandle interface. +type backupHandle struct { + backupstorage.BackupHandle + + directory string + name string +} + +func (bh *backupHandle) Directory() string { return bh.directory } +func (bh *backupHandle) Name() string { return bh.name } + +// handlesByName implements the sort interface for backup handles by Name(). 
+type handlesByName []backupstorage.BackupHandle + +func (a handlesByName) Len() int { return len(a) } +func (a handlesByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a handlesByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() } + +// BackupStorageImplementation is the name this package registers its test +// backupstorage.BackupStorage implementation as. Users should set +// *backupstorage.BackupStorageImplementation to this value before use. +const BackupStorageImplementation = "grpcvtctldserver.testutil" + +// BackupStorage is the singleton test backupstorage.BackupStorage instance. It +// is public and singleton to allow tests to both mutate and assert against its +// state. +var BackupStorage = &backupStorage{ + Backups: map[string][]string{}, +} + +func init() { + backupstorage.BackupStorageMap[BackupStorageImplementation] = BackupStorage +} diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go new file mode 100644 index 00000000000..ccc4e53c5b0 --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -0,0 +1,597 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testutil + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + querypb "vitess.io/vitess/go/vt/proto/query" + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" + "vitess.io/vitess/go/vt/proto/vttime" +) + +var ( + tmclientLock sync.Mutex + tmclientFactoryLock sync.Mutex + tmclients = map[string]tmclient.TabletManagerClient{} + tmclientFactories = map[string]func() tmclient.TabletManagerClient{} +) + +// NewVtctldServerWithTabletManagerClient returns a new +// grpcvtctldserver.VtctldServer configured with the given topo server and +// tmclient.TabletManagerClient implementation for testing. +// +// It synchronizes on private locks to prevent multiple goroutines from stepping +// on each other during VtctldServer initialization, but still run the rest of +// the test in parallel. +// +// NOTE, THE FIRST: It is only safe to use in parallel with other tests using +// this method of creating a VtctldServer, or with tests that do not depend on a +// VtctldServer's tmclient.TabletManagerClient implementation. +// +// NOTE, THE SECOND: It needs to register a unique name to the tmclient factory +// registry, so we keep a shadow map of factories registered for "protocols" by +// this function. That way, if we happen to have multiple tests with the same +// name, we can swap out the return value for the factory and allow both tests +// to run, rather than the second test failing when it attempts to register a +// second factory for the same "protocol" name. 
+// +// NOTE, THE THIRD: we take a "new" func to produce a valid +// vtctlservicepb.VtctldServer implementation, rather than constructing directly +// ourselves with grpcvtctldserver.NewVtctldServer. This is to prevent an import +// cycle between this package and package grpcvtctldserver. Further, because the +// return type of NewVtctldServer is the struct type +// (*grpcvtctldserver.VtctldServer) and not the interface type +// vtctlservicepb.VtctldServer, tests will need to indirect that call through an +// extra layer rather than passing the function identifier directly, e.g.: +// +// vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ +// ... +// }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) +// +func NewVtctldServerWithTabletManagerClient(t *testing.T, ts *topo.Server, tmc tmclient.TabletManagerClient, newVtctldServerFn func(ts *topo.Server) vtctlservicepb.VtctldServer) vtctlservicepb.VtctldServer { + tmclientFactoryLock.Lock() + defer tmclientFactoryLock.Unlock() + + protocol := t.Name() + + if _, alreadyRegisteredFactory := tmclientFactories[protocol]; !alreadyRegisteredFactory { + factory := func() tmclient.TabletManagerClient { + tmclientLock.Lock() + defer tmclientLock.Unlock() + + client, ok := tmclients[protocol] + if !ok { + t.Fatal("Test managed to register a factory for a client value that never got set; this should be impossible") + } + + return client + } + + tmclient.RegisterTabletManagerClientFactory(protocol, factory) + tmclientFactories[protocol] = factory + } + + // Always swap in the new client return value for the given protocol name. + // We cannot defer the unlock here, because grpcvtctldserver.NewVtctldServer + // eventually will call into the factory we registered above, and we will + // deadlock ourselves. 
+ tmclientLock.Lock() + tmclients[protocol] = tmc + tmclientLock.Unlock() + + // Be (mostly, we can't help concurrent goroutines not using this function) + // atomic with our mutation of the global TabletManagerProtocol pointer. + oldProto := *tmclient.TabletManagerProtocol + defer func() { *tmclient.TabletManagerProtocol = oldProto }() + + *tmclient.TabletManagerProtocol = protocol + + return newVtctldServerFn(ts) +} + +// TabletManagerClient implements the tmclient.TabletManagerClient interface +// with mock delays and response values, for use in unit tests. +type TabletManagerClient struct { + tmclient.TabletManagerClient + // TopoServer is used for certain TabletManagerClient rpcs that update topo + // information, e.g. ChangeType. To force an error result for those rpcs in + // a test, set tmc.TopoServer = nil. + TopoServer *topo.Server + // keyed by tablet alias. + DemoteMasterDelays map[string]time.Duration + // keyed by tablet alias. + DemoteMasterResults map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + } + // keyed by tablet alias. + GetSchemaDelays map[string]time.Duration + // keyed by tablet alias. + GetSchemaResults map[string]struct { + Schema *tabletmanagerdatapb.SchemaDefinition + Error error + } + // keyed by tablet alias. + MasterPositionDelays map[string]time.Duration + // keyed by tablet alias. + MasterPositionResults map[string]struct { + Position string + Error error + } + // keyed by tablet alias. + PopulateReparentJournalDelays map[string]time.Duration + // keyed by tablet alias + PopulateReparentJournalResults map[string]error + // keyed by tablet alias. + PromoteReplicaDelays map[string]time.Duration + // keyed by tablet alias. injects a sleep to the end of the function + // regardless of parent context timeout or error result. + PromoteReplicaPostDelays map[string]time.Duration + // keyed by tablet alias. 
+ PromoteReplicaResults map[string]struct { + Result string + Error error + } + ReplicationStatusDelays map[string]time.Duration + ReplicationStatusResults map[string]struct { + Position *replicationdatapb.Status + Error error + } + // keyed by tablet alias. + SetMasterDelays map[string]time.Duration + // keyed by tablet alias. + SetMasterResults map[string]error + // keyed by tablet alias. + SetReadWriteDelays map[string]time.Duration + // keyed by tablet alias. + SetReadWriteResults map[string]error + // keyed by tablet alias. + StopReplicationAndGetStatusDelays map[string]time.Duration + // keyed by tablet alias. + StopReplicationAndGetStatusResults map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + } + // keyed by tablet alias. + UndoDemoteMasterDelays map[string]time.Duration + // keyed by tablet alias + UndoDemoteMasterResults map[string]error + // tablet alias => duration + VReplicationExecDelays map[string]time.Duration + // tablet alias => query string => result + VReplicationExecResults map[string]map[string]struct { + Result *querypb.QueryResult + Error error + } + // keyed by tablet alias. + WaitForPositionDelays map[string]time.Duration + // keyed by tablet alias. injects a sleep to the end of the function + // regardless of parent context timeout or error result. + WaitForPositionPostDelays map[string]time.Duration + // WaitForPosition(tablet *topodatapb.Tablet, position string) error, so we + // key by tablet alias and then by position. + WaitForPositionResults map[string]map[string]error +} + +// ChangeType is part of the tmclient.TabletManagerClient interface. 
+func (fake *TabletManagerClient) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, newType topodatapb.TabletType) error { + if fake.TopoServer == nil { + return assert.AnError + } + + _, err := topotools.ChangeType(ctx, fake.TopoServer, tablet.Alias, newType, &vttime.Time{}) + return err +} + +// DemoteMaster is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) DemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.MasterStatus, error) { + if fake.DemoteMasterResults == nil { + return nil, assert.AnError + } + + if tablet.Alias == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.DemoteMasterDelays != nil { + if delay, ok := fake.DemoteMasterDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.DemoteMasterResults[key]; ok { + return result.Status, result.Error + } + + return nil, assert.AnError +} + +// GetSchema is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, tablets []string, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { + if fake.GetSchemaResults == nil { + return nil, assert.AnError + } + + if tablet.Alias == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.GetSchemaDelays != nil { + if delay, ok := fake.GetSchemaDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.GetSchemaResults[key]; ok { + return result.Schema, result.Error + } + + return nil, fmt.Errorf("%w: no schemas for %s", assert.AnError, key) +} + +// MasterPosition is part of the tmclient.TabletManagerClient interface. 
+func (fake *TabletManagerClient) MasterPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { + if fake.MasterPositionResults == nil { + return "", assert.AnError + } + + if tablet.Alias == nil { + return "", assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.MasterPositionDelays != nil { + if delay, ok := fake.MasterPositionDelays[key]; ok { + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.MasterPositionResults[key]; ok { + return result.Position, result.Error + } + + return "", assert.AnError +} + +// PopulateReparentJournal is part of the tmclient.TabletManagerClient +// interface. +func (fake *TabletManagerClient) PopulateReparentJournal(ctx context.Context, tablet *topodatapb.Tablet, timeCreatedNS int64, actionName string, primaryAlias *topodatapb.TabletAlias, pos string) error { + if fake.PopulateReparentJournalResults == nil { + return assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.PopulateReparentJournalDelays != nil { + if delay, ok := fake.PopulateReparentJournalDelays[key]; ok { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + if result, ok := fake.PopulateReparentJournalResults[key]; ok { + return result + } + + return assert.AnError +} + +// PromoteReplica is part of the tmclient.TabletManagerClient interface. 
+func (fake *TabletManagerClient) PromoteReplica(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { + if fake.PromoteReplicaResults == nil { + return "", assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + defer func() { + if fake.PromoteReplicaPostDelays == nil { + return + } + + if delay, ok := fake.PromoteReplicaPostDelays[key]; ok { + time.Sleep(delay) + } + }() + + if fake.PromoteReplicaDelays != nil { + if delay, ok := fake.PromoteReplicaDelays[key]; ok { + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.PromoteReplicaResults[key]; ok { + return result.Result, result.Error + } + + return "", assert.AnError +} + +// ReplicationStatus is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) ReplicationStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { + if fake.ReplicationStatusResults == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.ReplicationStatusDelays != nil { + if delay, ok := fake.ReplicationStatusDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.ReplicationStatusResults[key]; ok { + return result.Position, result.Error + } + + return nil, assert.AnError +} + +// SetMaster is part of the tmclient.TabletManagerClient interface. 
+func (fake *TabletManagerClient) SetMaster(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { + if fake.SetMasterResults == nil { + return assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.SetMasterDelays != nil { + if delay, ok := fake.SetMasterDelays[key]; ok { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.SetMasterResults[key]; ok { + return result + } + + return assert.AnError +} + +// SetReadWrite is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) SetReadWrite(ctx context.Context, tablet *topodatapb.Tablet) error { + if fake.SetReadWriteResults == nil { + return assert.AnError + } + + if tablet.Alias == nil { + return assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.SetReadWriteDelays != nil { + if delay, ok := fake.SetReadWriteDelays[key]; ok { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if err, ok := fake.SetReadWriteResults[key]; ok { + return err + } + + return assert.AnError +} + +// StopReplicationAndGetStatus is part of the tmclient.TabletManagerClient +// interface. 
+func (fake *TabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topodatapb.Tablet, mode replicationdatapb.StopReplicationMode) (*replicationdatapb.Status, *replicationdatapb.StopReplicationStatus, error) { + if fake.StopReplicationAndGetStatusResults == nil { + return nil, nil, assert.AnError + } + + if tablet.Alias == nil { + return nil, nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.StopReplicationAndGetStatusDelays != nil { + if delay, ok := fake.StopReplicationAndGetStatusDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.StopReplicationAndGetStatusResults[key]; ok { + return result.Status, result.StopStatus, result.Error + } + + return nil, nil, assert.AnError +} + +// WaitForPosition is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) WaitForPosition(ctx context.Context, tablet *topodatapb.Tablet, position string) error { + tabletKey := topoproto.TabletAliasString(tablet.Alias) + + defer func() { + if fake.WaitForPositionPostDelays == nil { + return + } + + if delay, ok := fake.WaitForPositionPostDelays[tabletKey]; ok { + time.Sleep(delay) + } + }() + + if fake.WaitForPositionDelays != nil { + if delay, ok := fake.WaitForPositionDelays[tabletKey]; ok { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if fake.WaitForPositionResults == nil { + return assert.AnError + } + + tabletResultsByPosition, ok := fake.WaitForPositionResults[tabletKey] + if !ok { + return assert.AnError + } + + result, ok := tabletResultsByPosition[position] + if !ok { + return assert.AnError + } + + return result +} + +// UndoDemoteMaster is part of the tmclient.TabletManagerClient interface. 
+func (fake *TabletManagerClient) UndoDemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) error { + if fake.UndoDemoteMasterResults == nil { + return assert.AnError + } + + if tablet.Alias == nil { + return assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.UndoDemoteMasterDelays != nil { + if delay, ok := fake.UndoDemoteMasterDelays[key]; ok { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.UndoDemoteMasterResults[key]; ok { + return result + } + + return assert.AnError +} + +// VReplicationExec is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { + if fake.VReplicationExecResults == nil { + return nil, assert.AnError + } + + if tablet.Alias == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.VReplicationExecDelays != nil { + if delay, ok := fake.VReplicationExecDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if resultsForTablet, ok := fake.VReplicationExecResults[key]; ok { + // Round trip the expected query both to ensure it's valid and to + // standardize on capitalization and formatting. + stmt, err := sqlparser.Parse(query) + if err != nil { + return nil, err + } + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("%v", stmt) + + parsedQuery := buf.ParsedQuery().Query + + // Now do the map lookup. 
+ if result, ok := resultsForTablet[parsedQuery]; ok { + return result.Result, result.Error + } + } + + return nil, assert.AnError +} diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/util.go b/go/vt/vtctl/grpcvtctldserver/testutil/util.go new file mode 100644 index 00000000000..795a7547c43 --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/testutil/util.go @@ -0,0 +1,258 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testutil contains utility functions for writing tests for the +// grpcvtctldserver. +package testutil + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/net/nettest" + "google.golang.org/grpc" + + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" +) + +// WithTestServer creates a gRPC server listening locally with the given RPC +// implementation, then runs the test func with a client created to point at +// that server. 
+func WithTestServer( + t *testing.T, + server vtctlservicepb.VtctldServer, + test func(t *testing.T, client vtctldclient.VtctldClient), +) { + lis, err := nettest.NewLocalListener("tcp") + require.NoError(t, err, "cannot create local listener") + + defer lis.Close() + + s := grpc.NewServer() + vtctlservicepb.RegisterVtctldServer(s, server) + + go s.Serve(lis) + defer s.Stop() + + client, err := vtctldclient.New("grpc", lis.Addr().String()) + require.NoError(t, err, "cannot create vtctld client") + + test(t, client) +} + +// WithTestServers creates N gRPC servers listening locally with the given RPC +// implementations, and then runs the test func with N clients created, where +// clients[i] points at servers[i]. +func WithTestServers( + t *testing.T, + test func(t *testing.T, clients ...vtctldclient.VtctldClient), + servers ...vtctlservicepb.VtctldServer, +) { + // Declare our recursive helper function so it can refer to itself. + var withTestServers func(t *testing.T, servers ...vtctlservicepb.VtctldServer) + + // Preallocate a slice of clients we're eventually going to call the test + // function with. + clients := make([]vtctldclient.VtctldClient, 0, len(servers)) + + withTestServers = func(t *testing.T, servers ...vtctlservicepb.VtctldServer) { + if len(servers) == 0 { + // We've started up all the test servers and accumulated clients for + // each of them (or there were no test servers to start, and we've + // accumulated no clients), so finally we run the test and stop + // recursing. + test(t, clients...) + + return + } + + // Start up a test server for the head of our server slice, accumulate + // the resulting client, and recurse on the tail of our server slice. + WithTestServer(t, servers[0], func(t *testing.T, client vtctldclient.VtctldClient) { + clients = append(clients, client) + withTestServers(t, servers[1:]...) + }) + } + + withTestServers(t, servers...) 
+} + +// AddKeyspace adds a keyspace to a topology, failing a test if that keyspace +// could not be added. It shallow copies the proto struct to prevent XXX_ fields +// from changing in the marshalling. +func AddKeyspace(ctx context.Context, t *testing.T, ts *topo.Server, ks *vtctldatapb.Keyspace) { + in := *ks.Keyspace // take a copy to avoid XXX_ fields changing. + + err := ts.CreateKeyspace(ctx, ks.Name, &in) + require.NoError(t, err) +} + +// AddKeyspaces adds a list of keyspaces to the topology, failing a test if any +// of those keyspaces cannot be added. See AddKeyspace for details. +func AddKeyspaces(ctx context.Context, t *testing.T, ts *topo.Server, keyspaces ...*vtctldatapb.Keyspace) { + for _, keyspace := range keyspaces { + AddKeyspace(ctx, t, ts, keyspace) + } +} + +// AddTabletOptions is a container for different behaviors tests need from +// AddTablet. +type AddTabletOptions struct { + // AlsoSetShardMaster is an option to control additional setup to take when + // AddTablet receives a tablet of type MASTER. When set, AddTablet will also + // update the shard record to make that tablet the primary, and fail the + // test if the shard record has a serving primary already. + AlsoSetShardMaster bool + // ForceSetShardMaster, when combined with AlsoSetShardMaster, will ignore + // any existing primary in the shard, making the current tablet the serving + // primary (given it is type MASTER), and log that it has done so. + ForceSetShardMaster bool + // SkipShardCreation, when set, makes AddTablet never attempt to create a + // shard record in the topo under any circumstances. + SkipShardCreation bool +} + +// AddTablet adds a tablet to the topology, failing a test if that tablet record +// could not be created. It shallow copies to prevent XXX_ fields from changing, +// including nested proto message fields. 
+// +// AddTablet also optionally adds empty keyspace and shard records to the +// topology, if they are set on the tablet record and they cannot be retrieved +// from the topo server without error. +// +// If AddTablet receives a tablet record with a keyspace and shard set, and that +// tablet's type is MASTER, and opts.AlsoSetShardMaster is set, then AddTablet +// will update the shard record to make that tablet the shard master and set the +// shard to serving. If that shard record already has a serving primary, then +// AddTablet will fail the test. +func AddTablet(ctx context.Context, t *testing.T, ts *topo.Server, tablet *topodatapb.Tablet, opts *AddTabletOptions) { + in := *tablet + alias := *tablet.Alias + in.Alias = &alias + + if opts == nil { + opts = &AddTabletOptions{} + } + + err := ts.CreateTablet(ctx, &in) + require.NoError(t, err, "CreateTablet(%+v)", &in) + + if opts.SkipShardCreation { + return + } + + if tablet.Keyspace != "" { + if _, err := ts.GetKeyspace(ctx, tablet.Keyspace); err != nil { + err := ts.CreateKeyspace(ctx, tablet.Keyspace, &topodatapb.Keyspace{}) + require.NoError(t, err, "CreateKeyspace(%s)", tablet.Keyspace) + } + + if tablet.Shard != "" { + if _, err := ts.GetShard(ctx, tablet.Keyspace, tablet.Shard); err != nil { + err := ts.CreateShard(ctx, tablet.Keyspace, tablet.Shard) + require.NoError(t, err, "CreateShard(%s, %s)", tablet.Keyspace, tablet.Shard) + } + + if tablet.Type == topodatapb.TabletType_MASTER && opts.AlsoSetShardMaster { + _, err := ts.UpdateShardFields(ctx, tablet.Keyspace, tablet.Shard, func(si *topo.ShardInfo) error { + if si.IsMasterServing && si.MasterAlias != nil { + msg := fmt.Sprintf("shard %v/%v already has a serving master (%v)", tablet.Keyspace, tablet.Shard, topoproto.TabletAliasString(si.MasterAlias)) + + if !opts.ForceSetShardMaster { + return errors.New(msg) + } + + t.Logf("%s; replacing with %v because ForceSetShardMaster = true", msg, topoproto.TabletAliasString(tablet.Alias)) + } + + 
si.MasterAlias = tablet.Alias + si.IsMasterServing = true + si.MasterTermStartTime = tablet.MasterTermStartTime + + return nil + }) + require.NoError(t, err, "UpdateShardFields(%s, %s) to set %s as serving primary failed", tablet.Keyspace, tablet.Shard, topoproto.TabletAliasString(tablet.Alias)) + } + } + } +} + +// AddTablets adds a list of tablets to the topology. See AddTablet for more +// details. +func AddTablets(ctx context.Context, t *testing.T, ts *topo.Server, opts *AddTabletOptions, tablets ...*topodatapb.Tablet) { + for _, tablet := range tablets { + AddTablet(ctx, t, ts, tablet, opts) + } +} + +// AddShards adds a list of shards to the topology, failing a test if any of the +// shard records could not be created. It also ensures that every shard's +// keyspace exists, or creates an empty keyspace if that shard's keyspace does +// not exist. +func AddShards(ctx context.Context, t *testing.T, ts *topo.Server, shards ...*vtctldatapb.Shard) { + for _, shard := range shards { + if shard.Keyspace != "" { + if _, err := ts.GetKeyspace(ctx, shard.Keyspace); err != nil { + err := ts.CreateKeyspace(ctx, shard.Keyspace, &topodatapb.Keyspace{}) + require.NoError(t, err, "CreateKeyspace(%s)", shard.Keyspace) + } + } + + err := ts.CreateShard(ctx, shard.Keyspace, shard.Name) + require.NoError(t, err, "CreateShard(%s/%s)", shard.Keyspace, shard.Name) + + if shard.Shard != nil { + _, err := ts.UpdateShardFields(ctx, shard.Keyspace, shard.Name, func(si *topo.ShardInfo) error { + si.Shard = shard.Shard + + return nil + }) + require.NoError(t, err, "UpdateShardFields(%s/%s, %v)", shard.Keyspace, shard.Name, shard.Shard) + } + } +} + +// SetupReplicationGraphs creates a set of ShardReplication objects in the topo, +// failing the test if any of the records could not be created. 
+func SetupReplicationGraphs(ctx context.Context, t *testing.T, ts *topo.Server, replicationGraphs ...*topo.ShardReplicationInfo) { + for _, graph := range replicationGraphs { + err := ts.UpdateShardReplicationFields(ctx, graph.Cell(), graph.Keyspace(), graph.Shard(), func(sr *topodatapb.ShardReplication) error { + sr.Nodes = graph.Nodes + return nil + }) + require.NoError(t, err, "could not save replication graph for %s/%s in cell %v", graph.Keyspace(), graph.Shard(), graph.Cell()) + } +} + +// UpdateSrvKeyspaces updates a set of SrvKeyspace records, grouped by cell and +// then by keyspace. It fails the test if any records cannot be updated. +func UpdateSrvKeyspaces(ctx context.Context, t *testing.T, ts *topo.Server, srvkeyspacesByCellByKeyspace map[string]map[string]*topodatapb.SrvKeyspace) { + for cell, srvKeyspacesByKeyspace := range srvkeyspacesByCellByKeyspace { + for keyspace, srvKeyspace := range srvKeyspacesByKeyspace { + err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace) + require.NoError(t, err, "UpdateSrvKeyspace(%v, %v, %v)", cell, keyspace, srvKeyspace) + } + } +} diff --git a/go/vt/vtctl/grpcvtctldserver/topo.go b/go/vt/vtctl/grpcvtctldserver/topo.go new file mode 100644 index 00000000000..7de161bcd22 --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/topo.go @@ -0,0 +1,292 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package grpcvtctldserver + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +func deleteShard(ctx context.Context, ts *topo.Server, keyspace string, shard string, recursive bool, evenIfServing bool) error { + // Read the Shard object. If it's not in the topo, try to clean up the topo + // anyway. + shardInfo, err := ts.GetShard(ctx, keyspace, shard) + if err != nil { + if topo.IsErrType(err, topo.NoNode) { + log.Infof("Shard %v/%v doesn't seem to exist; cleaning up any potential leftover topo data", keyspace, shard) + + return ts.DeleteShard(ctx, keyspace, shard) + } + + return err + } + + servingCells, err := ts.GetShardServingCells(ctx, shardInfo) + if err != nil { + return err + } + + // We never want to remove a potentially serving shard unless someone + // explicitly requested it. + if len(servingCells) > 0 && !evenIfServing { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "shard %v/%v is still serving; cannot delete it; use EvenIfServing = true to delete anyway", keyspace, shard) + } + + cells, err := ts.GetCellInfoNames(ctx) + if err != nil { + return err + } + + for _, cell := range cells { + if err := deleteShardCell(ctx, ts, keyspace, shard, cell, recursive); err != nil { + return err + } + } + + // Try to remove the replication and serving graphs from each cell, + // regardless of whether they exist. 
+ for _, cell := range cells { + if err := ts.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil && !topo.IsErrType(err, topo.NoNode) { + log.Warningf("Cannot delete ShardReplication in cell %v for %v/%v: %w", cell, keyspace, shard, err) + } + } + + return ts.DeleteShard(ctx, keyspace, shard) +} + +// deleteShardCell is the per-cell helper function for deleteShard, and is +// distinct from the RemoveShardCell rpc. Despite having similar names, they are +// **not** the same! +func deleteShardCell(ctx context.Context, ts *topo.Server, keyspace string, shard string, cell string, recursive bool) error { + var aliases []*topodatapb.TabletAlias + + // Get the ShardReplication object for the cell. Collect all the tablets + // that belong to the shard. + sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard) + switch { + case topo.IsErrType(err, topo.NoNode): + // No ShardReplication object means that the topo is inconsistent. + // Therefore we read all the tablets for that cell, and if we find any + // in our shard, we'll either abort or try to delete them, depending on + // whether recursive=true. + aliases, err = ts.GetTabletsByCell(ctx, cell) + if err != nil { + return fmt.Errorf("GetTabletsByCell(%v) failed: %w", cell, err) + } + case err == nil: + // If a ShardReplication object exists, we trust it to have all the + // tablet records for the shard in that cell. + aliases = make([]*topodatapb.TabletAlias, len(sri.Nodes)) + + for i, node := range sri.Nodes { + aliases[i] = node.TabletAlias + } + default: + return fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %w", cell, keyspace, shard, err) + } + + // Get all the tablet records for the aliases we've collected. Note that + // GetTabletMap ignores ErrNoNode, which is convenient for our purpose; it + // means a tablet was deleted but is still referenced. 
+ tabletMap, err := ts.GetTabletMap(ctx, aliases) + if err != nil { + return fmt.Errorf("GetTabletMap() failed: %w", err) + } + + // In the case where no ShardReplication object exists, we collect the + // aliases of every tablet in the cell, so we'll need to filter + // out anything not in our shard. + for alias, ti := range tabletMap { + if !(ti.Keyspace == keyspace && ti.Shard == shard) { + delete(tabletMap, alias) + } + } + + // If there are any tablets in the shard in the cell, delete them. + if len(tabletMap) > 0 { + if !recursive { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "Shard %v/%v still has %v tablets in cell %v; use Recursive = true or remove them manually", keyspace, shard, len(tabletMap), cell) + } + + log.Infof("Deleting all %d tablets in shard %v/%v cell %v", len(tabletMap), keyspace, shard, cell) + for alias, tablet := range tabletMap { + // We don't care about updating the ShardReplication object, because + // later we're going to delete the entire object. + log.Infof("Deleting tablet %v", alias) + if err := ts.DeleteTablet(ctx, tablet.Alias); err != nil && !topo.IsErrType(err, topo.NoNode) { + // We don't want to continue if a DeleteTablet fails for any + // reason other than a missing tablet (in which case it's just + // topo server inconsistency, which we can ignore). If we were + // to continue and delete the replication graph, the tablet + // record would become orphaned, since we'd no longer know that + // it belongs to this shard. + // + // If the problem is temporary, or resolved externally, + // re-running DeleteShard will skip over tablets that were + // already deleted. 
+ return fmt.Errorf("cannot delete tablet %v: %w", alias, err) + } + } + } + + return nil +} + +func deleteTablet(ctx context.Context, ts *topo.Server, alias *topodatapb.TabletAlias, allowPrimary bool) (err error) { + tablet, err := ts.GetTablet(ctx, alias) + if err != nil { + return err + } + + isPrimary, err := topotools.IsPrimaryTablet(ctx, ts, tablet) + if err != nil { + return err + } + + if isPrimary && !allowPrimary { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot delete tablet %v as it is a master, pass AllowPrimary = true", topoproto.TabletAliasString(alias)) + } + + // Update the Shard object if the master was scrapped. We do this before + // calling DeleteTablet so that the operation can be retried in case of + // failure. + if isPrimary { + lockCtx, unlock, lockErr := ts.LockShard(ctx, tablet.Keyspace, tablet.Shard, fmt.Sprintf("DeleteTablet(%v)", topoproto.TabletAliasString(alias))) + if lockErr != nil { + return lockErr + } + + defer unlock(&err) + + if _, err := ts.UpdateShardFields(lockCtx, tablet.Keyspace, tablet.Shard, func(si *topo.ShardInfo) error { + if !topoproto.TabletAliasEqual(si.MasterAlias, alias) { + log.Warningf( + "Deleting master %v from shard %v/%v but master in Shard object was %v", + topoproto.TabletAliasString(alias), tablet.Keyspace, tablet.Shard, topoproto.TabletAliasString(si.MasterAlias), + ) + + return topo.NewError(topo.NoUpdateNeeded, si.Keyspace()+"/"+si.ShardName()) + } + + si.MasterAlias = nil + + return nil + }); err != nil { + return err + } + } + + // Remove the tablet record and its replication graph entry. + if err := topotools.DeleteTablet(ctx, ts, tablet.Tablet); err != nil { + return err + } + + // Return any error from unlocking the keyspace. 
+ return err +} + +func removeShardCell(ctx context.Context, ts *topo.Server, cell string, keyspace string, shardName string, recursive bool, force bool) error { + shard, err := ts.GetShard(ctx, keyspace, shardName) + if err != nil { + return err + } + + servingCells, err := ts.GetShardServingCells(ctx, shard) + if err != nil { + return err + } + + if !topo.InCellList(cell, servingCells) { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "shard %v/%v does not have serving cell %v", keyspace, shardName, cell) + } + + if shard.MasterAlias != nil && shard.MasterAlias.Cell == cell { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot remove cell %v; shard master %v is in cell", cell, topoproto.TabletAliasString(shard.MasterAlias)) + } + + replication, err := ts.GetShardReplication(ctx, cell, keyspace, shardName) + switch { + case err == nil: + // We have tablets in the shard in this cell. + if recursive { + log.Infof("Deleting all tablets in cell %v in shard %v/%v", cell, keyspace, shardName) + for _, node := range replication.Nodes { + // We don't care about scrapping or updating the replication + // graph, because we're about to delete the entire replication + // graph. + log.Infof("Deleting tablet %v", topoproto.TabletAliasString(node.TabletAlias)) + if err := ts.DeleteTablet(ctx, node.TabletAlias); err != nil && !topo.IsErrType(err, topo.NoNode) { + return fmt.Errorf("cannot delete tablet %v: %w", topoproto.TabletAliasString(node.TabletAlias), err) + } + } + } else if len(replication.Nodes) > 0 { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cell %v has %v possible tablets in replication graph", cell, len(replication.Nodes)) + } + + // Remove the empty replication graph. 
+ if err := ts.DeleteShardReplication(ctx, cell, keyspace, shardName); err != nil && !topo.IsErrType(err, topo.NoNode) { + return fmt.Errorf("error deleting ShardReplication object in cell %v: %w", cell, err) + } + case topo.IsErrType(err, topo.NoNode): + // No ShardReplication object. This is the expected path when there are + // no tablets in the shard in that cell. + err = nil + default: + // If we can't get the replication object out of the local topo, we + // assume the topo server is down in that cell, so we'll only continue + // if Force was specified. + if !force { + return err + } + + log.Warningf("Cannot get ShardReplication from cell %v; assuming cell topo server is down and forcing removal", cell) + } + + // Finally, update the shard. + + log.Infof("Removing cell %v from SrvKeyspace %v/%v", cell, keyspace, shardName) + + ctx, unlock, lockErr := ts.LockKeyspace(ctx, keyspace, "Locking keyspace to remove shard from SrvKeyspace") + if lockErr != nil { + return lockErr + } + + defer unlock(&err) + + if err := ts.DeleteSrvKeyspacePartitions(ctx, keyspace, []*topo.ShardInfo{shard}, topodatapb.TabletType_RDONLY, []string{cell}); err != nil { + return err + } + + if err := ts.DeleteSrvKeyspacePartitions(ctx, keyspace, []*topo.ShardInfo{shard}, topodatapb.TabletType_REPLICA, []string{cell}); err != nil { + return err + } + + if err := ts.DeleteSrvKeyspacePartitions(ctx, keyspace, []*topo.ShardInfo{shard}, topodatapb.TabletType_MASTER, []string{cell}); err != nil { + return err + } + + return err +} diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go new file mode 100644 index 00000000000..3dfb3e64e4b --- /dev/null +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -0,0 +1,403 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reparentutil + +import ( + "context" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + + "vitess.io/vitess/go/event" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +// EmergencyReparenter performs EmergencyReparentShard operations. +type EmergencyReparenter struct { + ts *topo.Server + tmc tmclient.TabletManagerClient + logger logutil.Logger +} + +// EmergencyReparentOptions provides optional parameters to +// EmergencyReparentShard operations. Options are passed by value, so it is safe +// for callers to mutate and reuse options structs for multiple calls. +type EmergencyReparentOptions struct { + NewPrimaryAlias *topodatapb.TabletAlias + IgnoreReplicas sets.String + WaitReplicasTimeout time.Duration + + // Private options managed internally. We use value passing to avoid leaking + // these details back out. + + lockAction string +} + +// NewEmergencyReparenter returns a new EmergencyReparenter object, ready to +// perform EmergencyReparentShard operations using the given topo.Server, +// TabletManagerClient, and logger. +// +// Providing a nil logger instance is allowed. 
+func NewEmergencyReparenter(ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger) *EmergencyReparenter { + erp := EmergencyReparenter{ + ts: ts, + tmc: tmc, + logger: logger, + } + + if erp.logger == nil { + // Create a no-op logger so we can call functions on erp.logger without + // needing to constantly check for non-nil. + erp.logger = logutil.NewCallbackLogger(func(*logutilpb.Event) {}) + } + + return &erp +} + +// ReparentShard performs the EmergencyReparentShard operation on the given +// keyspace and shard. +func (erp *EmergencyReparenter) ReparentShard(ctx context.Context, keyspace string, shard string, opts EmergencyReparentOptions) (*events.Reparent, error) { + opts.lockAction = erp.getLockAction(opts.NewPrimaryAlias) + + ctx, unlock, err := erp.ts.LockShard(ctx, keyspace, shard, opts.lockAction) + if err != nil { + return nil, err + } + + defer unlock(&err) + + ev := &events.Reparent{} + defer func() { + switch err { + case nil: + event.DispatchUpdate(ev, "finished EmergencyReparentShard") + default: + event.DispatchUpdate(ev, "failed EmergencyReparentShard: "+err.Error()) + } + }() + + err = erp.reparentShardLocked(ctx, ev, keyspace, shard, opts) + + return ev, err +} + +func (erp *EmergencyReparenter) getLockAction(newPrimaryAlias *topodatapb.TabletAlias) string { + action := "EmergencyReparentShard" + + if newPrimaryAlias != nil { + action += fmt.Sprintf("(%v)", topoproto.TabletAliasString(newPrimaryAlias)) + } + + return action +} + +func (erp *EmergencyReparenter) promoteNewPrimary( + ctx context.Context, + ev *events.Reparent, + keyspace string, + shard string, + newPrimaryTabletAlias string, + tabletMap map[string]*topo.TabletInfo, + statusMap map[string]*replicationdatapb.StopReplicationStatus, + opts EmergencyReparentOptions, +) error { + erp.logger.Infof("promoting tablet %v to master", newPrimaryTabletAlias) + event.DispatchUpdate(ev, "promoting replica") + + newPrimaryTabletInfo, ok := tabletMap[newPrimaryTabletAlias] + if 
!ok { + return vterrors.Errorf(vtrpc.Code_INTERNAL, "attempted to promote master-elect %v that was not in the tablet map; this is an impossible situation", newPrimaryTabletAlias) + } + + rp, err := erp.tmc.PromoteReplica(ctx, newPrimaryTabletInfo.Tablet) + if err != nil { + return vterrors.Wrapf(err, "master-elect tablet %v failed to be upgraded to master: %v", newPrimaryTabletAlias, err) + } + + if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + } + + replCtx, replCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) + defer replCancel() + + event.DispatchUpdate(ev, "reparenting all tablets") + + // Create a context and cancel function to watch for the first successful + // SetMaster call on a replica. We use a background context so that this + // context is only ever Done when its cancel is called by the background + // goroutine we're about to spin up. + // + // Similarly, create a context and cancel for the replica waiter goroutine + // to signal when all replica goroutines have finished. In the case where at + // least one replica succeeds, replSuccessCtx will be canceled first, while + // allReplicasDoneCtx is guaranteed to be canceled within + // opts.WaitReplicasTimeout plus some jitter. 
+ replSuccessCtx, replSuccessCancel := context.WithCancel(context.Background()) + allReplicasDoneCtx, allReplicasDoneCancel := context.WithCancel(context.Background()) + + now := time.Now().UnixNano() + replWg := sync.WaitGroup{} + rec := concurrency.AllErrorRecorder{} + + handlePrimary := func(alias string, ti *topo.TabletInfo) error { + erp.logger.Infof("populating reparent journal on new master %v", alias) + return erp.tmc.PopulateReparentJournal(replCtx, ti.Tablet, now, opts.lockAction, newPrimaryTabletInfo.Alias, rp) + } + + handleReplica := func(alias string, ti *topo.TabletInfo) { + defer replWg.Done() + erp.logger.Infof("setting new master on replica %v", alias) + + forceStart := false + if status, ok := statusMap[alias]; ok { + fs, err := ReplicaWasRunning(status) + if err != nil { + err = vterrors.Wrapf(err, "tablet %v could not determine StopReplicationStatus: %v", alias, err) + rec.RecordError(err) + + return + } + + forceStart = fs + } + + err := erp.tmc.SetMaster(replCtx, ti.Tablet, newPrimaryTabletInfo.Alias, now, "", forceStart) + if err != nil { + err = vterrors.Wrapf(err, "tablet %v SetMaster failed: %v", alias, err) + rec.RecordError(err) + + return + } + + // Signal that at least one goroutine succeeded to SetMaster. + replSuccessCancel() + } + + numReplicas := 0 + + for alias, ti := range tabletMap { + switch { + case alias == newPrimaryTabletAlias: + continue + case !opts.IgnoreReplicas.Has(alias): + replWg.Add(1) + numReplicas++ + go handleReplica(alias, ti) + } + } + + // Spin up a background goroutine to wait until all replica goroutines + // finished. Polling this way allows us to have promoteNewPrimary return + // success as soon as (a) the primary successfully populates its reparent + // journal and (b) at least one replica successfully begins replicating. 
+ // + // If we were to follow the more common pattern of blocking on replWg.Wait() + // in the main body of promoteNewPrimary, we would be bound to the + // time of slowest replica, instead of the time of the fastest successful + // replica, and we want ERS to be fast. + go func() { + replWg.Wait() + allReplicasDoneCancel() + }() + + primaryErr := handlePrimary(newPrimaryTabletAlias, newPrimaryTabletInfo) + if primaryErr != nil { + erp.logger.Warningf("master failed to PopulateReparentJournal") + replCancel() + + return vterrors.Wrapf(primaryErr, "failed to PopulateReparentJournal on master: %v", primaryErr) + } + + select { + case <-replSuccessCtx.Done(): + // At least one replica was able to SetMaster successfully + return nil + case <-allReplicasDoneCtx.Done(): + // There are certain timing issues between replSuccessCtx.Done firing + // and allReplicasDoneCtx.Done firing, so we check again if truly all + // replicas failed (where `numReplicas` goroutines recorded an error) or + // one or more actually managed to succeed. + errCount := len(rec.Errors) + + switch { + case errCount > numReplicas: + // Technically, rec.Errors should never be greater than numReplicas, + // but it's better to err on the side of caution here, but also + // we're going to be explicit that this is doubly unexpected. 
+ return vterrors.Wrapf(rec.Error(), "received more errors (= %d) than replicas (= %d), which should be impossible: %v", errCount, numReplicas, rec.Error()) + case errCount == numReplicas: + return vterrors.Wrapf(rec.Error(), "%d replica(s) failed: %v", numReplicas, rec.Error()) + default: + return nil + } + } +} + +func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace string, shard string, opts EmergencyReparentOptions) error { + shardInfo, err := erp.ts.GetShard(ctx, keyspace, shard) + if err != nil { + return err + } + + ev.ShardInfo = *shardInfo + + event.DispatchUpdate(ev, "reading all tablets") + + tabletMap, err := erp.ts.GetTabletMapForShard(ctx, keyspace, shard) + if err != nil { + return vterrors.Wrapf(err, "failed to get tablet map for %v/%v: %v", keyspace, shard, err) + } + + statusMap, primaryStatusMap, err := StopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, opts.WaitReplicasTimeout, opts.IgnoreReplicas, erp.logger) + if err != nil { + return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err) + } + + if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + } + + validCandidates, err := FindValidEmergencyReparentCandidates(statusMap, primaryStatusMap) + if err != nil { + return err + } else if len(validCandidates) == 0 { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no valid candidates for emergency reparent") + } + + // Wait for all candidates to apply relay logs + if err := erp.waitForAllRelayLogsToApply(ctx, validCandidates, tabletMap, statusMap, opts); err != nil { + return err + } + + // Elect the candidate with the most up-to-date position. 
+ var ( + winningPosition mysql.Position + winningPrimaryTabletAliasStr string + ) + + for alias, position := range validCandidates { + if winningPosition.IsZero() || position.AtLeast(winningPosition) { + winningPosition = position + winningPrimaryTabletAliasStr = alias + } + } + + // If we were requested to elect a particular primary, verify it's a valid + // candidate (non-zero position, no errant GTIDs) and is at least as + // advanced as the winning position. + if opts.NewPrimaryAlias != nil { + winningPrimaryTabletAliasStr = topoproto.TabletAliasString(opts.NewPrimaryAlias) + pos, ok := validCandidates[winningPrimaryTabletAliasStr] + switch { + case !ok: + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "master elect %v has errant GTIDs", winningPrimaryTabletAliasStr) + case !pos.AtLeast(winningPosition): + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "master elect %v at position %v is not fully caught up. Winning position: %v", winningPrimaryTabletAliasStr, pos, winningPosition) + } + } + + // Check (again) we still have the topology lock. + if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + } + + // Do the promotion. 
+ if err := erp.promoteNewPrimary(ctx, ev, keyspace, shard, winningPrimaryTabletAliasStr, tabletMap, statusMap, opts); err != nil { + return err + } + + ev.NewMaster = *tabletMap[winningPrimaryTabletAliasStr].Tablet + + return nil +} + +func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( + ctx context.Context, + validCandidates map[string]mysql.Position, + tabletMap map[string]*topo.TabletInfo, + statusMap map[string]*replicationdatapb.StopReplicationStatus, + opts EmergencyReparentOptions, +) error { + errCh := make(chan error) + defer close(errCh) + + groupCtx, groupCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) + defer groupCancel() + + waiterCount := 0 + + for candidate := range validCandidates { + // When we called StopReplicationAndBuildStatusMaps, we got back two + // maps: (1) the StopReplicationStatus of any replicas that actually + // stopped replication; and (2) the MasterStatus of anything that + // returned ErrNotReplica, which is a tablet that is either the current + // primary or is stuck thinking it is a MASTER but is not in actuality. + // + // If we have a tablet in the validCandidates map that does not appear + // in the statusMap, then we have either (a) the current primary, which + // is not replicating, so it is not applying relay logs; or (b) a tablet + // that is stuck thinking it is MASTER but is not in actuality. In that + // second case - (b) - we will most likely find that the stuck MASTER + // does not have a winning position, and fail the ERS. If, on the other + // hand, it does have a winning position, we are trusting the operator + // to know what they are doing by emergency-reparenting onto that + // tablet. In either case, it does not make sense to wait for relay logs + // to apply on a tablet that was never applying relay logs in the first + // place, so we skip it, and log that we did. 
+ status, ok := statusMap[candidate] + if !ok { + erp.logger.Infof("EmergencyReparent candidate %v not in replica status map; this means it was not running replication (because it was formerly MASTER), so skipping WaitForRelayLogsToApply step for this candidate", candidate) + continue + } + + go func(alias string, status *replicationdatapb.StopReplicationStatus) { + var err error + defer func() { errCh <- err }() + err = WaitForRelayLogsToApply(groupCtx, erp.tmc, tabletMap[alias], status) + }(candidate, status) + + waiterCount++ + } + + errgroup := concurrency.ErrorGroup{ + NumGoroutines: waiterCount, + NumRequiredSuccesses: waiterCount, + NumAllowedErrors: 0, + } + rec := errgroup.Wait(groupCancel, errCh) + + if len(rec.Errors) != 0 { + return vterrors.Wrapf(rec.Error(), "could not apply all relay logs within the provided WaitReplicasTimeout (%s): %v", opts.WaitReplicasTimeout, rec.Error()) + } + + return nil +} diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go new file mode 100644 index 00000000000..31c47089460 --- /dev/null +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -0,0 +1,1752 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reparentutil + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/sets" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func TestNewEmergencyReparenter(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + logger logutil.Logger + }{ + { + name: "default case", + logger: logutil.NewMemoryLogger(), + }, + { + name: "overrides nil logger with no-op", + logger: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + er := NewEmergencyReparenter(nil, nil, tt.logger) + assert.NotNil(t, er.logger, "NewEmergencyReparenter should never result in a nil logger instance on the EmergencyReparenter") + }) + } +} + +func TestEmergencyReparenter_getLockAction(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + alias *topodatapb.TabletAlias + expected string + msg string + }{ + { + name: "explicit new primary specified", + alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + expected: "EmergencyReparentShard(zone1-0000000100)", + msg: "lockAction should include tablet alias", + }, + { + name: "user did not specify new primary elect", + alias: nil, + expected: "EmergencyReparentShard", + msg: "lockAction should omit parens when no primary elect passed", + }, + } + + erp := &EmergencyReparenter{} + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + actual := erp.getLockAction(tt.alias) + assert.Equal(t, tt.expected, 
actual, tt.msg) + }) + } +} + +func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + // setup + ts *topo.Server + tmc *testutil.TabletManagerClient + unlockTopo bool + shards []*vtctldatapb.Shard + tablets []*topodatapb.Tablet + // params + keyspace string + shard string + opts EmergencyReparentOptions + // results + shouldErr bool + }{ + { + name: "success", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000102": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000102": { + Result: "ok", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26": nil, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "most up-to-date position, wins election", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{}, + shouldErr: false, + }, + { + // Here, all our tablets are tied, so we're going to explicitly pick + // zone1-101. + name: "success with requested primary-elect", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000101": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000101": { + Result: "ok", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000102": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: 
&replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + shouldErr: false, + }, + { + name: "success with existing primary", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000102": nil, + }, + PromoteReplicaResults: 
map[string]struct { + Result string + Error error + }{ + "zone1-0000000102": { + Result: "ok", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { // This tablet claims MASTER, so is not running replication. + Error: mysql.ErrNotReplica, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26": nil, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "most up-to-date position, wins election", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{}, + 
shouldErr: false, + }, + { + name: "shard not found", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{}, + unlockTopo: true, // we shouldn't try to lock the nonexistent shard + shards: nil, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "cannot stop replication", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + // We actually need >1 to fail here. + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000101": { + Error: assert.AnError, + }, + "zone1-0000000102": { + Error: assert.AnError, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "lost topo lock", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{}, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{}, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{}, + }, + }, + }, + unlockTopo: true, + shards: []*vtctldatapb.Shard{ + { + 
Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "cannot get reparent candidates", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{}, + }, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "has a zero relay log 
position", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "zero valid reparent candidates", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{}, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "error waiting for relay logs to apply", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + }, + WaitForPositionDelays: map[string]time.Duration{ + "zone1-0000000101": time.Minute, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": assert.AnError, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: 
"testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "slow to apply relay logs", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "fails to apply relay logs", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 50, // one replica is going to take a minute to apply relay logs + }, + shouldErr: true, + }, + { + name: "requested primary-elect is not in tablet map", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + shouldErr: true, + }, + { + name: "requested primary-elect is not winning primary-elect", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20": nil, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "not most up-to-date position", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ // we're requesting a tablet that's behind in replication + Cell: "zone1", + Uid: 102, + }, + }, + shouldErr: true, + }, + { + name: "cannot promote new primary", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000102": { + Error: assert.AnError, + }, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: 
&replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + }, + }, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "not most up-to-date position", + }, + }, + keyspace: "testkeyspace", + shard: "-", + opts: EmergencyReparentOptions{ + // We're explicitly requesting a primary-elect in this test case + // because we don't care about the correctness of the selection + // code (it's covered by other test cases), and it simplifies + // the error mocking. + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + ev := &events.Reparent{} + + testutil.AddShards(ctx, t, tt.ts, tt.shards...) + testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) 
+ + if !tt.unlockTopo { + lctx, unlock, lerr := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + require.NoError(t, lerr, "could not lock %s/%s for testing", tt.keyspace, tt.shard) + + defer func() { + unlock(&lerr) + require.NoError(t, lerr, "could not unlock %s/%s after test", tt.keyspace, tt.shard) + }() + + ctx = lctx // make the reparentShardLocked call use the lock ctx + } + + erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + + err := erp.reparentShardLocked(ctx, ev, tt.keyspace, tt.shard, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + }) + } +} + +func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc *testutil.TabletManagerClient + unlockTopo bool + keyspace string + shard string + newPrimaryTabletAlias string + tabletMap map[string]*topo.TabletInfo + statusMap map[string]*replicationdatapb.StopReplicationStatus + opts EmergencyReparentOptions + shouldErr bool + }{ + { + name: "success", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, + "zone1-0000000102": nil, + "zone1-0000000404": assert.AnError, // okay, because we're ignoring it. 
+ }, + }, + keyspace: "testkeyspace", + shard: "-", + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { // forceStart = false + Before: &replicationdatapb.Status{ + IoThreadRunning: false, + SqlThreadRunning: false, + }, + }, + "zone1-0000000102": { // forceStart = true + Before: &replicationdatapb.Status{ + IoThreadRunning: true, + SqlThreadRunning: true, + }, + }, + }, + opts: EmergencyReparentOptions{ + IgnoreReplicas: sets.NewString("zone1-0000000404"), + }, + shouldErr: false, + }, + { + name: "primary not in tablet map", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{}, + keyspace: "testkeyspace", + shard: "-", + newPrimaryTabletAlias: "zone2-0000000200", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": {}, + "zone1-0000000101": {}, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "PromoteReplica error", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + }, + }, + keyspace: "testkeyspace", + shard: "-", + newPrimaryTabletAlias: 
"zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "lost topology lock", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + }, + unlockTopo: true, + keyspace: "testkeyspace", + shard: "-", + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "cannot repopulate reparent journal on new primary", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + }, + keyspace: "testkeyspace", + shard: "-", + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + 
}, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "all replicas failing to SetMaster does fail the promotion", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + // everyone fails, we all fail + "zone1-0000000101": assert.AnError, + "zone1-0000000102": assert.AnError, + }, + }, + keyspace: "testkeyspace", + shard: "-", + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-00000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + opts: EmergencyReparentOptions{}, + shouldErr: true, + }, + { + name: "all replicas slow to SetMaster does fail the promotion", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterDelays: map[string]time.Duration{ + // nothing is failing, we're just slow + "zone1-0000000101": time.Millisecond * 100, + "zone1-0000000102": time.Millisecond * 75, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, + "zone1-0000000102": nil, + }, + }, + keyspace: "testkeyspace", + shard: "-", 
+ newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + opts: EmergencyReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 10, + }, + shouldErr: true, + }, + { + name: "one replica failing to SetMaster does not fail the promotion", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, // this one succeeds, so we're good + "zone1-0000000102": assert.AnError, + }, + }, + keyspace: "testkeyspace", + shard: "-", + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + shouldErr: false, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + ev := &events.Reparent{} + 
+ testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + Keyspace: tt.keyspace, + Name: tt.shard, + }) + + if !tt.unlockTopo { + var ( + unlock func(*error) + lerr error + ) + + ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) + + defer func() { + unlock(&lerr) + require.NoError(t, lerr, "could not unlock %s/%s after test", tt.keyspace, tt.shard) + }() + } + + erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + + err := erp.promoteNewPrimary(ctx, ev, tt.keyspace, tt.shard, tt.newPrimaryTabletAlias, tt.tabletMap, tt.statusMap, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + }) + } +} + +func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + opts := EmergencyReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 50, + } + tests := []struct { + name string + tmc *testutil.TabletManagerClient + candidates map[string]mysql.Position + tabletMap map[string]*topo.TabletInfo + statusMap map[string]*replicationdatapb.StopReplicationStatus + shouldErr bool + }{ + { + name: "all tablet pass", + tmc: &testutil.TabletManagerClient{ + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "position1": nil, + }, + "zone1-0000000101": { + "position1": nil, + }, + }, + }, + candidates: map[string]mysql.Position{ + "zone1-0000000100": {}, + "zone1-0000000101": {}, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + After: 
&replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + "zone1-0000000101": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + }, + shouldErr: false, + }, + { + name: "one tablet fails", + tmc: &testutil.TabletManagerClient{ + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "position1": nil, + }, + "zone1-0000000101": { + "position1": nil, + }, + }, + }, + candidates: map[string]mysql.Position{ + "zone1-0000000100": {}, + "zone1-0000000101": {}, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + "zone1-0000000101": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position2", // cannot wait for the desired "position1", so we fail + }, + }, + }, + shouldErr: true, + }, + { + name: "multiple tablets fail", + tmc: &testutil.TabletManagerClient{ + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "position1": nil, + }, + "zone1-0000000101": { + "position2": nil, + }, + "zone1-0000000102": { + "position3": nil, + }, + }, + }, + candidates: map[string]mysql.Position{ + "zone1-0000000100": {}, + "zone1-0000000101": {}, + "zone1-0000000102": {}, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: 
&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + "zone1-0000000101": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + "zone1-0000000102": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + }, + shouldErr: true, + }, + { + name: "one slow tablet", + tmc: &testutil.TabletManagerClient{ + WaitForPositionDelays: map[string]time.Duration{ + "zone1-0000000101": time.Minute, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "position1": nil, + }, + "zone1-0000000101": { + "position1": nil, + }, + }, + }, + candidates: map[string]mysql.Position{ + "zone1-0000000100": {}, + "zone1-0000000101": {}, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + "zone1-0000000101": { + After: &replicationdatapb.Status{ + RelayLogPosition: "position1", + }, + }, + }, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + erp := NewEmergencyReparenter(nil, tt.tmc, logger) + err := erp.waitForAllRelayLogsToApply(ctx, tt.candidates, tt.tabletMap, tt.statusMap, opts) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + }) + } +} diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go new file mode 100644 index 
00000000000..100d1e3d4a1 --- /dev/null +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -0,0 +1,627 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reparentutil + +import ( + "context" + "fmt" + "sync" + "time" + + "vitess.io/vitess/go/event" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +// PlannedReparenter performs PlannedReparentShard operations. +type PlannedReparenter struct { + ts *topo.Server + tmc tmclient.TabletManagerClient + logger logutil.Logger +} + +// PlannedReparentOptions provides optional parameters to PlannedReparentShard +// operations. Options are passed by value, so it is safe for callers to mutate +// resue options structs for multiple calls. +type PlannedReparentOptions struct { + NewPrimaryAlias *topodatapb.TabletAlias + AvoidPrimaryAlias *topodatapb.TabletAlias + WaitReplicasTimeout time.Duration + + // Private options managed internally. We use value-passing semantics to + // set these options inside a PlannedReparent without leaking these details + // back out to the caller. 
+ + lockAction string +} + +// NewPlannedReparenter returns a new PlannedReparenter object, ready to perform +// PlannedReparentShard operations using the given topo.Server, +// TabletManagerClient, and logger. +// +// Providing a nil logger instance is allowed. +func NewPlannedReparenter(ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger) *PlannedReparenter { + pr := PlannedReparenter{ + ts: ts, + tmc: tmc, + logger: logger, + } + + if pr.logger == nil { + // Create a no-op logger so we can call functions on pr.logger without + // needing to constantly check it for non-nil first. + pr.logger = logutil.NewCallbackLogger(func(e *logutilpb.Event) {}) + } + + return &pr +} + +// ReparentShard performs a PlannedReparentShard operation on the given keyspace +// and shard. It will make the provided tablet the primary for the shard, when +// both the current and desired primary are reachable and in a good state. +func (pr *PlannedReparenter) ReparentShard(ctx context.Context, keyspace string, shard string, opts PlannedReparentOptions) (*events.Reparent, error) { + opts.lockAction = pr.getLockAction(opts) + + ctx, unlock, err := pr.ts.LockShard(ctx, keyspace, shard, opts.lockAction) + if err != nil { + return nil, err + } + + defer unlock(&err) + + if opts.NewPrimaryAlias == nil && opts.AvoidPrimaryAlias == nil { + shardInfo, err := pr.ts.GetShard(ctx, keyspace, shard) + if err != nil { + return nil, err + } + + opts.AvoidPrimaryAlias = shardInfo.MasterAlias + } + + ev := &events.Reparent{} + defer func() { + switch err { + case nil: + event.DispatchUpdate(ev, "finished PlannedReparentShard") + default: + event.DispatchUpdate(ev, "failed PlannedReparentShard: "+err.Error()) + } + }() + + err = pr.reparentShardLocked(ctx, ev, keyspace, shard, opts) + + return ev, err +} + +func (pr *PlannedReparenter) getLockAction(opts PlannedReparentOptions) string { + return fmt.Sprintf( + "PlannedReparentShard(%v, AvoidPrimary = %v)", + 
topoproto.TabletAliasString(opts.NewPrimaryAlias), + topoproto.TabletAliasString(opts.AvoidPrimaryAlias), + ) +} + +// preflightChecks checks some invariants that pr.reparentShardLocked() depends +// on. It returns a boolean to indicate if the reparent is a no-op (which +// happens iff the caller specified an AvoidPrimaryAlias and it's not the shard +// primary), as well as an error. +// +// It will also set the NewPrimaryAlias option if the caller did not specify +// one, provided it can choose a new primary candidate. See ChooseNewPrimary() +// for details on primary candidate selection. +func (pr *PlannedReparenter) preflightChecks( + ctx context.Context, + ev *events.Reparent, + keyspace string, + shard string, + tabletMap map[string]*topo.TabletInfo, + opts *PlannedReparentOptions, // we take a pointer here to set NewPrimaryAlias +) (isNoop bool, err error) { + if topoproto.TabletAliasEqual(opts.NewPrimaryAlias, opts.AvoidPrimaryAlias) { + return true, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v is the same as the tablet to avoid", topoproto.TabletAliasString(opts.NewPrimaryAlias)) + } + + if opts.NewPrimaryAlias == nil { + if !topoproto.TabletAliasEqual(opts.AvoidPrimaryAlias, ev.ShardInfo.MasterAlias) { + event.DispatchUpdate(ev, "current primary is different than AvoidPrimary, nothing to do") + return true, nil + } + + event.DispatchUpdate(ev, "searching for primary candidate") + + opts.NewPrimaryAlias, err = ChooseNewPrimary(ctx, pr.tmc, &ev.ShardInfo, tabletMap, opts.AvoidPrimaryAlias, opts.WaitReplicasTimeout, pr.logger) + if err != nil { + return true, err + } + + if opts.NewPrimaryAlias == nil { + return true, vterrors.Errorf(vtrpc.Code_INTERNAL, "cannot find a tablet to reparent to") + } + + pr.logger.Infof("elected new primary candidate %v", topoproto.TabletAliasString(opts.NewPrimaryAlias)) + event.DispatchUpdate(ev, "elected new primary candidate") + } + + primaryElectAliasStr := 
topoproto.TabletAliasString(opts.NewPrimaryAlias) + + newPrimaryTabletInfo, ok := tabletMap[primaryElectAliasStr] + if !ok { + return true, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v is not in the shard", primaryElectAliasStr) + } + + ev.NewMaster = *newPrimaryTabletInfo.Tablet + + if topoproto.TabletAliasIsZero(ev.ShardInfo.MasterAlias) { + return true, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "the shard has no current primary, use EmergencyReparentShard instead") + } + + return false, nil +} + +func (pr *PlannedReparenter) performGracefulPromotion( + ctx context.Context, + ev *events.Reparent, + keyspace string, + shard string, + currentPrimary *topo.TabletInfo, + primaryElect topodatapb.Tablet, + tabletMap map[string]*topo.TabletInfo, + opts PlannedReparentOptions, +) (string, error) { + primaryElectAliasStr := topoproto.TabletAliasString(primaryElect.Alias) + ev.OldMaster = *currentPrimary.Tablet + + // Before demoting the old primary, we're going to ensure that replication + // is working from the old primary to the primary-elect. If replication is + // not working, a PlannedReparent is not safe to do, because the candidate + // won't catch up and we'll potentially miss transactions. + pr.logger.Infof("checking replication on primary-elect %v", primaryElectAliasStr) + + // First, we find the position of the current primary. Note that this is + // just a snapshot of the position, since we let it keep accepting writes + // until we're sure we want to proceed with the promotion. 
+ snapshotCtx, snapshotCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer snapshotCancel() + + snapshotPos, err := pr.tmc.MasterPosition(snapshotCtx, currentPrimary.Tablet) + if err != nil { + return "", vterrors.Wrapf(err, "cannot get replication position on current primary %v; current primary must be healthy to perform PlannedReparent", currentPrimary.AliasString()) + } + + // Next, we wait for the primary-elect to catch up to that snapshot point. + // If it can catch up within WaitReplicasTimeout, we can be fairly + // confident that it will catch up on everything else that happens between + // the snapshot point we grabbed above and when we demote the old primary + // below. + // + // We do this as an idempotent SetMaster to make sure the replica knows who + // the current primary is. + setMasterCtx, setMasterCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) + defer setMasterCancel() + + if err := pr.tmc.SetMaster(setMasterCtx, &primaryElect, currentPrimary.Alias, 0, snapshotPos, true); err != nil { + return "", vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr) + } + + // Verify we still have the topology lock before doing the demotion. + if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return "", vterrors.Wrap(err, "lost topology lock; aborting") + } + + // Next up, demote the current primary and get its replication position. + // It's fine if the current primary was already demoted, since DemoteMaster + // is idempotent. 
+ pr.logger.Infof("demoting current primary: %v", currentPrimary.AliasString()) + event.DispatchUpdate(ev, "demoting old primary") + + demoteCtx, demoteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer demoteCancel() + + masterStatus, err := pr.tmc.DemoteMaster(demoteCtx, currentPrimary.Tablet) + if err != nil { + return "", vterrors.Wrapf(err, "failed to DemoteMaster on current primary %v: %v", currentPrimary.AliasString(), err) + } + + // Wait for the primary-elect to catch up to the position we demoted the + // current primary at. If it fails to catch up within WaitReplicasTimeout, + // we will try to roll back to the original primary before aborting. + waitCtx, waitCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) + defer waitCancel() + + waitErr := pr.tmc.WaitForPosition(waitCtx, &primaryElect, masterStatus.Position) + + // Do some wrapping of errors to get the right codes and callstacks. + var finalWaitErr error + switch { + case waitErr != nil: + finalWaitErr = vterrors.Wrapf(waitErr, "primary-elect tablet %v failed to catch up with replication %v", primaryElectAliasStr, masterStatus.Position) + case ctx.Err() == context.DeadlineExceeded: + finalWaitErr = vterrors.New(vtrpc.Code_DEADLINE_EXCEEDED, "PlannedReparent timed out; please try again") + } + + if finalWaitErr != nil { + // It's possible that we've used up the calling context's timeout, or + // that not enough time is left on the it to finish the rollback. + // We create a new background context to avoid a partial rollback, which + // could leave the cluster in a worse state than when we started. 
+ undoCtx, undoCancel := context.WithTimeout(context.Background(), *topo.RemoteOperationTimeout) + defer undoCancel() + + if undoErr := pr.tmc.UndoDemoteMaster(undoCtx, currentPrimary.Tablet); undoErr != nil { + pr.logger.Warningf("encountered error while performing UndoDemoteMaster(%v): %v", currentPrimary.AliasString(), undoErr) + finalWaitErr = vterrors.Wrapf(finalWaitErr, "encountered error while performing UndoDemoteMaster(%v): %v", currentPrimary.AliasString(), undoErr) + } + + return "", finalWaitErr + } + + // Primary-elect is caught up to the current primary. We can do the + // promotion now. + promoteCtx, promoteCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) + defer promoteCancel() + + rp, err := pr.tmc.PromoteReplica(promoteCtx, &primaryElect) + if err != nil { + return "", vterrors.Wrapf(err, "primary-elect tablet %v failed to be promoted to primary; please try again", primaryElectAliasStr) + } + + if ctx.Err() == context.DeadlineExceeded { + // PromoteReplica succeeded, but we ran out of time. PRS needs to be + // re-run to complete fully. + return "", vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "PLannedReparent timed out after successfully promoting primary-elect %v; please re-run to fix up the replicas", primaryElectAliasStr) + } + + return rp, nil +} + +func (pr *PlannedReparenter) performPartialPromotionRecovery(ctx context.Context, primaryElect topodatapb.Tablet) (string, error) { + // It's possible that a previous attempt to reparent failed to SetReadWrite, + // so call it here to make sure the underlying MySQL is read-write on the + // candidate primary. 
+ setReadWriteCtx, setReadWriteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer setReadWriteCancel() + + if err := pr.tmc.SetReadWrite(setReadWriteCtx, &primaryElect); err != nil { + return "", vterrors.Wrapf(err, "failed to SetReadWrite on current primary %v", topoproto.TabletAliasString(primaryElect.Alias)) + } + + // The primary is already the one we want according to its tablet record. + refreshCtx, refreshCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer refreshCancel() + + // Get the replication position so we can try to fix the replicas (back in + // reparentShardLocked()) + reparentJournalPosition, err := pr.tmc.MasterPosition(refreshCtx, &primaryElect) + if err != nil { + return "", vterrors.Wrapf(err, "failed to get replication position of current primary %v", topoproto.TabletAliasString(primaryElect.Alias)) + } + + return reparentJournalPosition, nil +} + +func (pr *PlannedReparenter) performPotentialPromotion( + ctx context.Context, + keyspace string, + shard string, + primaryElect topodatapb.Tablet, + tabletMap map[string]*topo.TabletInfo, +) (string, error) { + primaryElectAliasStr := topoproto.TabletAliasString(primaryElect.Alias) + + pr.logger.Infof("no clear winner found for current master term; checking if it's safe to recover by electing %v", primaryElectAliasStr) + + type tabletPos struct { + alias string + tablet *topodatapb.Tablet + pos mysql.Position + } + + positions := make(chan tabletPos, len(tabletMap)) + + // First, stop the world, to ensure no writes are happening anywhere. We + // don't trust that we know which tablets might be acting as primaries, so + // we simply demote everyone. + // + // Unlike the normal, single-primary case, we don't try to undo this if we + // fail. If we've made it here, it means there is no clear primary, so we + // don't know who it's safe to roll back to. 
Leaving everything read-only is + // probably safer, or at least no worse, than whatever weird state we were + // in before. + // + // If any tablets are unreachable, we can't be sure it's safe either, + // because one of the unreachable tablets might have a replication position + // further ahead than the candidate primary. + + var ( + stopAllWg sync.WaitGroup + rec concurrency.AllErrorRecorder + ) + + stopAllCtx, stopAllCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer stopAllCancel() + + for alias, tabletInfo := range tabletMap { + stopAllWg.Add(1) + + go func(alias string, tablet *topodatapb.Tablet) { + defer stopAllWg.Done() + + // Regardless of what type this tablet thinks it is, we will always + // call DemoteMaster to ensure the underlying MySQL server is in + // read-only, and to check its replication position. DemoteMaster is + // idempotent, so it's fine to call it on a replica (or other + // tablet type), that's already in read-only. + pr.logger.Infof("demoting tablet %v", alias) + + masterStatus, err := pr.tmc.DemoteMaster(stopAllCtx, tablet) + if err != nil { + rec.RecordError(vterrors.Wrapf(err, "DemoteMaster(%v) failed on contested primary", alias)) + + return + } + + pos, err := mysql.DecodePosition(masterStatus.Position) + if err != nil { + rec.RecordError(vterrors.Wrapf(err, "cannot decode replication position (%v) for demoted tablet %v", masterStatus.Position, alias)) + + return + } + + positions <- tabletPos{ + alias: alias, + tablet: tablet, + pos: pos, + } + }(alias, tabletInfo.Tablet) + } + + stopAllWg.Wait() + close(positions) + + if rec.HasErrors() { + return "", vterrors.Wrap(rec.Error(), "failed to demote all tablets") + } + + // Construct a mapping of alias to tablet position. + tabletPosMap := make(map[string]tabletPos, len(tabletMap)) + for tp := range positions { + tabletPosMap[tp.alias] = tp + } + + // Make sure no tablet has a more advanced position than the candidate + // primary. 
It's up to the caller to choose a suitable candidate, and to + // choose another if this check fails. + // + // Note that we still allow replication to run during this time, but we + // assume that no new high water mark can appear because we just demoted all + // tablets to read-only, so there should be no new transactions. + // + // TODO: consider temporarily replicating from another tablet to catch up, + // if the candidate primary is behind that tablet. + tp, ok := tabletPosMap[primaryElectAliasStr] + if !ok { + return "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v not found in tablet map", primaryElectAliasStr) + } + + primaryElectPos := tp.pos + + for _, tp := range tabletPosMap { + // The primary-elect pos has to be at least as advanced as every tablet + // in the shard. + if !primaryElectPos.AtLeast(tp.pos) { + return "", vterrors.Errorf( + vtrpc.Code_FAILED_PRECONDITION, + "tablet %v (position: %v) contains transactions not found in primary-elect %v (position: %v)", + tp.alias, tp.pos, primaryElectAliasStr, primaryElectPos, + ) + } + } + + // Check that we still have the topology lock. + if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return "", vterrors.Wrap(err, "lost topology lock; aborting") + } + + // Promote the candidate primary to type:MASTER. 
+ promoteCtx, promoteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer promoteCancel() + + rp, err := pr.tmc.PromoteReplica(promoteCtx, &primaryElect) + if err != nil { + return "", vterrors.Wrapf(err, "failed to promote %v to primary", primaryElectAliasStr) + } + + return rp, nil +} + +func (pr *PlannedReparenter) reparentShardLocked( + ctx context.Context, + ev *events.Reparent, + keyspace string, + shard string, + opts PlannedReparentOptions, +) error { + shardInfo, err := pr.ts.GetShard(ctx, keyspace, shard) + if err != nil { + return err + } + + ev.ShardInfo = *shardInfo + + event.DispatchUpdate(ev, "reading tablet map") + + tabletMap, err := pr.ts.GetTabletMapForShard(ctx, keyspace, shard) + if err != nil { + return err + } + + // Check invariants that PlannedReparentShard depends on. + if isNoop, err := pr.preflightChecks(ctx, ev, keyspace, shard, tabletMap, &opts); err != nil { + return err + } else if isNoop { + return nil + } + + currentPrimary := FindCurrentPrimary(tabletMap, pr.logger) + reparentJournalPos := "" + + // Depending on whether we can find a current primary, and what the caller + // specified as the candidate primary, we will do one of three kinds of + // promotions: + // + // 1) There is no clear current primary. In this case we will try to + // determine if it's safe to promote the candidate specified by the caller. + // If it's not -- including if any tablet in the shard is unreachable -- we + // bail. We also don't attempt to rollback a failed demotion in this case. + // + // 2) The current primary is the same as the candidate primary specified by + // the caller. In this case, we assume there was a previous PRS for this + // primary, and the caller is re-issuing the call to fix-up any replicas. We + // also idempotently set the desired primary as read-write, just in case. + // + // 3) The current primary and the desired primary differ. 
In this case, we + // perform a graceful promotion, in which we validate the desired primary is + // sufficiently up-to-date, demote the current primary, wait for the desired + // primary to catch up to that position, and set the desired primary + // read-write. We will attempt to rollback a failed demotion in this case, + // unlike in case (1), because we have a known good state to rollback to. + // + // In all cases, we will retrieve the reparent journal position that was + // inserted in the new primary's journal, so we can use it below to check + // that all the replicas have attached to new primary successfully. + switch { + case currentPrimary == nil: + // Case (1): no clear current primary. Try to find a safe promotion + // candidate, and promote to it. + reparentJournalPos, err = pr.performPotentialPromotion(ctx, keyspace, shard, ev.NewMaster, tabletMap) + case topoproto.TabletAliasEqual(currentPrimary.Alias, opts.NewPrimaryAlias): + // Case (2): desired new primary is the current primary. Attempt to fix + // up replicas to recover from a previous partial promotion. + reparentJournalPos, err = pr.performPartialPromotionRecovery(ctx, ev.NewMaster) + default: + // Case (3): desired primary and current primary differ. Do a graceful + // demotion-then-promotion. 
+ reparentJournalPos, err = pr.performGracefulPromotion(ctx, ev, keyspace, shard, currentPrimary, ev.NewMaster, tabletMap, opts) + } + + if err != nil { + return err + } + + if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return vterrors.Wrap(err, "lost topology lock, aborting") + } + + if err := pr.reparentTablets(ctx, ev, reparentJournalPos, tabletMap, opts); err != nil { + return err + } + + return nil +} + +func (pr *PlannedReparenter) reparentTablets( + ctx context.Context, + ev *events.Reparent, + reparentJournalPosition string, + tabletMap map[string]*topo.TabletInfo, + opts PlannedReparentOptions, +) error { + // Create a cancellable context for the entire set of reparent operations. + // If any error conditions happen, we can cancel all outgoing RPCs. + replCtx, replCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) + defer replCancel() + + // Go thorugh all the tablets. + // - New primary: populate the reparent journal. + // - Everybody else: reparent to the new primary; wait for the reparent + // journal row. + event.DispatchUpdate(ev, "reparenting all tablets") + + // We add a (hopefully) unique record to the reparent journal table on the + // new primary, so we can check if replicas got it through replication. + reparentJournalTimestamp := time.Now().UnixNano() + primaryElectAliasStr := topoproto.TabletAliasString(ev.NewMaster.Alias) + replicasWg := sync.WaitGroup{} + rec := concurrency.AllErrorRecorder{} + + // Point all replicas at the new primary and check that they receive the + // reparent journal entry, proving that they are replicating from the new + // primary. We do this concurrently with adding the journal entry (after + // this loop), because if semi-sync is enabled, the update to the journal + // table will block until at least one replica is successfully attached to + // the new primary. 
+ for alias, tabletInfo := range tabletMap { + if alias == primaryElectAliasStr { + continue + } + + replicasWg.Add(1) + + go func(alias string, tablet *topodatapb.Tablet) { + defer replicasWg.Done() + pr.logger.Infof("setting new primary on replica %v", alias) + + // Note: we used to force replication to start on the old primary, + // but now that we support "resuming" a previously-failed PRS + // attempt, we can no longer assume that we know who the former + // primary was. Instead, we rely on the former primary to remember + // that it needs to start replication after transitioning from + // MASTER => REPLICA. + forceStartReplication := false + if err := pr.tmc.SetMaster(replCtx, tablet, ev.NewMaster.Alias, reparentJournalTimestamp, "", forceStartReplication); err != nil { + rec.RecordError(vterrors.Wrapf(err, "tablet %v failed to SetMaster(%v): %v", alias, primaryElectAliasStr, err)) + } + }(alias, tabletInfo.Tablet) + } + + // Add a reparent journal entry on the new primary. If semi-sync is enabled, + // this blocks until at least one replica is reparented (above) and + // successfully replicating from the new primary. + // + // If we fail to populate the reparent journal, there's no way the replicas + // will work, so we cancel the ongoing reparent RPCs and bail out. + pr.logger.Infof("populating reparent journal on new primary %v", primaryElectAliasStr) + if err := pr.tmc.PopulateReparentJournal(replCtx, &ev.NewMaster, reparentJournalTimestamp, "PlannedReparentShard", ev.NewMaster.Alias, reparentJournalPosition); err != nil { + pr.logger.Warningf("primary failed to PopulateReparentJournal (position: %v); cancelling replica reparent attempts", reparentJournalPosition) + replCancel() + replicasWg.Wait() + + return vterrors.Wrapf(err, "failed PopulateReparentJournal(primary=%v, ts=%v, pos=%v): %v", primaryElectAliasStr, reparentJournalTimestamp, reparentJournalPosition, err) + } + + // Reparent journal has been populated on the new primary. 
We just need to + // wait for all the replicas to receive it. + replicasWg.Wait() + + if err := rec.Error(); err != nil { + msg := "some replicas failed to reparent; retry PlannedReparentShard with the same new primary alias (%v) to retry failed replicas" + pr.logger.Errorf2(err, msg, primaryElectAliasStr) + return vterrors.Wrapf(err, msg, primaryElectAliasStr) + } + + return nil +} diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_test.go new file mode 100644 index 00000000000..805e685295d --- /dev/null +++ b/go/vt/vtctl/reparentutil/planned_reparenter_test.go @@ -0,0 +1,3182 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reparentutil + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +func TestNewPlannedReparenter(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + logger logutil.Logger + }{ + { + name: "default case", + logger: logutil.NewMemoryLogger(), + }, + { + name: "overrides nil logger with no-op", + logger: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + er := NewPlannedReparenter(nil, nil, tt.logger) + assert.NotNil(t, er.logger, "NewPlannedReparenter should never result in a nil logger instance on the EmergencyReparenter") + }) + } +} + +func TestPlannedReparenter_ReparentShard(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + lockShardBeforeTest bool + + keyspace string + shard string + opts PlannedReparentOptions + + expectedEvent *events.Reparent + shouldErr bool + }{ + { + name: "success", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "position1", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + SetReadWriteResults: 
map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + + shouldErr: false, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "cannot lock shard", + ts: memorytopo.NewServer("zone1"), + tmc: nil, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + lockShardBeforeTest: true, + + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{}, + + expectedEvent: nil, + shouldErr: true, + }, + { + // The simplest setup required to make an overall ReparentShard call + // fail is to set NewPrimaryAlias = AvoidPrimaryAlias, which will + // fail the preflight checks. Other functions are unit-tested + // thoroughly to cover all the cases. 
+ name: "reparent fails", + ts: memorytopo.NewServer("zone1"), + tmc: nil, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + Type: topodatapb.TabletType_MASTER, + }, + }, + + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + IsMasterServing: true, + KeyRange: &topodatapb.KeyRange{}, + }, nil), + }, + shouldErr: true, + }, + } + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := ctx + + testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + SkipShardCreation: false, + }, tt.tablets...) 
+ + if tt.lockShardBeforeTest { + lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for test") + require.NoError(t, err, "could not lock %s/%s for test case", tt.keyspace, tt.shard) + + defer func() { + unlock(&err) + require.NoError(t, err, "could not unlock %s/%s after test case", tt.keyspace, tt.shard) + }() + + ctx = lctx + } + + pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + ev, err := pr.ReparentShard(ctx, tt.keyspace, tt.shard, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + AssertReparentEventsEqual(t, tt.expectedEvent, ev) + + if ev != nil { + assert.Contains(t, ev.Status, "failed PlannedReparentShard", "expected event status to indicate failed PRS") + } + + return + } + + assert.NoError(t, err) + AssertReparentEventsEqual(t, tt.expectedEvent, ev) + assert.Contains(t, ev.Status, "finished PlannedReparentShard", "expected event status to indicate successful PRS") + }) + } +} + +func TestPlannedReparenter_getLockAction(t *testing.T) { + t.Parallel() + + pr := &PlannedReparenter{} + tests := []struct { + name string + opts PlannedReparentOptions + expected string + }{ + { + name: "no options", + opts: PlannedReparentOptions{}, + expected: "PlannedReparentShard(, AvoidPrimary = )", + }, + { + name: "desired primary only", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expected: "PlannedReparentShard(zone1-0000000100, AvoidPrimary = )", + }, + { + name: "avoid-primary only", + opts: PlannedReparentOptions{ + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, + expected: "PlannedReparentShard(, AvoidPrimary = zone1-0000000500)", + }, + { + name: "all options specified", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, + expected: "PlannedReparentShard(zone1-0000000100, AvoidPrimary = 
zone1-0000000500)", + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + actual := pr.getLockAction(tt.opts) + assert.Equal(t, tt.expected, actual) + }) + } +} + +func TestPlannedReparenter_preflightChecks(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + + ev *events.Reparent + keyspace string + shard string + tabletMap map[string]*topo.TabletInfo + opts *PlannedReparentOptions + + expectedIsNoop bool + expectedEvent *events.Reparent + expectedOpts *PlannedReparentOptions + shouldErr bool + }{ + { + name: "invariants hold", + ev: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + opts: &PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expectedIsNoop: false, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + shouldErr: false, + }, + { + name: "invariants hold with primary selection", + tmc: &testutil.TabletManagerClient{ + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ + "zone1-0000000100": { // most advanced position + Position: &replicationdatapb.Status{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + "zone1-0000000101": { + Position: &replicationdatapb.Status{ + Position: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + }, + ev: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000500": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + opts: &PlannedReparentOptions{ + // Avoid the current primary. + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, + expectedIsNoop: false, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + expectedOpts: &PlannedReparentOptions{ + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + // NewPrimaryAlias gets populated by the preflightCheck code + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + shouldErr: false, + }, + { + name: "new-primary and avoid-primary match", + opts: &PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expectedIsNoop: true, + shouldErr: true, + }, + { + name: "current shard primary is not avoid-primary", + ev: 
&events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + }, + opts: &PlannedReparentOptions{ + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + expectedIsNoop: true, // nothing to do, but not an error! + shouldErr: false, + }, + { + // this doesn't cause an actual error from ChooseNewPrimary, because + // the only way to do that is to set AvoidPrimaryAlias == nil, and + // that gets checked in preflightChecks before calling + // ChooseNewPrimary for other reasons. However, we do check that we + // get a non-nil result from ChooseNewPrimary in preflightChecks and + // bail out if we don't, so we're forcing that case here. + name: "cannot choose new primary-elect", + ev: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + opts: &PlannedReparentOptions{ + AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expectedIsNoop: true, + shouldErr: true, + }, + { + name: "primary-elect is not in tablet map", + ev: &events.Reparent{}, + tabletMap: map[string]*topo.TabletInfo{}, + opts: &PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expectedIsNoop: true, + shouldErr: true, + }, + { + name: "shard has no current primary", + ev: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: nil, + }, nil), + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + 
opts: &PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expectedIsNoop: true, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: nil, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + shouldErr: true, + }, + } + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + defer func() { + if tt.expectedEvent != nil { + AssertReparentEventsEqualWithMessage(t, tt.expectedEvent, tt.ev, "expected preflightChecks to mutate the passed-in event") + } + + if tt.expectedOpts != nil { + assert.Equal(t, tt.expectedOpts, tt.opts, "expected preflightChecks to mutate the passed in PlannedReparentOptions") + } + }() + + pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + isNoop, err := pr.preflightChecks(ctx, tt.ev, tt.keyspace, tt.shard, tt.tabletMap, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + assert.Equal(t, tt.expectedIsNoop, isNoop, "preflightChecks returned wrong isNoop signal") + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expectedIsNoop, isNoop, "preflightChecks returned wrong isNoop signal") + }) + } +} + +func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + unlockTopo bool + ctxTimeout time.Duration + + ev *events.Reparent + keyspace string + shard string + currentPrimary *topo.TabletInfo + primaryElect topodatapb.Tablet + tabletMap map[string]*topo.TabletInfo + opts PlannedReparentOptions + + expectedPos string + expectedEvent *events.Reparent + shouldErr bool + // Optional function to run some additional post-test assertions. 
Will + // be run in the main test body before the common assertions are run, + // regardless of the value of tt.shouldErr for that test case. + extraAssertions func(t *testing.T, pos string, err error) + }{ + { + name: "successful promotion", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // value of Position doesn't strictly matter for + // this test case, as long as it matches the inner + // key of the WaitForPositionResults map for the + // primary-elect. + Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Result: "successful reparent journal position", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": nil, + }, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + expectedPos: "successful reparent journal position", + shouldErr: false, + }, + { + name: "cannot get snapshot of current primary", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + 
}, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + }, + { + name: "primary-elect fails to catch up to current primary snapshot position", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": assert.AnError, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + }, + { + name: "primary-elect times out catching up to current primary snapshot position", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterDelays: map[string]time.Duration{ + "zone1-0000000200": time.Millisecond * 100, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, 
+ }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 10, + }, + shouldErr: true, + }, + { + name: "lost topology lock", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + }, + unlockTopo: true, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + }, + { + name: "failed to demote current primary", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: 
PlannedReparentOptions{}, + shouldErr: true, + }, + { + name: "primary-elect fails to catch up to current primary demotion position", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // value of Position doesn't strictly matter for + // this test case, as long as it matches the inner + // key of the WaitForPositionResults map for the + // primary-elect. + Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": assert.AnError, + }, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + }, + { + name: "primary-elect times out catching up to current primary demotion position", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // value of Position doesn't strictly matter for + // this test case, as long as it matches the inner + // key of the WaitForPositionResults map for the + // primary-elect. 
+ Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionDelays: map[string]time.Duration{ + "zone1-0000000200": time.Millisecond * 100, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": nil, + }, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 10, + }, + shouldErr: true, + }, + { + name: "demotion succeeds but parent context times out", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + // This being present means that if we don't encounter + // a case where either WaitForPosition errors, or the parent + // context times out, then we will fail the test, since it + // will cause the overall function under test to return no + // error. 
+ "zone1-0000000200": { + Result: "success!", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionPostDelays: map[string]time.Duration{ + "zone1-0000000200": time.Millisecond * 5, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": nil, + }, + }, + }, + ctxTimeout: time.Millisecond * 4, // WaitForPosition won't return error, but will timeout the parent context + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + }, + { + name: "rollback fails", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // value of Position doesn't strictly matter for + // this test case, as long as it matches the inner + // key of the WaitForPositionResults map for the + // primary-elect. 
+ Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": assert.AnError, + }, + }, + UndoDemoteMasterResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + extraAssertions: func(t *testing.T, pos string, err error) { + assert.Contains(t, err.Error(), "UndoDemoteMaster", "expected error to include information about failed demotion rollback") + }, + }, + { + name: "rollback succeeds", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // value of Position doesn't strictly matter for + // this test case, as long as it matches the inner + // key of the WaitForPositionResults map for the + // primary-elect. 
+ Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": assert.AnError, + }, + }, + UndoDemoteMasterResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + extraAssertions: func(t *testing.T, pos string, err error) { + assert.NotContains(t, err.Error(), "UndoDemoteMaster", "expected error to not include information about failed demotion rollback") + }, + }, + { + name: "primary-elect fails to promote", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // value of Position doesn't strictly matter for + // this test case, as long as it matches the inner + // key of the WaitForPositionResults map for the + // primary-elect. 
+ Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Error: assert.AnError, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": nil, + }, + }, + }, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + }, + { + name: "promotion succeeds but parent context times out", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // value of Position doesn't strictly matter for + // this test case, as long as it matches the inner + // key of the WaitForPositionResults map for the + // primary-elect. 
+ Position: "position1", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + }, + PromoteReplicaPostDelays: map[string]time.Duration{ + "zone1-0000000200": time.Millisecond * 20, // 2x the parent context timeout + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "position1": nil, + }, + }, + }, + ctxTimeout: time.Millisecond * 10, + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + currentPrimary: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: true, + }, + } + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := ctx + + testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + Keyspace: tt.keyspace, + Name: tt.shard, + }) + + if !tt.unlockTopo { + lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) + + defer func() { + unlock(&err) + require.NoError(t, err, "could not unlock %s/%s during testing", tt.keyspace, tt.shard) + }() + + ctx = lctx + } + + pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + + if tt.ctxTimeout > 0 { + _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) + defer cancel() + + ctx = _ctx + } + + pos, err := pr.performGracefulPromotion( + ctx, 
+ tt.ev, + tt.keyspace, + tt.shard, + tt.currentPrimary, + tt.primaryElect, + tt.tabletMap, + tt.opts, + ) + + if tt.extraAssertions != nil { + tt.extraAssertions(t, pos, err) + } + + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expectedPos, pos) + }) + } +} + +func TestPlannedReparenter_performPartialPromotionRecovery(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tmc tmclient.TabletManagerClient + timeout time.Duration + primaryElect topodatapb.Tablet + expectedPos string + shouldErr bool + }{ + { + name: "successful recovery", + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "position1", + Error: nil, + }, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + expectedPos: "position1", + shouldErr: false, + }, + { + name: "failed to SetReadWrite", + tmc: &testutil.TabletManagerClient{ + SetReadWriteResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + shouldErr: true, + }, + { + name: "SetReadWrite timed out", + tmc: &testutil.TabletManagerClient{ + SetReadWriteDelays: map[string]time.Duration{ + "zone1-0000000100": time.Millisecond * 50, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + timeout: time.Millisecond * 10, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + shouldErr: true, + }, + { + name: "failed to get MasterPosition from refreshed primary", + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "", + 
Error: assert.AnError, + }, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + shouldErr: true, + }, + { + name: "MasterPosition timed out", + tmc: &testutil.TabletManagerClient{ + MasterPositionDelays: map[string]time.Duration{ + "zone1-0000000100": time.Millisecond * 50, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "position1", + Error: nil, + }, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + timeout: time.Millisecond * 10, + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + shouldErr: true, + }, + } + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := ctx + pr := NewPlannedReparenter(nil, tt.tmc, logger) + + if tt.timeout > 0 { + _ctx, cancel := context.WithTimeout(ctx, tt.timeout) + defer cancel() + + ctx = _ctx + } + + rp, err := pr.performPartialPromotionRecovery(ctx, tt.primaryElect) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expectedPos, rp, "performPartialPromotionRecovery gave unexpected reparent journal position") + }) + } +} + +func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + timeout time.Duration + unlockTopo bool + + keyspace string + shard string + primaryElect topodatapb.Tablet + tabletMap map[string]*topo.TabletInfo + + expectedPos string + shouldErr bool + }{ + { + name: "success", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status 
*replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000101": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000102": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + Error: nil, + }, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Result: "reparent journal position", + Error: nil, + }, + }, + }, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + expectedPos: "reparent journal position", + shouldErr: false, + }, + { + name: "failed to DemoteMaster on a tablet", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: nil, + Error: assert.AnError, + }, + }, + }, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: 
"zone1", + Uid: 100, + }, + }, + }, + }, + shouldErr: true, + }, + { + name: "timed out during DemoteMaster on a tablet", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterDelays: map[string]time.Duration{ + "zone1-0000000100": time.Millisecond * 50, + }, + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + }, + }, + timeout: time.Millisecond * 10, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + shouldErr: true, + }, + { + name: "failed to DecodePosition on a tablet's demote position", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/this-is-nonsense", + }, + Error: nil, + }, + }, + }, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + shouldErr: true, + }, + { + name: "primary-elect not in tablet map", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{}, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: 
"zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + shouldErr: true, + }, + { + name: "primary-elect not at the most advanced position", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000101": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000102": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10000", + }, + Error: nil, + }, + }, + }, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + shouldErr: true, + }, + { + name: "lost topology lock", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000101": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + 
"zone1-0000000102": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + }, + }, + unlockTopo: true, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + shouldErr: true, + }, + { + name: "failed to promote primary-elect", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000101": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000102": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + Error: nil, + }, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Result: "", + Error: assert.AnError, + }, + }, + }, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + 
}, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + shouldErr: true, + }, + { + name: "timed out while promoting primary-elect", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000101": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000102": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + Error: nil, + }, + }, + PromoteReplicaDelays: map[string]time.Duration{ + "zone1-0000000100": time.Millisecond * 100, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Result: "reparent journal position", + Error: nil, + }, + }, + }, + timeout: time.Millisecond * 50, + unlockTopo: false, + keyspace: "testkeyspace", + shard: "-", + primaryElect: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + shouldErr: true, + }, + } + + ctx := context.Background() + logger := 
logutil.NewMemoryLogger() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := ctx + pr := NewPlannedReparenter(nil, tt.tmc, logger) + + testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + Keyspace: tt.keyspace, + Name: tt.shard, + }) + + if !tt.unlockTopo { + lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) + + defer func() { + unlock(&err) + require.NoError(t, err, "could not unlock %s/%s during testing", tt.keyspace, tt.shard) + }() + + ctx = lctx + } + + if tt.timeout > 0 { + _ctx, cancel := context.WithTimeout(ctx, tt.timeout) + defer cancel() + + ctx = _ctx + } + + rp, err := pr.performPotentialPromotion(ctx, tt.keyspace, tt.shard, tt.primaryElect, tt.tabletMap) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expectedPos, rp) + }) + } +} + +func TestPlannedReparenter_reparentShardLocked(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ts *topo.Server + tmc tmclient.TabletManagerClient + tablets []*topodatapb.Tablet + unlockTopo bool + + ev *events.Reparent + keyspace string + shard string + opts PlannedReparentOptions + + shouldErr bool + expectedEvent *events.Reparent + }{ + { + name: "success: current primary cannot be determined", // "Case (1)" + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + "zone1-0000000200": { + Status: &replicationdatapb.MasterStatus{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, 
// zone1-200 gets promoted + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Result: "reparent journal position", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, // zone1-100 gets reparented under zone1-200 + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + Nanoseconds: 500, + }, + Hostname: "primary1", // claims to be MASTER with same term as primary2 + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + Nanoseconds: 500, + }, + Hostname: "primary2", // claims to be MASTER with same term as primary1 + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ // We want primary2 to be the true primary. 
+ Cell: "zone1", + Uid: 200, + }, + }, + + shouldErr: false, + }, + { + name: "success: current primary is desired primary", // "Case (2)" + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "position1", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + + shouldErr: false, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "success: graceful promotion", // "Case (3)" + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + DemoteMasterResults: map[string]struct { + Status *replicationdatapb.MasterStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.MasterStatus{ + // a few more transactions happen after waiting for replication + Position: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", + }, + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-8", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000200": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000200": { + Result: "reparent journal position", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, // called during reparentTablets to make oldPrimary a replica of newPrimary + "zone1-0000000200": nil, // called during performGracefulPromotion to ensure newPrimary is caught up + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000200": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10": nil, + }, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + Nanoseconds: 500, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + + shouldErr: false, + }, + { + name: "shard not found", + ts: memorytopo.NewServer("zone1"), + tmc: nil, + tablets: nil, + unlockTopo: true, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{}, + + shouldErr: true, + expectedEvent: &events.Reparent{}, + }, + { + name: "preflight checks fail", + ts: memorytopo.NewServer("zone1"), + tmc: nil, + tablets: []*topodatapb.Tablet{ + // Shard has no 
current primary, so preflight fails. + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + + shouldErr: true, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "preflight checks determine PRS is no-op", + ts: memorytopo.NewServer("zone1"), + tmc: nil, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + // This is not the shard primary, so nothing to do. 
+ AvoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + }, + + shouldErr: false, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, nil), + }, + }, + { + name: "promotion step fails", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + SetReadWriteResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + + shouldErr: true, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "lost topology lock", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "position1", + Error: nil, + }, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: 
"zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + unlockTopo: true, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + // This is not the shard primary, so nothing to do. + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + + shouldErr: true, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + { + name: "failed to reparent tablets", + ts: memorytopo.NewServer("zone1"), + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "position1", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + + ev: &events.Reparent{}, + keyspace: "testkeyspace", + shard: "-", + opts: PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + + shouldErr: 
true, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + KeyRange: &topodatapb.KeyRange{}, + IsMasterServing: true, + }, nil), + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + }, + } + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := ctx + + testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + AlsoSetShardMaster: true, + ForceSetShardMaster: true, // Some of our test cases count on having multiple primaries, so let the last one "win". + SkipShardCreation: false, + }, tt.tablets...) + + if !tt.unlockTopo { + lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for testing") + require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) + + defer func() { + unlock(&err) + require.NoError(t, err, "error while unlocking %s/%s after test case", tt.keyspace, tt.shard) + }() + + ctx = lctx + } + + if tt.expectedEvent != nil { + defer func() { + AssertReparentEventsEqualWithMessage(t, tt.expectedEvent, tt.ev, "expected reparentShardLocked to mutate the passed-in event") + }() + } + + pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + + err := pr.reparentShardLocked(ctx, tt.ev, tt.keyspace, tt.shard, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + }) + } +} + +func TestPlannedReparenter_reparentTablets(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tmc tmclient.TabletManagerClient + + ev *events.Reparent + reparentJournalPosition string + tabletMap map[string]*topo.TabletInfo + opts PlannedReparentOptions + + shouldErr bool + }{ + { + name: "success", + 
tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + "zone1-0000000201": nil, + "zone1-0000000202": nil, + }, + }, + ev: &events.Reparent{ + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000202": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 202, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + shouldErr: false, + }, + { + name: "SetMaster failed on replica", + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + "zone1-0000000201": assert.AnError, + "zone1-0000000202": nil, + }, + }, + ev: &events.Reparent{ + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: 
topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000202": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 202, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + shouldErr: true, + }, + { + name: "SetMaster timed out on replica", + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetMasterDelays: map[string]time.Duration{ + "zone1-0000000201": time.Millisecond * 50, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + "zone1-0000000201": nil, + "zone1-0000000202": nil, + }, + }, + ev: &events.Reparent{ + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000202": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 202, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + opts: PlannedReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 10, + }, + shouldErr: true, + }, + { + name: "PopulateReparentJournal failed out on new primary", + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": 
assert.AnError, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + "zone1-0000000201": nil, + "zone1-0000000202": nil, + }, + }, + ev: &events.Reparent{ + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000202": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 202, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + shouldErr: true, + }, + { + name: "PopulateReparentJournal timed out on new primary", + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalDelays: map[string]time.Duration{ + "zone1-0000000100": time.Millisecond * 50, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetMasterResults: map[string]error{ + "zone1-0000000200": nil, + "zone1-0000000201": nil, + "zone1-0000000202": nil, + }, + }, + ev: &events.Reparent{ + NewMaster: topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 
200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000202": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 202, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + opts: PlannedReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 10, + }, + shouldErr: true, + }, + } + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + pr := NewPlannedReparenter(nil, tt.tmc, logger) + err := pr.reparentTablets(ctx, tt.ev, tt.reparentJournalPosition, tt.tabletMap, tt.opts) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + }) + } +} + +// (TODO:@ajm88) when unifying all the mock TMClient implementations (which will +// most likely end up in go/vt/vtctl/testutil), move these to the same testutil +// package. +func AssertReparentEventsEqualWithMessage(t *testing.T, expected *events.Reparent, actual *events.Reparent, msg string) { + t.Helper() + + if msg != "" && !strings.HasSuffix(msg, " ") { + msg = msg + ": " + } + + if expected == nil { + assert.Nil(t, actual, "%sexpected nil Reparent event", msg) + + return + } + + if actual == nil { + // Note: the reason we don't use require.NotNil here is because it would + // fail the entire test, rather than just this one helper, which is + // intended to be an atomic assertion. However, we also don't want to + // have to add a bunch of nil-guards below, as it would complicate the + // code, so we're going to duplicate the nil check to force a failure + // and bail early. 
+ assert.NotNil(t, actual, "%sexpected non-nil Reparent event", msg) + + return + } + + removeVersion := func(si topo.ShardInfo) topo.ShardInfo { + return *topo.NewShardInfo(si.Keyspace(), si.ShardName(), si.Shard, nil) + } + + assert.Equal(t, removeVersion(expected.ShardInfo), removeVersion(actual.ShardInfo), "%sReparent.ShardInfo mismatch", msg) + assert.Equal(t, expected.NewMaster, actual.NewMaster, "%sReparent.NewMaster mismatch", msg) + assert.Equal(t, expected.OldMaster, actual.OldMaster, "%sReparent.OldMaster mismatch", msg) +} + +func AssertReparentEventsEqual(t *testing.T, expected *events.Reparent, actual *events.Reparent) { + t.Helper() + + AssertReparentEventsEqualWithMessage(t, expected, actual, "") +} diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go new file mode 100644 index 00000000000..42440b82918 --- /dev/null +++ b/go/vt/vtctl/reparentutil/replication.go @@ -0,0 +1,242 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reparentutil + +import ( + "context" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + + "vitess.io/vitess/go/event" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +// FindValidEmergencyReparentCandidates will find candidates for an emergency +// reparent, and, if successful, return a mapping of those tablet aliases (as +// raw strings) to their replication positions for later comparison. +func FindValidEmergencyReparentCandidates( + statusMap map[string]*replicationdatapb.StopReplicationStatus, + primaryStatusMap map[string]*replicationdatapb.MasterStatus, +) (map[string]mysql.Position, error) { + replicationStatusMap := make(map[string]*mysql.ReplicationStatus, len(statusMap)) + positionMap := make(map[string]mysql.Position) + + // Build out replication status list from proto types. + for alias, statuspb := range statusMap { + status := mysql.ProtoToReplicationStatus(statuspb.After) + replicationStatusMap[alias] = &status + } + + // Determine if we're GTID-based. If we are, we'll need to look for errant + // GTIDs below. + var ( + isGTIDBased bool + isNonGTIDBased bool + emptyRelayPosErrorRecorder concurrency.FirstErrorRecorder + ) + + for alias, status := range replicationStatusMap { + if _, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet); ok { + isGTIDBased = true + } else { + isNonGTIDBased = true + } + + if status.RelayLogPosition.IsZero() { + // Potentially bail. If any other tablet is detected to have + // GTID-based relay log positions, we will return the error recorded + // here. 
+ emptyRelayPosErrorRecorder.RecordError(vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "encountered tablet %v with no relay log position, when at least one other tablet in the status map has GTID based relay log positions", alias)) + } + } + + if isGTIDBased && emptyRelayPosErrorRecorder.HasErrors() { + return nil, emptyRelayPosErrorRecorder.Error() + } + + if isGTIDBased && isNonGTIDBased { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "encountered mix of GTID-based and non GTID-based relay logs") + } + + // Create relevant position list of errant GTID-based positions for later + // comparison. + for alias, status := range replicationStatusMap { + // If we're not GTID-based, no need to search for errant GTIDs, so just + // add the position to the map and continue. + if !isGTIDBased { + positionMap[alias] = status.Position + + continue + } + + // This condition should really never happen, since we did the same cast + // in the earlier loop, but let's be doubly sure. + relayLogGTIDSet, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet) + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "we got a filled-in relay log position, but it's not of type Mysql56GTIDSet, even though we've determined we need to use GTID based assesment") + } + + // We need to remove this alias's status from the list, otherwise the + // GTID diff will always be empty. + statusList := make([]*mysql.ReplicationStatus, 0, len(replicationStatusMap)-1) + + for a, s := range replicationStatusMap { + if a != alias { + statusList = append(statusList, s) + } + } + + errantGTIDs, err := status.FindErrantGTIDs(statusList) + switch { + case err != nil: + // Could not look up GTIDs to determine if we have any. It's not + // safe to continue. + return nil, err + case len(errantGTIDs) != 0: + // This tablet has errant GTIDs. It's not a valid candidate for + // reparent, so don't insert it into the final mapping. 
+ continue + } + + pos := mysql.Position{GTIDSet: relayLogGTIDSet} + positionMap[alias] = pos + } + + for alias, primaryStatus := range primaryStatusMap { + executedPosition, err := mysql.DecodePosition(primaryStatus.Position) + if err != nil { + return nil, vterrors.Wrapf(err, "could not decode a master status executed position for tablet %v: %v", alias, err) + } + + positionMap[alias] = executedPosition + } + + return positionMap, nil +} + +// ReplicaWasRunning returns true if a StopReplicationStatus indicates that the +// replica had running replication threads before being stopped. It returns an +// error if the Before state of replication is nil. +func ReplicaWasRunning(stopStatus *replicationdatapb.StopReplicationStatus) (bool, error) { + if stopStatus == nil || stopStatus.Before == nil { + return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "could not determine Before state of StopReplicationStatus %v", stopStatus) + } + + return stopStatus.Before.IoThreadRunning || stopStatus.Before.SqlThreadRunning, nil +} + +// StopReplicationAndBuildStatusMaps stops replication on all replicas, then +// collects and returns a mapping of TabletAlias (as string) to their current +// replication positions. 
+func StopReplicationAndBuildStatusMaps( + ctx context.Context, + tmc tmclient.TabletManagerClient, + ev *events.Reparent, + tabletMap map[string]*topo.TabletInfo, + waitReplicasTimeout time.Duration, + ignoredTablets sets.String, + logger logutil.Logger, +) (map[string]*replicationdatapb.StopReplicationStatus, map[string]*replicationdatapb.MasterStatus, error) { + event.DispatchUpdate(ev, "stop replication on all replicas") + + var ( + statusMap = map[string]*replicationdatapb.StopReplicationStatus{} + masterStatusMap = map[string]*replicationdatapb.MasterStatus{} + m sync.Mutex + errChan = make(chan error) + ) + + groupCtx, groupCancel := context.WithTimeout(ctx, waitReplicasTimeout) + defer groupCancel() + + fillStatus := func(alias string, tabletInfo *topo.TabletInfo) { + err := vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "fillStatus did not successfully complete") + defer func() { errChan <- err }() + + logger.Infof("getting replication position from %v", alias) + + _, stopReplicationStatus, err := tmc.StopReplicationAndGetStatus(groupCtx, tabletInfo.Tablet, replicationdatapb.StopReplicationMode_IOTHREADONLY) + switch err { + case mysql.ErrNotReplica: + var masterStatus *replicationdatapb.MasterStatus + + masterStatus, err = tmc.DemoteMaster(groupCtx, tabletInfo.Tablet) + if err != nil { + msg := "replica %v thinks it's master but we failed to demote it" + err = vterrors.Wrapf(err, msg+": %v", alias, err) + + logger.Warningf(msg, alias) + return + } + + m.Lock() + masterStatusMap[alias] = masterStatus + m.Unlock() + case nil: + m.Lock() + statusMap[alias] = stopReplicationStatus + m.Unlock() + default: + logger.Warningf("failed to get replication status from %v: %v", alias, err) + + err = vterrors.Wrapf(err, "error when getting replication status for alias %v: %v", alias, err) + } + } + + for alias, tabletInfo := range tabletMap { + if !ignoredTablets.Has(alias) { + go fillStatus(alias, tabletInfo) + } + } + + errgroup := concurrency.ErrorGroup{ + NumGoroutines: 
len(tabletMap) - ignoredTablets.Len(), + NumRequiredSuccesses: len(tabletMap) - ignoredTablets.Len() - 1, + NumAllowedErrors: 1, + } + + errRecorder := errgroup.Wait(groupCancel, errChan) + if len(errRecorder.Errors) > 1 { + return nil, nil, vterrors.Wrapf(errRecorder.Error(), "encountered more than one error when trying to stop replication and get positions: %v", errRecorder.Error()) + } + + return statusMap, masterStatusMap, nil +} + +// WaitForRelayLogsToApply blocks execution waiting for the given tablet's relay +// logs to apply, unless the specified context is canceled or exceeded. +// Typically a caller will set a timeout of WaitReplicasTimeout on a context and +// use that context with this function. +func WaitForRelayLogsToApply(ctx context.Context, tmc tmclient.TabletManagerClient, tabletInfo *topo.TabletInfo, status *replicationdatapb.StopReplicationStatus) error { + switch status.After.RelayLogPosition { + case "": + return tmc.WaitForPosition(ctx, tabletInfo.Tablet, status.After.FileRelayLogPosition) + default: + return tmc.WaitForPosition(ctx, tabletInfo.Tablet, status.After.RelayLogPosition) + } +} diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go new file mode 100644 index 00000000000..12a045c60dd --- /dev/null +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -0,0 +1,898 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reparentutil + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/sets" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +func TestFindValidEmergencyReparentCandidates(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + statusMap map[string]*replicationdatapb.StopReplicationStatus + primaryStatusMap map[string]*replicationdatapb.MasterStatus + // Note: for these tests, it's simpler to compare keys than actual + // mysql.Postion structs, which are just thin wrappers around the + // mysql.GTIDSet interface. If a tablet alias makes it into the map, we + // know it was chosen by the method, and that either + // mysql.DecodePosition was successful (in the primary case) or + // status.FindErrantGTIDs was successful (in the replica case). If the + // former is not true, then the function should return an error. If the + // latter is not true, then the tablet alias will not be in the map. The + // point is, the combination of (1) whether the test should error and + // (2) the set of keys we expect in the map is enough to fully assert on + // the correctness of the behavior of this functional unit. 
+ expected []string + shouldErr bool + }{ + { + name: "success", + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "r1": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + "r2": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + primaryStatusMap: map[string]*replicationdatapb.MasterStatus{ + "p1": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + expected: []string{"r1", "r2", "p1"}, + shouldErr: false, + }, + { + name: "mixed replication modes", + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "r1": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + "r2": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "FilePos/mysql-bin.0001:10", + }, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "tablet without relay log position", + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "r1": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + "r2": { + After: &replicationdatapb.Status{ + RelayLogPosition: "", + }, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "non-GTID-based", + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "r1": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "FilePos/mysql-bin.0001:100", + }, + }, + "r2": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + 
RelayLogPosition: "FilePos/mysql-bin.0001:10", + }, + }, + }, + expected: []string{"r1", "r2"}, + shouldErr: false, + }, + { + name: "tablet with errant GTIDs is excluded", + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "r1": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + "errant": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:1", + }, + }, + }, + primaryStatusMap: map[string]*replicationdatapb.MasterStatus{ + "p1": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + expected: []string{"r1", "p1"}, + shouldErr: false, + }, + { + name: "bad master position fails the call", + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "r1": { + After: &replicationdatapb.Status{ + MasterUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + primaryStatusMap: map[string]*replicationdatapb.MasterStatus{ + "p1": { + Position: "InvalidFlavor/1234", + }, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + actual, err := FindValidEmergencyReparentCandidates(tt.statusMap, tt.primaryStatusMap) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + + keys := make([]string, 0, len(actual)) + for key := range actual { + keys = append(keys, key) + } + assert.ElementsMatch(t, tt.expected, keys) + }) + } +} + +// stopReplicationAndBuildStatusMapsTestTMClient implements +// tmclient.TabletManagerClient to facilitate testing of +// StopReplicationAndBuildStatusMaps. 
+type stopReplicationAndBuildStatusMapsTestTMClient struct { + tmclient.TabletManagerClient + + demoteMasterResults map[string]*struct { + MasterStatus *replicationdatapb.MasterStatus + Err error + } + demoteMasterDelays map[string]time.Duration + + stopReplicationAndGetStatusResults map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + } + stopReplicationAndGetStatusDelays map[string]time.Duration +} + +func (fake *stopReplicationAndBuildStatusMapsTestTMClient) DemoteMaster(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.MasterStatus, error) { + if tablet.Alias == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if delay, ok := fake.demoteMasterDelays[key]; ok { + select { + case <-time.After(delay): + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + if result, ok := fake.demoteMasterResults[key]; ok { + return result.MasterStatus, result.Err + } + + return nil, assert.AnError +} + +func (fake *stopReplicationAndBuildStatusMapsTestTMClient) StopReplicationAndGetStatus(ctx context.Context, tablet *topodatapb.Tablet, mode replicationdatapb.StopReplicationMode) (*replicationdatapb.Status, *replicationdatapb.StopReplicationStatus, error) { + if tablet.Alias == nil { + return nil, nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if delay, ok := fake.stopReplicationAndGetStatusDelays[key]; ok { + select { + case <-time.After(delay): + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + } + + if result, ok := fake.stopReplicationAndGetStatusResults[key]; ok { + return /* unused by the code under test */ nil, result.StopStatus, result.Err + } + + return nil, nil, assert.AnError +} + +func TestStopReplicationAndBuildStatusMaps(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + tests := []struct { + name string + tmc *stopReplicationAndBuildStatusMapsTestTMClient + tabletMap 
map[string]*topo.TabletInfo + waitReplicasTimeout time.Duration + ignoredTablets sets.String + expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus + expectedMasterStatusMap map[string]*replicationdatapb.MasterStatus + shouldErr bool + }{ + { + name: "success", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "100-before"}, + After: &replicationdatapb.Status{Position: "100-after"}, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.NewString(), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + Before: &replicationdatapb.Status{Position: "100-before"}, + After: &replicationdatapb.Status{Position: "100-after"}, + }, + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + expectedMasterStatusMap: map[string]*replicationdatapb.MasterStatus{}, + shouldErr: false, + }, + { + name: "ignore tablets", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: 
&replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "100-before"}, + After: &replicationdatapb.Status{Position: "100-after"}, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.NewString("zone1-0000000100"), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + expectedMasterStatusMap: map[string]*replicationdatapb.MasterStatus{}, + shouldErr: false, + }, + { + name: "have MASTER tablet and can demote", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + demoteMasterResults: map[string]*struct { + MasterStatus *replicationdatapb.MasterStatus + Err error + }{ + "zone1-0000000100": { + MasterStatus: &replicationdatapb.MasterStatus{ + Position: "master-position-100", + }, + }, + }, + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + Err: mysql.ErrNotReplica, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, 
+ "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.NewString(), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + expectedMasterStatusMap: map[string]*replicationdatapb.MasterStatus{ + "zone1-0000000100": { + Position: "master-position-100", + }, + }, + shouldErr: false, + }, + { + name: "one tablet is MASTER and cannot demote", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + demoteMasterResults: map[string]*struct { + MasterStatus *replicationdatapb.MasterStatus + Err error + }{ + "zone1-0000000100": { + Err: assert.AnError, + }, + }, + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + Err: mysql.ErrNotReplica, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.NewString(), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + expectedMasterStatusMap: map[string]*replicationdatapb.MasterStatus{}, // zone1-0000000100 fails to demote, so does not appear + shouldErr: false, + }, + { + name: "multiple 
tablets are MASTER and cannot demote", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + demoteMasterResults: map[string]*struct { + MasterStatus *replicationdatapb.MasterStatus + Err error + }{ + "zone1-0000000100": { + Err: assert.AnError, + }, + "zone1-0000000101": { + Err: assert.AnError, + }, + }, + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + Err: mysql.ErrNotReplica, + }, + "zone1-0000000101": { + Err: mysql.ErrNotReplica, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.NewString(), + expectedStatusMap: nil, + expectedMasterStatusMap: nil, + shouldErr: true, // we get multiple errors, so we fail + }, + { + name: "waitReplicasTimeout exceeded", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusDelays: map[string]time.Duration{ + "zone1-0000000100": time.Minute, // zone1-0000000100 will timeout and not be included + }, + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "100-before"}, + After: &replicationdatapb.Status{Position: "100-after"}, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + 
Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + waitReplicasTimeout: time.Millisecond * 5, + ignoredTablets: sets.NewString(), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + expectedMasterStatusMap: map[string]*replicationdatapb.MasterStatus{}, + shouldErr: false, + }, + { + name: "one tablet fails to StopReplication", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + Err: assert.AnError, // not being mysql.ErrNotReplica will not cause us to call DemoteMaster + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.NewString(), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "101-before"}, + After: &replicationdatapb.Status{Position: "101-after"}, + }, + }, + expectedMasterStatusMap: map[string]*replicationdatapb.MasterStatus{}, + shouldErr: false, + }, + { + name: "multiple tablets fail StopReplication", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + 
StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + Err: assert.AnError, + }, + "zone1-0000000101": { + Err: assert.AnError, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.NewString(), + expectedStatusMap: nil, + expectedMasterStatusMap: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + statusMap, masterStatusMap, err := StopReplicationAndBuildStatusMaps( + ctx, + tt.tmc, + &events.Reparent{}, + tt.tabletMap, + tt.waitReplicasTimeout, + tt.ignoredTablets, + logger, + ) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expectedStatusMap, statusMap, "StopReplicationStatus mismatch") + assert.Equal(t, tt.expectedMasterStatusMap, masterStatusMap, "MasterStatusMap mismatch") + }) + } +} + +func TestReplicaWasRunning(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in *replicationdatapb.StopReplicationStatus + expected bool + shouldErr bool + }{ + { + name: "io thread running", + in: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{ + IoThreadRunning: true, + SqlThreadRunning: false, + }, + }, + expected: true, + shouldErr: false, + }, + { + name: "sql thread running", + in: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{ + IoThreadRunning: false, + SqlThreadRunning: true, + }, + }, + expected: true, + shouldErr: false, + }, + { + name: "io and sql threads running", + in: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{ + IoThreadRunning: true, + SqlThreadRunning: true, + }, + }, + 
expected: true, + shouldErr: false, + }, + { + name: "no replication threads running", + in: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{ + IoThreadRunning: false, + SqlThreadRunning: false, + }, + }, + expected: false, + shouldErr: false, + }, + { + name: "passing nil pointer results in an error", + in: nil, + expected: false, + shouldErr: true, + }, + { + name: "status.Before is nil results in an error", + in: &replicationdatapb.StopReplicationStatus{ + Before: nil, + }, + expected: false, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + actual, err := ReplicaWasRunning(tt.in) + if tt.shouldErr { + assert.Error(t, err) + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, actual) + }) + } +} + +// waitForRelayLogsToApplyTestTMClient implements just the WaitForPosition +// method of the tmclient.TabletManagerClient interface for +// TestWaitForRelayLogsToApply, with the necessary trackers to facilitate +// testing that unit. 
+type waitForRelayLogsToApplyTestTMClient struct { + tmclient.TabletManagerClient + calledPositions []string + shouldErr bool +} + +func (fake *waitForRelayLogsToApplyTestTMClient) WaitForPosition(_ context.Context, _ *topodatapb.Tablet, position string) error { + if fake.shouldErr { + return assert.AnError + } + + fake.calledPositions = append(fake.calledPositions, position) + return nil +} + +func TestWaitForRelayLogsToApply(t *testing.T) { + t.Parallel() + + ctx := context.Background() + tests := []struct { + name string + client *waitForRelayLogsToApplyTestTMClient + status *replicationdatapb.StopReplicationStatus + expectedCalledPositions []string + shouldErr bool + }{ + { + name: "using relay log position", + client: &waitForRelayLogsToApplyTestTMClient{}, + status: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + RelayLogPosition: "relay-pos", + }, + }, + expectedCalledPositions: []string{"relay-pos"}, + shouldErr: false, + }, + { + name: "using file relay log position", + client: &waitForRelayLogsToApplyTestTMClient{}, + status: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + FileRelayLogPosition: "file-relay-pos", + }, + }, + expectedCalledPositions: []string{"file-relay-pos"}, + shouldErr: false, + }, + { + name: "when both are set, relay log position takes precedence over file relay log position", + client: &waitForRelayLogsToApplyTestTMClient{}, + status: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + RelayLogPosition: "relay-pos", + FilePosition: "file-relay-pos", + }, + }, + expectedCalledPositions: []string{"relay-pos"}, + shouldErr: false, + }, + { + name: "error waiting for position", + client: &waitForRelayLogsToApplyTestTMClient{ + shouldErr: true, + }, + status: &replicationdatapb.StopReplicationStatus{ + After: &replicationdatapb.Status{ + RelayLogPosition: "relay-pos", + }, + }, + expectedCalledPositions: nil, + shouldErr: true, + }, + } + + for _, 
tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + err := WaitForRelayLogsToApply(ctx, tt.client, &topo.TabletInfo{}, tt.status) + defer assert.Equal(t, tt.expectedCalledPositions, tt.client.calledPositions) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + }) + } +} diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go new file mode 100644 index 00000000000..9e31c6985da --- /dev/null +++ b/go/vt/vtctl/reparentutil/util.go @@ -0,0 +1,146 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reparentutil + +import ( + "context" + "sync" + "time" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +// ChooseNewPrimary finds a tablet that should become a primary after reparent. +// The criteria for the new primary-elect are (preferably) to be in the same +// cell as the current primary, and to be different from avoidPrimaryAlias. The +// tablet with the most advanced replication position is chosen to minimize the +// amount of time spent catching up with the current primary. 
+// +// Note that the search for the most advanced replication position will race +// with transactions being executed on the current primary, so when all tablets +// are at roughly the same position, then the choice of new primary-elect will +// be somewhat unpredictable. +func ChooseNewPrimary( + ctx context.Context, + tmc tmclient.TabletManagerClient, + shardInfo *topo.ShardInfo, + tabletMap map[string]*topo.TabletInfo, + avoidPrimaryAlias *topodatapb.TabletAlias, + waitReplicasTimeout time.Duration, + // (TODO:@ajm188) it's a little gross we need to pass this, maybe embed in the context? + logger logutil.Logger, +) (*topodatapb.TabletAlias, error) { + if avoidPrimaryAlias == nil { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "tablet to avoid for reparent is not provided, cannot choose new primary") + } + + var primaryCell string + if shardInfo.MasterAlias != nil { + primaryCell = shardInfo.MasterAlias.Cell + } + + var ( + searcher = topotools.NewMaxReplicationPositionSearcher(tmc, logger, waitReplicasTimeout) + wg sync.WaitGroup + ) + + for _, tablet := range tabletMap { + switch { + case primaryCell != "" && tablet.Alias.Cell != primaryCell: + continue + case topoproto.TabletAliasEqual(tablet.Alias, avoidPrimaryAlias): + continue + case tablet.Tablet.Type != topodatapb.TabletType_REPLICA: + continue + } + + wg.Add(1) + + go func(tablet *topodatapb.Tablet) { + defer wg.Done() + searcher.ProcessTablet(ctx, tablet) + }(tablet.Tablet) + } + + wg.Wait() + + if maxPosTablet := searcher.MaxPositionTablet(); maxPosTablet != nil { + return maxPosTablet.Alias, nil + } + + return nil, nil +} + +// FindCurrentPrimary returns the current primary tablet of a shard, if any. The +// current primary is whichever tablet of type MASTER (if any) has the most +// recent MasterTermStartTime, which is the same rule that vtgate uses to route +// master traffic. +// +// The return value is nil if the current primary cannot be definitively +// determined. 
This can happen either if no tablet claims to be type MASTER, or +// if multiple tablets claim to be type MASTER and happen to have the same +// MasterTermStartTime timestamp (a tie). +// +// The tabletMap must be a complete map (not a partial result) for the shard. +func FindCurrentPrimary(tabletMap map[string]*topo.TabletInfo, logger logutil.Logger) *topo.TabletInfo { + var ( + currentPrimary *topo.TabletInfo + currentTermStartTime time.Time + ) + + for _, tablet := range tabletMap { + if tablet.Type != topodatapb.TabletType_MASTER { + continue + } + + if currentPrimary == nil { + currentPrimary = tablet + currentTermStartTime = tablet.GetMasterTermStartTime() + continue + } + + otherPrimaryTermStartTime := tablet.GetMasterTermStartTime() + if otherPrimaryTermStartTime.After(currentTermStartTime) { + currentPrimary = tablet + currentTermStartTime = otherPrimaryTermStartTime + } else if otherPrimaryTermStartTime.Equal(currentTermStartTime) { + // A tie should not happen unless the upgrade order was violated + // (e.g. some vttablets have not been upgraded) or if we get really + // unlucky. + // + // Either way, we need to be safe and not assume we know who the + // true primary is. + logger.Warningf( + "Multiple primaries (%v and %v) are tied for MasterTermStartTime; can't determine the true primary.", + topoproto.TabletAliasString(currentPrimary.Alias), + topoproto.TabletAliasString(tablet.Alias), + ) + + return nil + } + } + + return currentPrimary +} diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go new file mode 100644 index 00000000000..eb0ccba3a3d --- /dev/null +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -0,0 +1,513 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reparentutil + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +type chooseNewPrimaryTestTMClient struct { + tmclient.TabletManagerClient + replicationStatuses map[string]*replicationdatapb.Status +} + +func (fake *chooseNewPrimaryTestTMClient) ReplicationStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.Status, error) { + if fake.replicationStatuses == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if status, ok := fake.replicationStatuses[key]; ok { + return status, nil + } + + return nil, assert.AnError +} + +func TestChooseNewPrimary(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + tests := []struct { + name string + tmc *chooseNewPrimaryTestTMClient + shardInfo *topo.ShardInfo + tabletMap map[string]*topo.TabletInfo + avoidPrimaryAlias *topodatapb.TabletAlias + expected *topodatapb.TabletAlias + shouldErr bool + }{ + { + name: "found a replica", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-101 is behind zone1-102 + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, 
+ "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + shouldErr: false, + }, + { + name: "no active primary in shard", + tmc: &chooseNewPrimaryTestTMClient{ + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + }, + }, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{}, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + shouldErr: false, + }, + { + name: "primary alias is nil", + tmc: &chooseNewPrimaryTestTMClient{ + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + 
Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + }, + }, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: nil, + expected: nil, + shouldErr: true, + }, + { + name: "no replicas in primary cell", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-101 is behind zone1-102 + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 200, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: nil, + shouldErr: false, + }, + { + name: "only available tablet is AvoidPrimary", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-101 is behind zone1-102 + replicationStatuses: 
map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + }, + }, + }, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "avoid-primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + expected: nil, + shouldErr: false, + }, + { + name: "no replicas in shard", + tmc: &chooseNewPrimaryTestTMClient{}, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + MasterAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_MASTER, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: nil, + shouldErr: false, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + actual, err := ChooseNewPrimary(ctx, tt.tmc, tt.shardInfo, tt.tabletMap, tt.avoidPrimaryAlias, time.Millisecond*50, logger) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, actual) + }) + } +} + +func TestFindCurrentPrimary(t *testing.T) { + t.Parallel() + + // The exact values of the tablet aliases don't matter to this function, but + // we need them to be non-nil, so we'll just make one and reuse it. 
+ alias := &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + } + logger := logutil.NewMemoryLogger() + tests := []struct { + name string + in map[string]*topo.TabletInfo + expected *topo.TabletInfo + }{ + { + name: "single current primary", + in: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Hostname: "primary-tablet", + }, + }, + "replica": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_REPLICA, + Hostname: "replica-tablet", + }, + }, + "rdonly": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_RDONLY, + Hostname: "rdonly-tablet", + }, + }, + }, + expected: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Hostname: "primary-tablet", + }, + }, + }, + { + name: "no primaries", + in: map[string]*topo.TabletInfo{ + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_REPLICA, + Hostname: "replica-tablet-1", + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_REPLICA, + Hostname: "replica-tablet-2", + }, + }, + "rdonly": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_RDONLY, + Hostname: "rdonly-tablet", + }, + }, + }, + expected: nil, + }, + { + name: "multiple primaries with one true primary", + in: map[string]*topo.TabletInfo{ + "stale-primary": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Hostname: "stale-primary-tablet", + }, + }, + "true-primary": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + Hostname: "true-primary-tablet", + }, + }, 
+ "rdonly": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_RDONLY, + Hostname: "rdonly-tablet", + }, + }, + }, + expected: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 1000, + }, + Hostname: "true-primary-tablet", + }, + }, + }, + { + name: "multiple primaries with same term start", + in: map[string]*topo.TabletInfo{ + "primary1": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Hostname: "primary-tablet-1", + }, + }, + "primary2": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_MASTER, + MasterTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Hostname: "primary-tablet-2", + }, + }, + "rdonly": { + Tablet: &topodatapb.Tablet{ + Alias: alias, + Type: topodatapb.TabletType_RDONLY, + Hostname: "rdonly-tablet", + }, + }, + }, + expected: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + actual := FindCurrentPrimary(tt.in, logger) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/go/vt/vtctl/topo.go b/go/vt/vtctl/topo.go index 529affdafb6..a408051d31a 100644 --- a/go/vt/vtctl/topo.go +++ b/go/vt/vtctl/topo.go @@ -60,7 +60,7 @@ func init() { // the right object, then echoes it as a string. 
func DecodeContent(filename string, data []byte, json bool) (string, error) { name := path.Base(filename) - + dir := path.Dir(filename) var p proto.Message switch name { case topo.CellInfoFile: @@ -82,9 +82,15 @@ func DecodeContent(filename string, data []byte, json bool) (string, error) { case topo.RoutingRulesFile: p = new(vschemapb.RoutingRules) default: - if json { - return "", fmt.Errorf("unknown topo protobuf type for %v", name) - } else { + switch dir { + case "/" + topo.GetExternalVitessClusterDir(): + p = new(topodatapb.ExternalVitessCluster) + default: + } + if p == nil { + if json { + return "", fmt.Errorf("unknown topo protobuf type for %v", name) + } return string(data), nil } } @@ -95,15 +101,14 @@ func DecodeContent(filename string, data []byte, json bool) (string, error) { if json { return new(jsonpb.Marshaler).MarshalToString(p) - } else { - return proto.MarshalTextString(p), nil } + return proto.MarshalTextString(p), nil } func commandTopoCat(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { cell := subFlags.String("cell", topo.GlobalCell, "topology cell to cat the file from. 
Defaults to global cell.") long := subFlags.Bool("long", false, "long listing.") - decodeProtoJson := subFlags.Bool("decode_proto_json", false, "decode proto files and display them as json") + decodeProtoJSON := subFlags.Bool("decode_proto_json", false, "decode proto files and display them as json") decodeProto := subFlags.Bool("decode_proto", false, "decode proto files and display them as text") subFlags.Parse(args) if subFlags.NArg() == 0 { @@ -125,15 +130,15 @@ func commandTopoCat(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.F var topologyDecoder TopologyDecoder switch { - case *decodeProtoJson: - topologyDecoder = JsonTopologyDecoder{} + case *decodeProtoJSON: + topologyDecoder = JSONTopologyDecoder{} case *decodeProto: topologyDecoder = ProtoTopologyDecoder{} default: topologyDecoder = PlainTopologyDecoder{} } - return topologyDecoder.decode(resolved, conn, ctx, wr, *long) + return topologyDecoder.decode(ctx, resolved, conn, wr, *long) } func commandTopoCp(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -176,15 +181,21 @@ func copyFileToTopo(ctx context.Context, ts *topo.Server, cell, from, to string) return err } +// TopologyDecoder interface for exporting out a leaf node in a readable form type TopologyDecoder interface { - decode([]string, topo.Conn, context.Context, *wrangler.Wrangler, bool) error + decode(context.Context, []string, topo.Conn, *wrangler.Wrangler, bool) error } +// ProtoTopologyDecoder exports topo node as a proto type ProtoTopologyDecoder struct{} + +// PlainTopologyDecoder exports topo node as plain text type PlainTopologyDecoder struct{} -type JsonTopologyDecoder struct{} -func (d ProtoTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx context.Context, wr *wrangler.Wrangler, long bool) error { +// JSONTopologyDecoder exports topo node as JSON +type JSONTopologyDecoder struct{} + +func (d ProtoTopologyDecoder) decode(ctx context.Context, topoPaths []string, conn 
topo.Conn, wr *wrangler.Wrangler, long bool) error { hasError := false for _, topoPath := range topoPaths { data, version, err := conn.Get(ctx, topoPath) @@ -216,7 +227,7 @@ func (d ProtoTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx con return nil } -func (d PlainTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx context.Context, wr *wrangler.Wrangler, long bool) error { +func (d PlainTopologyDecoder) decode(ctx context.Context, topoPaths []string, conn topo.Conn, wr *wrangler.Wrangler, long bool) error { hasError := false for _, topoPath := range topoPaths { data, version, err := conn.Get(ctx, topoPath) @@ -242,7 +253,7 @@ func (d PlainTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx con return nil } -func (d JsonTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx context.Context, wr *wrangler.Wrangler, long bool) error { +func (d JSONTopologyDecoder) decode(ctx context.Context, topoPaths []string, conn topo.Conn, wr *wrangler.Wrangler, long bool) error { hasError := false var jsonData []interface{} for _, topoPath := range topoPaths { diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 93d6e55bfa7..0102dff05fd 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -99,9 +99,9 @@ import ( "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" + "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/json2" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/sync2" hk "vitess.io/vitess/go/vt/hook" @@ -117,8 +117,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/wrangler" - replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -309,17 
+307,20 @@ var commands = []commandGroup{ {"MoveTables", commandMoveTables, "[-cells=] [-tablet_types=] -workflow= ", `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`}, + {"Migrate", commandMigrate, + "[-cells=] [-tablet_types=] -workflow= ", + `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`}, {"DropSources", commandDropSources, "[-dry_run] [-rename_tables] ", "After a MoveTables or Resharding workflow cleanup unused artifacts like source tables, source shards and blacklists"}, {"CreateLookupVindex", commandCreateLookupVindex, - "[-cell=] [-tablet_types=] ", + "[-cell= DEPRECATED] [-cells=] [-tablet_types=] ", `Create and backfill a lookup vindex. 
the json_spec must contain the vindex and colvindex specs for the new lookup.`}, {"ExternalizeVindex", commandExternalizeVindex, ".", `Externalize a backfilled vindex.`}, {"Materialize", commandMaterialize, - `, example : '{"workflow": "aaa", "source_keyspace": "source", "target_keyspace": "target", "table_settings": [{"target_table": "customer", "source_expression": "select * from customer", "create_ddl": "copy"}]}'`, + `[-cells=] [-tablet_types=] , example : '{"workflow": "aaa", "source_keyspace": "source", "target_keyspace": "target", "table_settings": [{"target_table": "customer", "source_expression": "select * from customer", "create_ddl": "copy"}]}'`, "Performs materialization based on the json spec. Is used directly to form VReplication rules, with an optional step to copy table structure/DDL."}, {"SplitClone", commandSplitClone, " ", @@ -356,6 +357,9 @@ var commands = []commandGroup{ "Blocks until no new queries were observed on all tablets with the given tablet type in the specified keyspace. " + " This can be used as sanity check to ensure that the tablets were drained after running vtctl MigrateServedTypes " + " and vtgate is no longer using them. If -timeout is set, it fails when the timeout is reached."}, + {"Mount", commandMount, + "[-topo_type=etcd2|consul|zookeeper] [-topo_server=topo_url] [-topo_root=root_topo_node> [-unmount] [-list] [-show] []", + "Add/Remove/Display/List external cluster(s) to this vitess cluster"}, }, }, { @@ -398,8 +402,8 @@ var commands = []commandGroup{ "[-exclude_tables=''] [-include-views] [-skip-no-master] ", "Validates that the master schema from shard 0 matches the schema on all of the other tablets in the keyspace."}, {"ApplySchema", commandApplySchema, - "[-allow_long_unavailability] [-wait_replicas_timeout=10s] [-ddl_strategy=] {-sql= || -sql-file=} ", - "Applies the schema change to the specified keyspace on every master, running in parallel on all shards. The changes are then propagated to replicas via replication. 
If -allow_long_unavailability is set, schema changes affecting a large number of rows (and possibly incurring a longer period of unavailability) will not be rejected. ddl_strategy is used to intruct migrations via gh-ost or pt-osc with optional parameters"}, + "[-allow_long_unavailability] [-wait_replicas_timeout=10s] [-ddl_strategy=] [-request_context=] [-skip_preflight] {-sql= || -sql-file=} ", + "Applies the schema change to the specified keyspace on every master, running in parallel on all shards. The changes are then propagated to replicas via replication. If -allow_long_unavailability is set, schema changes affecting a large number of rows (and possibly incurring a longer period of unavailability) will not be rejected. -ddl_strategy is used to intruct migrations via vreplication, gh-ost or pt-osc with optional parameters. -request_context allows the user to specify a custom request context for online DDL migrations. If -skip_preflight, SQL goes directly to shards without going through sanity checks"}, {"CopySchemaShard", commandCopySchemaShard, "[-tables=,,...] [-exclude_tables=,,...] [-include-views] [-skip-verify] [-wait_replicas_timeout=10s] { || } ", "Copies the schema from a source shard's master (or a specific tablet) to a destination shard. 
The schema is applied directly on the master of the destination shard, and it is propagated to the replicas through binlogs."}, @@ -547,81 +551,6 @@ func fmtTabletAwkable(ti *topo.TabletInfo) string { return fmt.Sprintf("%v %v %v %v %v %v %v %v", topoproto.TabletAliasString(ti.Alias), keyspace, shard, topoproto.TabletTypeLString(ti.Type), ti.Addr(), ti.MysqlAddr(), fmtMapAwkable(ti.Tags), mtst) } -func listTabletsByShard(ctx context.Context, wr *wrangler.Wrangler, keyspace, shard string) error { - tabletMap, err := wr.TopoServer().GetTabletMapForShard(ctx, keyspace, shard) - if err != nil { - return err - } - var trueMasterTimestamp time.Time - for _, ti := range tabletMap { - if ti.Type == topodatapb.TabletType_MASTER { - masterTimestamp := ti.GetMasterTermStartTime() - if masterTimestamp.After(trueMasterTimestamp) { - trueMasterTimestamp = masterTimestamp - } - } - } - for _, ti := range tabletMap { - masterTimestamp := ti.GetMasterTermStartTime() - if ti.Type == topodatapb.TabletType_MASTER && masterTimestamp.Before(trueMasterTimestamp) { - ti.Type = topodatapb.TabletType_UNKNOWN - } - wr.Logger().Printf("%v\n", fmtTabletAwkable(ti)) - } - return nil -} - -func dumpAllTablets(ctx context.Context, wr *wrangler.Wrangler, cell string) error { - tablets, err := topotools.GetAllTablets(ctx, wr.TopoServer(), cell) - if err != nil { - return err - } - // It is possible that an old master has not yet updated it's type in the topo - // In that case, report its type as UNKNOWN - // It used to be MASTER, and it is supposed to be REPLICA/SPARE eventually - trueMasterTimestamps := findTrueMasterTimestamps(tablets) - for _, ti := range tablets { - key := ti.Keyspace + "." 
+ ti.Shard - masterTimestamp := ti.GetMasterTermStartTime() - if ti.Type == topodatapb.TabletType_MASTER && masterTimestamp.Before(trueMasterTimestamps[key]) { - ti.Type = topodatapb.TabletType_UNKNOWN - } - wr.Logger().Printf("%v\n", fmtTabletAwkable(ti)) - } - return nil -} - -func findTrueMasterTimestamps(tablets []*topo.TabletInfo) map[string]time.Time { - result := make(map[string]time.Time) - for _, ti := range tablets { - key := ti.Keyspace + "." + ti.Shard - if v, ok := result[key]; !ok { - result[key] = ti.GetMasterTermStartTime() - } else { - if ti.GetMasterTermStartTime().After(v) { - result[key] = ti.GetMasterTermStartTime() - } - } - } - return result -} - -func dumpTablets(ctx context.Context, wr *wrangler.Wrangler, tabletAliases []*topodatapb.TabletAlias) error { - tabletMap, err := wr.TopoServer().GetTabletMap(ctx, tabletAliases) - if err != nil { - return err - } - for _, tabletAlias := range tabletAliases { - ti, ok := tabletMap[topoproto.TabletAliasString(tabletAlias)] - if !ok { - wr.Logger().Warningf("failed to load tablet %v", tabletAlias) - } else { - wr.Logger().Printf("%v\n", fmtTabletAwkable(ti)) - } - } - return nil -} - // getFileParam returns a string containing either flag is not "", // or the content of the file named flagFile func getFileParam(flag, flagFile, name string) (string, error) { @@ -723,19 +652,6 @@ func parseTabletType(param string, types []topodatapb.TabletType) (topodatapb.Ta return tabletType, nil } -// parseServingTabletType3 parses the tablet type into the enum, -// and makes sure the enum is of serving type (MASTER, REPLICA, RDONLY/BATCH) -func parseServingTabletType3(param string) (topodatapb.TabletType, error) { - servedType, err := topoproto.ParseTabletType(param) - if err != nil { - return topodatapb.TabletType_UNKNOWN, err - } - if !topo.IsInServingGraph(servedType) { - return topodatapb.TabletType_UNKNOWN, fmt.Errorf("served_type has to be in the serving graph, not %v", param) - } - return servedType, nil -} - 
func commandInitTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { dbNameOverride := subFlags.String("db_name_override", "", "Overrides the name of the database that the vttablet uses") allowUpdate := subFlags.Bool("allow_update", false, "Use this flag to force initialization if a tablet with the same name already exists. Use with caution.") @@ -1046,7 +962,7 @@ func commandRefreshStateByShard(ctx context.Context, wr *wrangler.Wrangler, subF if *cellsStr != "" { cells = strings.Split(*cellsStr, ",") } - return wr.RefreshTabletsByShard(ctx, si, nil /* tabletTypes */, cells) + return wr.RefreshTabletsByShard(ctx, si, cells) } func commandRunHealthCheck(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -1109,7 +1025,7 @@ func commandWaitForDrain(ctx context.Context, wr *wrangler.Wrangler, subFlags *f if err != nil { return err } - servedType, err := parseServingTabletType3(subFlags.Arg(1)) + servedType, err := topo.ParseServingTabletType(subFlags.Arg(1)) if err != nil { return err } @@ -1321,19 +1237,23 @@ func commandShardReplicationPositions(ctx context.Context, wr *wrangler.Wrangler if err != nil { return err } - tablets, stats, err := wr.ShardReplicationStatuses(ctx, keyspace, shard) - if tablets == nil { + + resp, err := wr.VtctldServer().ShardReplicationPositions(ctx, &vtctldatapb.ShardReplicationPositionsRequest{ + Keyspace: keyspace, + Shard: shard, + }) + if err != nil { return err } lines := make([]string, 0, 24) - for _, rt := range sortReplicatingTablets(tablets, stats) { + for _, rt := range cli.SortedReplicatingTablets(resp.TabletMap, resp.ReplicationStatuses) { status := rt.Status - ti := rt.TabletInfo + tablet := rt.Tablet if status == nil { - lines = append(lines, fmtTabletAwkable(ti)+" ") + lines = append(lines, cli.MarshalTabletAWK(tablet)+" ") } else { - lines = append(lines, fmtTabletAwkable(ti)+fmt.Sprintf(" %v %v", status.Position, 
status.SecondsBehindMaster)) + lines = append(lines, cli.MarshalTabletAWK(tablet)+fmt.Sprintf(" %v %v", status.Position, status.SecondsBehindMaster)) } } for _, l := range lines { @@ -1353,7 +1273,21 @@ func commandListShardTablets(ctx context.Context, wr *wrangler.Wrangler, subFlag if err != nil { return err } - return listTabletsByShard(ctx, wr, keyspace, shard) + + resp, err := wr.VtctldServer().GetTablets(ctx, &vtctldatapb.GetTabletsRequest{ + Keyspace: keyspace, + Shard: shard, + Strict: false, + }) + if err != nil { + return err + } + + for _, tablet := range resp.Tablets { + wr.Logger().Printf("%v\n", cli.MarshalTabletAWK(tablet)) + } + + return nil } func commandSetShardIsMasterServing(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -1390,7 +1324,7 @@ func commandUpdateSrvKeyspacePartition(ctx context.Context, wr *wrangler.Wrangle if err != nil { return err } - tabletType, err := parseServingTabletType3(subFlags.Arg(1)) + tabletType, err := topo.ParseServingTabletType(subFlags.Arg(1)) if err != nil { return err } @@ -1422,7 +1356,7 @@ func commandSetShardTabletControl(ctx context.Context, wr *wrangler.Wrangler, su if err != nil { return err } - tabletType, err := parseServingTabletType3(subFlags.Arg(1)) + tabletType, err := topo.ParseServingTabletType(subFlags.Arg(1)) if err != nil { return err } @@ -1679,7 +1613,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags } if len(servedFrom) > 0 { for name, value := range servedFrom { - tt, err := parseServingTabletType3(name) + tt, err := topo.ParseServingTabletType(name) if err != nil { return err } @@ -1897,6 +1831,10 @@ func commandReshard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.F cells := subFlags.String("cells", "", "Cell(s) or CellAlias(es) (comma-separated) to replicate from.") tabletTypes := subFlags.String("tablet_types", "", "Source tablet types to replicate from.") skipSchemaCopy := 
subFlags.Bool("skip_schema_copy", false, "Skip copying of schema to targets") + + autoStart := subFlags.Bool("auto_start", true, "If false, streams will start in the Stopped state and will need to be explicitly started") + stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed") + if err := subFlags.Parse(args); err != nil { return err } @@ -1909,7 +1847,8 @@ func commandReshard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.F } source := strings.Split(subFlags.Arg(1), ",") target := strings.Split(subFlags.Arg(2), ",") - return wr.Reshard(ctx, keyspace, workflow, source, target, *skipSchemaCopy, *cells, *tabletTypes) + return wr.Reshard(ctx, keyspace, workflow, source, target, *skipSchemaCopy, *cells, + *tabletTypes, *autoStart, *stopAfterCopy) } func commandMoveTables(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -1925,6 +1864,9 @@ func commandMoveTables(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla allTables := subFlags.Bool("all", false, "Move all tables from the source keyspace") excludes := subFlags.String("exclude", "", "Tables to exclude (comma-separated) if -all is specified") + autoStart := subFlags.Bool("auto_start", true, "If false, streams will start in the Stopped state and will need to be explicitly started") + stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed") + if err := subFlags.Parse(args); err != nil { return err } @@ -1947,7 +1889,8 @@ func commandMoveTables(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla source := subFlags.Arg(0) target := subFlags.Arg(1) tableSpecs := subFlags.Arg(2) - return wr.MoveTables(ctx, *workflow, source, target, tableSpecs, *cells, *tabletTypes, *allTables, *excludes) + return wr.MoveTables(ctx, *workflow, source, target, tableSpecs, *cells, *tabletTypes, *allTables, + *excludes, *autoStart, *stopAfterCopy, "") } 
// VReplicationWorkflowAction defines subcommands passed to vtctl for movetables or reshard @@ -1964,6 +1907,21 @@ const ( vReplicationWorkflowActionGetState = "getstate" ) +func commandMigrate(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MigrateWorkflow) +} + +// getSourceKeyspace expects a keyspace of the form "externalClusterName.keyspaceName" and returns the components +func getSourceKeyspace(clusterKeyspace string) (clusterName string, sourceKeyspace string, err error) { + splits := strings.Split(clusterKeyspace, ".") + if len(splits) != 2 { + return "", "", fmt.Errorf("invalid format for external source cluster: %s", clusterKeyspace) + } + return splits[0], splits[1], nil +} + +// commandVRWorkflow is the common entry point for MoveTables/Reshard/Migrate workflows +// FIXME: this needs a refactor. Also validations for params need to be done per workflow type func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string, workflowType wrangler.VReplicationWorkflowType) error { @@ -1974,12 +1932,19 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla reverseReplication := subFlags.Bool("reverse_replication", true, "Also reverse the replication") keepData := subFlags.Bool("keep_data", false, "Do not drop tables or shards (if true, only vreplication artifacts are cleaned up)") - sourceKeyspace := subFlags.String("source", "", "Source keyspace") + autoStart := subFlags.Bool("auto_start", true, "If false, streams will start in the Stopped state and will need to be explicitly started") + stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed") + + // MoveTables and Migrate params tables := subFlags.String("tables", "", "A table spec or a list of tables") allTables := subFlags.Bool("all", false, "Move all tables from the source 
keyspace") excludes := subFlags.String("exclude", "", "Tables to exclude (comma-separated) if -all is specified") + sourceKeyspace := subFlags.String("source", "", "Source keyspace") + + // MoveTables-only params renameTables := subFlags.Bool("rename_tables", false, "Rename tables instead of dropping them") + // Reshard params sourceShards := subFlags.String("source_shards", "", "Source shards") targetShards := subFlags.String("target_shards", "", "Target shards") skipSchemaCopy := subFlags.Bool("skip_schema_copy", false, "Skip copying of schema to target shards") @@ -2009,6 +1974,8 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla TargetKeyspace: target, Workflow: workflow, DryRun: *dryRun, + AutoStart: *autoStart, + StopAfterCopy: *stopAfterCopy, } printDetails := func() error { @@ -2052,14 +2019,37 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla //TODO: check if invalid parameters were passed in that do not apply to this action originalAction := action action = strings.ToLower(action) // allow users to input action in a case-insensitive manner + if workflowType == wrangler.MigrateWorkflow { + switch action { + case vReplicationWorkflowActionCreate, vReplicationWorkflowActionCancel, vReplicationWorkflowActionComplete: + default: + return fmt.Errorf("invalid action for Migrate: %s", action) + } + } + switch action { case vReplicationWorkflowActionCreate: switch workflowType { - case wrangler.MoveTablesWorkflow: + case wrangler.MoveTablesWorkflow, wrangler.MigrateWorkflow: + var sourceTopo *topo.Server + var externalClusterName string + + sourceTopo = wr.TopoServer() if *sourceKeyspace == "" { return fmt.Errorf("source keyspace is not specified") } - _, err := wr.TopoServer().GetKeyspace(ctx, *sourceKeyspace) + if workflowType == wrangler.MigrateWorkflow { + externalClusterName, *sourceKeyspace, err = getSourceKeyspace(*sourceKeyspace) + if err != nil { + return err + } + sourceTopo, err = 
sourceTopo.OpenExternalVitessClusterServer(ctx, externalClusterName) + if err != nil { + return err + } + } + + _, err := sourceTopo.GetKeyspace(ctx, *sourceKeyspace) if err != nil { wr.Logger().Errorf("keyspace %s not found", *sourceKeyspace) return err @@ -2072,7 +2062,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla vrwp.AllTables = *allTables vrwp.ExcludeTables = *excludes vrwp.Timeout = *timeout - workflowType = wrangler.MoveTablesWorkflow + vrwp.ExternalCluster = externalClusterName case wrangler.ReshardWorkflow: if *sourceShards == "" || *targetShards == "" { return fmt.Errorf("source and target shards are not specified") @@ -2081,8 +2071,6 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla vrwp.TargetShards = strings.Split(*targetShards, ",") vrwp.SkipSchemaCopy = *skipSchemaCopy vrwp.SourceKeyspace = target - workflowType = wrangler.ReshardWorkflow - log.Infof("params are %s, %s, %+v", *sourceShards, *targetShards, vrwp) default: return fmt.Errorf("unknown workflow type passed: %v", workflowType) } @@ -2103,12 +2091,13 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla case wrangler.MoveTablesWorkflow: vrwp.RenameTables = *renameTables case wrangler.ReshardWorkflow: + case wrangler.MigrateWorkflow: default: return fmt.Errorf("unknown workflow type passed: %v", workflowType) } vrwp.KeepData = *keepData } - + vrwp.WorkflowType = workflowType wf, err := wr.NewVReplicationWorkflow(ctx, workflowType, vrwp) if err != nil { log.Warningf("NewVReplicationWorkflow returned error %+v", wf) @@ -2168,7 +2157,12 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla if err != nil { return err } + if !*autoStart { + wr.Logger().Printf("Workflow has been created in Stopped state\n") + break + } wr.Logger().Printf("Waiting for workflow to start:\n") + type streamCount struct { total, running int64 } @@ -2256,6 +2250,8 @@ func 
commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla } func commandCreateLookupVindex(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + cells := subFlags.String("cells", "", "Source cells to replicate from.") + //TODO: keep -cell around for backward compatibility and remove it in a future version cell := subFlags.String("cell", "", "Cell to replicate from.") tabletTypes := subFlags.String("tablet_types", "", "Source tablet types to replicate from.") if err := subFlags.Parse(args); err != nil { @@ -2264,6 +2260,9 @@ func commandCreateLookupVindex(ctx context.Context, wr *wrangler.Wrangler, subFl if subFlags.NArg() != 2 { return fmt.Errorf("two arguments are required: keyspace and json_spec") } + if *cells == "" && *cell != "" { + *cells = *cell + } keyspace := subFlags.Arg(0) specs := &vschemapb.Keyspace{} if err := json2.Unmarshal([]byte(subFlags.Arg(1)), specs); err != nil { @@ -2283,6 +2282,8 @@ func commandExternalizeVindex(ctx context.Context, wr *wrangler.Wrangler, subFla } func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + cells := subFlags.String("cells", "", "Source cells to replicate from.") + tabletTypes := subFlags.String("tablet_types", "", "Source tablet types to replicate from.") if err := subFlags.Parse(args); err != nil { return err } @@ -2293,6 +2294,8 @@ func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl if err := json2.Unmarshal([]byte(subFlags.Arg(0)), ms); err != nil { return err } + ms.Cell = *cells + ms.TabletTypes = *tabletTypes return wr.Materialize(ctx, ms) } @@ -2380,7 +2383,7 @@ func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFl if err != nil { return err } - servedType, err := parseServingTabletType3(subFlags.Arg(1)) + servedType, err := topo.ParseServingTabletType(subFlags.Arg(1)) if err != nil { return err } @@ -2613,23 +2616,26 @@ func 
commandListAllTablets(ctx context.Context, wr *wrangler.Wrangler, subFlags if err := subFlags.Parse(args); err != nil { return err } + var cells []string - var err error + if subFlags.NArg() == 1 { cells = strings.Split(subFlags.Arg(0), ",") - } else { - cells, err = wr.TopoServer().GetKnownCells(ctx) - if err != nil { - return err - } } - for _, cell := range cells { - err := dumpAllTablets(ctx, wr, cell) - if err != nil { - return err - } + resp, err := wr.VtctldServer().GetTablets(ctx, &vtctldatapb.GetTabletsRequest{ + Cells: cells, + Strict: false, + }) + + if err != nil { + return err } + + for _, tablet := range resp.Tablets { + wr.Logger().Printf("%v\n", cli.MarshalTabletAWK(tablet)) + } + return nil } @@ -2650,7 +2656,20 @@ func commandListTablets(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl return err } } - return dumpTablets(ctx, wr, aliases) + + resp, err := wr.VtctldServer().GetTablets(ctx, &vtctldatapb.GetTabletsRequest{ + TabletAliases: aliases, + Strict: false, + }) + if err != nil { + return err + } + + for _, tablet := range resp.Tablets { + wr.Logger().Printf("%v\n", cli.MarshalTabletAWK(tablet)) + } + + return nil } func commandGetSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -2679,32 +2698,26 @@ func commandGetSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag excludeTableArray = strings.Split(*excludeTables, ",") } - sd, err := wr.GetSchema(ctx, tabletAlias, tableArray, excludeTableArray, *includeViews) + resp, err := wr.VtctldServer().GetSchema(ctx, &vtctldatapb.GetSchemaRequest{ + TabletAlias: tabletAlias, + Tables: tableArray, + ExcludeTables: excludeTableArray, + IncludeViews: *includeViews, + TableNamesOnly: *tableNamesOnly, + TableSizesOnly: *tableSizesOnly, + }) if err != nil { return err } + if *tableNamesOnly { - for _, td := range sd.TableDefinitions { + for _, td := range resp.Schema.TableDefinitions { wr.Logger().Printf("%v\n", td.Name) } return nil } 
- if *tableSizesOnly { - sizeTds := make([]*tabletmanagerdatapb.TableDefinition, len(sd.TableDefinitions)) - for i, td := range sd.TableDefinitions { - sizeTds[i] = &tabletmanagerdatapb.TableDefinition{ - Name: td.Name, - Type: td.Type, - RowCount: td.RowCount, - DataLength: td.DataLength, - } - } - - sd.TableDefinitions = sizeTds - } - - return printJSON(wr.Logger(), sd) + return printJSON(wr.Logger(), resp.Schema) } func commandReloadSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -2797,7 +2810,9 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl sql := subFlags.String("sql", "", "A list of semicolon-delimited SQL commands") sqlFile := subFlags.String("sql-file", "", "Identifies the file that contains the SQL commands") ddlStrategy := subFlags.String("ddl_strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") + requestContext := subFlags.String("request_context", "", "For Only DDL, optionally supply a custom unique string used as context for the migration(s) in this command. 
By default a unique context is auto-generated by Vitess") waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", wrangler.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") + skipPreflight := subFlags.Bool("skip_preflight", false, "Skip pre-apply schema checks, and dircetly forward schema change query to shards") if err := subFlags.Parse(args); err != nil { return err } @@ -2814,13 +2829,18 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *fl if err != nil { return err } - requestContext := fmt.Sprintf("vtctl:%s", executionUUID) - executor := schemamanager.NewTabletExecutor(requestContext, wr, *waitReplicasTimeout) + if *requestContext == "" { + *requestContext = fmt.Sprintf("vtctl:%s", executionUUID) + } + executor := schemamanager.NewTabletExecutor(*requestContext, wr, *waitReplicasTimeout) if *allowLongUnavailability { executor.AllowBigSchemaChange() } + if *skipPreflight { + executor.SkipPreflight() + } if err := executor.SetDDLStrategy(*ddlStrategy); err != nil { - return nil + return err } return schemamanager.Run( @@ -2901,6 +2921,34 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag } query = `update _vt.schema_migrations set migration_status='cancel-all'` } + case "revert": + { + if arg == "" { + return fmt.Errorf("UUID required") + } + uuid = arg + contextUUID, err := schema.CreateUUID() + if err != nil { + return err + } + requestContext := fmt.Sprintf("vtctl:%s", contextUUID) + + onlineDDL, err := schema.NewOnlineDDL(keyspace, "", fmt.Sprintf("revert %s", uuid), schema.DDLStrategyOnline, "", requestContext) + if err != nil { + return err + } + conn, err := wr.TopoServer().ConnForCell(ctx, topo.GlobalCell) + if err != nil { + return err + } + err = onlineDDL.WriteTopo(ctx, conn, schema.MigrationRequestsPath()) + if err != nil { + return err + } + wr.Logger().Infof("UUID=%+v", onlineDDL.UUID) + 
wr.Logger().Printf("%s\n", onlineDDL.UUID) + return nil + } default: return fmt.Errorf("Unknown OnlineDDL command: %s", command) } @@ -3263,11 +3311,18 @@ func commandGetSrvKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags return fmt.Errorf("the and arguments are required for the GetSrvKeyspace command") } - srvKeyspace, err := wr.TopoServer().GetSrvKeyspace(ctx, subFlags.Arg(0), subFlags.Arg(1)) + cell := subFlags.Arg(0) + keyspace := subFlags.Arg(1) + + resp, err := wr.VtctldServer().GetSrvKeyspaces(ctx, &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: keyspace, + Cells: []string{cell}, + }) if err != nil { return err } - return printJSON(wr.Logger(), srvKeyspace) + + return printJSON(wr.Logger(), resp.SrvKeyspaces[cell]) } func commandGetSrvVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -3413,6 +3468,62 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag. return nil } +func commandMount(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + clusterType := subFlags.String("type", "vitess", "Specify cluster type: mysql or vitess, only vitess clustered right now") + unmount := subFlags.Bool("unmount", false, "Unmount cluster") + show := subFlags.Bool("show", false, "Display contents of cluster") + list := subFlags.Bool("list", false, "List all clusters") + + // vitess cluster params + topoType := subFlags.String("topo_type", "", "Type of cluster's topology server") + topoServer := subFlags.String("topo_server", "", "Server url of cluster's topology server") + topoRoot := subFlags.String("topo_root", "", "Root node of cluster's topology") + + if err := subFlags.Parse(args); err != nil { + return err + } + if *list { + clusters, err := wr.TopoServer().GetExternalVitessClusters(ctx) + if err != nil { + return err + } + wr.Logger().Printf("%s\n", strings.Join(clusters, ",")) + return nil + } + if subFlags.NArg() != 1 { + return 
fmt.Errorf("cluster name needs to be provided") + } + + clusterName := subFlags.Arg(0) + switch *clusterType { + case "vitess": + switch { + case *unmount: + return wr.UnmountExternalVitessCluster(ctx, clusterName) + case *show: + vci, err := wr.TopoServer().GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return err + } + if vci == nil { + return fmt.Errorf("there is no vitess cluster named %s", clusterName) + } + data, err := json.Marshal(vci) + if err != nil { + return err + } + wr.Logger().Printf("%s\n", string(data)) + return nil + default: + return wr.MountExternalVitessCluster(ctx, clusterName, *topoType, *topoServer, *topoRoot) + } + case "mysql": + return fmt.Errorf("mysql cluster type not yet supported") + default: + return fmt.Errorf("cluster type can be only one of vitess or mysql") + } +} + func commandGenerateShardRanges(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { numShards := subFlags.Int("num_shards", 2, "Number of shards to generate shard ranges for.") @@ -3502,62 +3613,6 @@ func commandPanic(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.Fla panic(fmt.Errorf("this command panics on purpose")) } -type rTablet struct { - *topo.TabletInfo - *replicationdatapb.Status -} - -type rTablets []*rTablet - -func (rts rTablets) Len() int { return len(rts) } - -func (rts rTablets) Swap(i, j int) { rts[i], rts[j] = rts[j], rts[i] } - -// Sort for tablet replication. -// Tablet type first (with master first), then replication positions. -func (rts rTablets) Less(i, j int) bool { - l, r := rts[i], rts[j] - // l or r ReplicationStatus would be nil if we failed to get - // the position (put them at the beginning of the list) - if l.Status == nil { - return r.Status != nil - } - if r.Status == nil { - return false - } - // the type proto has MASTER first, so sort by that. Will show - // the MASTER first, then each replica type sorted by - // replication position. 
- if l.Type < r.Type { - return true - } - if l.Type > r.Type { - return false - } - // then compare replication positions - lpos, err := mysql.DecodePosition(l.Position) - if err != nil { - return true - } - rpos, err := mysql.DecodePosition(r.Position) - if err != nil { - return false - } - return !lpos.AtLeast(rpos) -} - -func sortReplicatingTablets(tablets []*topo.TabletInfo, stats []*replicationdatapb.Status) []*rTablet { - rtablets := make([]*rTablet, len(tablets)) - for i, status := range stats { - rtablets[i] = &rTablet{ - TabletInfo: tablets[i], - Status: status, - } - } - sort.Sort(rTablets(rtablets)) - return rtablets -} - // printJSON will print the JSON version of the structure to the logger. func printJSON(logger logutil.Logger, val interface{}) error { data, err := MarshalJSON(val) diff --git a/go/vt/vtctl/workflow/doc.go b/go/vt/vtctl/workflow/doc.go new file mode 100644 index 00000000000..c334470320f --- /dev/null +++ b/go/vt/vtctl/workflow/doc.go @@ -0,0 +1,45 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package workflow defines types and functions for working with Vitess workflows. + +This is still a very rough sketch, far from a final API, but I want to document +some things here as I go: + +(1) The lines between package workflow and package workflow/vexec are, uh, + blurry at best, and definitely need serious thinking and refinement. Maybe + there shouldn't even be two separate packages at all. 
The reason I have the + two packages right now is because I'm operating under the assumption that + there are workflows that are vexec, and then there are other workflows. If + it's true that all workflows are vexec workflows, then probably one single + package could make more sense. For now, two packages seems the way to go, + but like I said, the boundaries are blurry, and things that belong in one + package are in the other, because I haven't gone back and moved things + around. +(2) I'm aiming for this to be a drop-in replacement (more or less) for the + function calls in go/vt/wrangler. However, I'd rather define a better + abstraction if it means having to rewrite even significant portions of the + existing wrangler code to adapt to it, than make a subpar API in the name of + backwards compatibility. I'm not sure if that's a tradeoff I'll even need to + consider in the future, but I'm putting a stake in the ground on which side + of that tradeoff I intend to fall, should it come to it. +(3) Eventually we'll need to consider how the online schema migration workflows + fit into this. I'm trying to at least be somewhat abstract in the + vexec / queryplanner APIs to fit with the QueryParams thing that wrangler + uses, which _should_ work, but who knows?? Time will tell. +*/ +package workflow diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go new file mode 100644 index 00000000000..86c02718d4f --- /dev/null +++ b/go/vt/vtctl/workflow/server.go @@ -0,0 +1,341 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "k8s.io/apimachinery/pkg/util/sets" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/workflow/vexec" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +var ( + // ErrInvalidWorkflow is a catchall error type for conditions that should be + // impossible when operating on a workflow. + ErrInvalidWorkflow = errors.New("invalid workflow") + // ErrMultipleSourceKeyspaces occurs when a workflow somehow has multiple + // source keyspaces across different shard primaries. This should be + // impossible. + ErrMultipleSourceKeyspaces = errors.New("multiple source keyspaces for a single workflow") + // ErrMultipleTargetKeyspaces occurs when a workflow somehow has multiple + // target keyspaces across different shard primaries. This should be + // impossible. + ErrMultipleTargetKeyspaces = errors.New("multiple target keyspaces for a single workflow") +) + +// Server provides an API to work with Vitess workflows, like vreplication +// workflows (MoveTables, Reshard, etc) and schema migration workflows. +// +// NB: This is in alpha, and you probably don't want to depend on it (yet!). +// Currently, it provides only a read-only API to vreplication workflows. Write +// actions on vreplication workflows, and schema migration workflows entirely, +// are not yet supported, but planned. +type Server struct { + ts *topo.Server + tmc tmclient.TabletManagerClient +} + +// NewServer returns a new server instance with the given topo.Server and +// TabletManagerClient. 
+func NewServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Server { + return &Server{ + ts: ts, + tmc: tmc, + } +} + +// GetWorkflows returns a list of all workflows that exist in a given keyspace, +// with some additional filtering depending on the request parameters (for +// example, ActiveOnly=true restricts the search to only workflows that are +// currently running). +// +// It has the same signature as the vtctlservicepb.VtctldServer's GetWorkflows +// rpc, and grpcvtctldserver delegates to this function. +func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflowsRequest) (*vtctldatapb.GetWorkflowsResponse, error) { + where := "" + if req.ActiveOnly { + where = "WHERE state <> 'Stopped'" + } + + query := fmt.Sprintf(` + SELECT + id, + workflow, + source, + pos, + stop_pos, + max_replication_lag, + state, + db_name, + time_updated, + transaction_timestamp, + message + FROM + _vt.vreplication + %s`, + where, + ) + + vx := vexec.NewVExec(req.Keyspace, "", s.ts, s.tmc) + results, err := vx.QueryContext(ctx, query) + if err != nil { + return nil, err + } + + workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results)) + sourceKeyspaceByWorkflow := make(map[string]string, len(results)) + sourceShardsByWorkflow := make(map[string]sets.String, len(results)) + targetKeyspaceByWorkflow := make(map[string]string, len(results)) + targetShardsByWorkflow := make(map[string]sets.String, len(results)) + maxVReplicationLagByWorkflow := make(map[string]float64, len(results)) + + // We guarantee the following invariants when this function is called for a + // given workflow: + // - workflow.Name != "" (more precisely, ".Name is set 'properly'") + // - workflowsMap[workflow.Name] == workflow + // - sourceShardsByWorkflow[workflow.Name] != nil + // - targetShardsByWorkflow[workflow.Name] != nil + // - workflow.ShardStatuses != nil + scanWorkflow := func(ctx context.Context, workflow *vtctldatapb.Workflow, row []sqltypes.Value, tablet 
*topo.TabletInfo) error { + id, err := evalengine.ToInt64(row[0]) + if err != nil { + return err + } + + var bls binlogdatapb.BinlogSource + if err := proto.UnmarshalText(row[2].ToString(), &bls); err != nil { + return err + } + + pos := row[3].ToString() + stopPos := row[4].ToString() + state := row[6].ToString() + dbName := row[7].ToString() + + timeUpdatedSeconds, err := evalengine.ToInt64(row[8]) + if err != nil { + return err + } + + transactionTimeSeconds, err := evalengine.ToInt64(row[9]) + if err != nil { + return err + } + + message := row[10].ToString() + + stream := &vtctldatapb.Workflow_Stream{ + Id: id, + Shard: tablet.Shard, + Tablet: tablet.Alias, + BinlogSource: &bls, + Position: pos, + StopPosition: stopPos, + State: state, + DbName: dbName, + TransactionTimestamp: &vttime.Time{ + Seconds: transactionTimeSeconds, + }, + TimeUpdated: &vttime.Time{ + Seconds: timeUpdatedSeconds, + }, + Message: message, + } + + stream.CopyStates, err = s.getWorkflowCopyStates(ctx, tablet, id) + if err != nil { + return err + } + + switch { + case strings.Contains(strings.ToLower(stream.Message), "error"): + stream.State = "Error" + case stream.State == "Running" && len(stream.CopyStates) > 0: + stream.State = "Copying" + case stream.State == "Running" && int64(time.Now().Second())-timeUpdatedSeconds > 10: + stream.State = "Lagging" + } + + shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString()) + shardStream, ok := workflow.ShardStreams[shardStreamKey] + if !ok { + ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer cancel() + + si, err := s.ts.GetShard(ctx, req.Keyspace, tablet.Shard) + if err != nil { + return err + } + + shardStream = &vtctldatapb.Workflow_ShardStream{ + Streams: nil, + TabletControls: si.TabletControls, + IsPrimaryServing: si.IsMasterServing, + } + + workflow.ShardStreams[shardStreamKey] = shardStream + } + + shardStream.Streams = append(shardStream.Streams, stream) + 
sourceShardsByWorkflow[workflow.Name].Insert(stream.BinlogSource.Shard) + targetShardsByWorkflow[workflow.Name].Insert(tablet.Shard) + + if ks, ok := sourceKeyspaceByWorkflow[workflow.Name]; ok && ks != stream.BinlogSource.Keyspace { + return fmt.Errorf("%w: workflow = %v, ks1 = %v, ks2 = %v", ErrMultipleSourceKeyspaces, workflow.Name, ks, stream.BinlogSource.Keyspace) + } + + sourceKeyspaceByWorkflow[workflow.Name] = stream.BinlogSource.Keyspace + + if ks, ok := targetKeyspaceByWorkflow[workflow.Name]; ok && ks != tablet.Keyspace { + return fmt.Errorf("%w: workflow = %v, ks1 = %v, ks2 = %v", ErrMultipleTargetKeyspaces, workflow.Name, ks, tablet.Keyspace) + } + + targetKeyspaceByWorkflow[workflow.Name] = tablet.Keyspace + + timeUpdated := time.Unix(timeUpdatedSeconds, 0) + vreplicationLag := time.Since(timeUpdated) + + if currentMaxLag, ok := maxVReplicationLagByWorkflow[workflow.Name]; ok { + if vreplicationLag.Seconds() > currentMaxLag { + maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() + } + } else { + maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() + } + + return nil + } + + for tablet, result := range results { + qr := sqltypes.Proto3ToResult(result) + + // In the old implementation, we knew we had at most one (0 <= N <= 1) + // workflow for each shard primary we queried. There might be multiple + // rows (streams) comprising that workflow, so we would aggregate the + // rows for a given primary into a single value ("the workflow", + // ReplicationStatusResult in the old types). + // + // In this version, we have many (N >= 0) workflows for each shard + // primary we queried, so we need to determine if each row corresponds + // to a workflow we're already aggregating, or if it's a workflow we + // haven't seen yet for that shard primary. We use the workflow name to + // dedupe for this. 
+ for _, row := range qr.Rows { + workflowName := row[1].ToString() + workflow, ok := workflowsMap[workflowName] + if !ok { + workflow = &vtctldatapb.Workflow{ + Name: workflowName, + ShardStreams: map[string]*vtctldatapb.Workflow_ShardStream{}, + } + + workflowsMap[workflowName] = workflow + sourceShardsByWorkflow[workflowName] = sets.NewString() + targetShardsByWorkflow[workflowName] = sets.NewString() + } + + if err := scanWorkflow(ctx, workflow, row, tablet); err != nil { + return nil, err + } + } + } + + workflows := make([]*vtctldatapb.Workflow, 0, len(workflowsMap)) + + for name, workflow := range workflowsMap { + sourceShards, ok := sourceShardsByWorkflow[name] + if !ok { + return nil, fmt.Errorf("%w: %s has no source shards", ErrInvalidWorkflow, name) + } + + sourceKeyspace, ok := sourceKeyspaceByWorkflow[name] + if !ok { + return nil, fmt.Errorf("%w: %s has no source keyspace", ErrInvalidWorkflow, name) + } + + targetShards, ok := targetShardsByWorkflow[name] + if !ok { + return nil, fmt.Errorf("%w: %s has no target shards", ErrInvalidWorkflow, name) + } + + targetKeyspace, ok := targetKeyspaceByWorkflow[name] + if !ok { + return nil, fmt.Errorf("%w: %s has no target keyspace", ErrInvalidWorkflow, name) + } + + maxVReplicationLag, ok := maxVReplicationLagByWorkflow[name] + if !ok { + return nil, fmt.Errorf("%w: %s has no tracked vreplication lag", ErrInvalidWorkflow, name) + } + + workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{ + Keyspace: sourceKeyspace, + Shards: sourceShards.List(), + } + + workflow.Target = &vtctldatapb.Workflow_ReplicationLocation{ + Keyspace: targetKeyspace, + Shards: targetShards.List(), + } + + workflow.MaxVReplicationLag = int64(maxVReplicationLag) + + workflows = append(workflows, workflow) + } + + return &vtctldatapb.GetWorkflowsResponse{ + Workflows: workflows, + }, nil +} + +func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, id int64) ([]*vtctldatapb.Workflow_Stream_CopyState, 
error) { + query := fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d", id) + qr, err := s.tmc.VReplicationExec(ctx, tablet.Tablet, query) + if err != nil { + return nil, err + } + + result := sqltypes.Proto3ToResult(qr) + if result == nil { + return nil, nil + } + + copyStates := make([]*vtctldatapb.Workflow_Stream_CopyState, len(result.Rows)) + for i, row := range result.Rows { + // These fields are technically varbinary, but this is close enough. + copyStates[i] = &vtctldatapb.Workflow_Stream_CopyState{ + Table: row[0].ToString(), + LastPk: row[1].ToString(), + } + } + + return copyStates, nil +} diff --git a/go/vt/vtctl/workflow/vexec/query_plan.go b/go/vt/vtctl/workflow/vexec/query_plan.go new file mode 100644 index 00000000000..f71e124b786 --- /dev/null +++ b/go/vt/vtctl/workflow/vexec/query_plan.go @@ -0,0 +1,116 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vexec + +import ( + "context" + "fmt" + "sync" + + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + querypb "vitess.io/vitess/go/vt/proto/query" +) + +// QueryPlan wraps a planned query produced by a QueryPlanner. It is safe to +// execute a QueryPlan repeatedly and in multiple goroutines. 
+type QueryPlan struct { + ParsedQuery *sqlparser.ParsedQuery + + workflow string + tmc tmclient.TabletManagerClient +} + +// Execute executes a QueryPlan on a single target. +func (qp *QueryPlan) Execute(ctx context.Context, target *topo.TabletInfo) (qr *querypb.QueryResult, err error) { + if qp.ParsedQuery == nil { + return nil, fmt.Errorf("%w: call PlanQuery on a query planner first", ErrUnpreparedQuery) + } + + targetAliasStr := target.AliasString() + + log.Infof("Running %v on %v", qp.ParsedQuery.Query, targetAliasStr) + defer func() { + if err != nil { + log.Warningf("Result on %v: %v", targetAliasStr, err) + + return + } + + log.Infof("Result on %v: %v", targetAliasStr, qr) + }() + + qr, err = qp.tmc.VReplicationExec(ctx, target.Tablet, qp.ParsedQuery.Query) + if err != nil { + return nil, err + } + + if qr.RowsAffected == 0 { + log.Infof("no matching streams found for workflows %s, tablet %s, query %s", qp.workflow, targetAliasStr, qp.ParsedQuery.Query) + } + + return qr, nil +} + +// ExecuteScatter executes a QueryPlan on multiple targets concurrently, +// returning a mapping of target tablet to querypb.QueryResult. Errors from +// individual targets are aggregated into a singular error. +func (qp *QueryPlan) ExecuteScatter(ctx context.Context, targets ...*topo.TabletInfo) (map[*topo.TabletInfo]*querypb.QueryResult, error) { + if qp.ParsedQuery == nil { + // This check is an "optimization" on error handling. We check here, + // even though we will check this during the individual Execute calls, + // so that we return one error, rather than the same error aggregated + // len(targets) times. 
+ return nil, fmt.Errorf("%w: call PlanQuery on a query planner first", ErrUnpreparedQuery) + } + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results = make(map[*topo.TabletInfo]*querypb.QueryResult, len(targets)) + ) + + for _, target := range targets { + wg.Add(1) + + go func(ctx context.Context, target *topo.TabletInfo) { + defer wg.Done() + + qr, err := qp.Execute(ctx, target) + if err != nil { + rec.RecordError(err) + + return + } + + m.Lock() + defer m.Unlock() + + results[target] = qr + }(ctx, target) + } + + wg.Wait() + + return results, rec.AggrError(vterrors.Aggregate) +} diff --git a/go/vt/vtctl/workflow/vexec/query_plan_test.go b/go/vt/vtctl/workflow/vexec/query_plan_test.go new file mode 100644 index 00000000000..ec4f6fab95d --- /dev/null +++ b/go/vt/vtctl/workflow/vexec/query_plan_test.go @@ -0,0 +1,332 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vexec + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +func TestQueryPlanExecute(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + plan QueryPlan + target *topo.TabletInfo + expected *querypb.QueryResult + shouldErr bool + errKind error + }{ + { + name: "success", + plan: QueryPlan{ + ParsedQuery: &sqlparser.ParsedQuery{ + Query: "SELECT id FROM _vt.vreplication", + }, + tmc: &testutil.TabletManagerClient{ + VReplicationExecResults: map[string]map[string]struct { + Result *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + "select id from _vt.vreplication": { + Result: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + }, + }, + }, + }, + target: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + expected: &querypb.QueryResult{ + RowsAffected: 1, + }, + shouldErr: false, + }, + { + name: "no rows affected", + plan: QueryPlan{ + ParsedQuery: &sqlparser.ParsedQuery{ + Query: "SELECT id FROM _vt.vreplication", + }, + tmc: &testutil.TabletManagerClient{ + VReplicationExecResults: map[string]map[string]struct { + Result *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + "select id from _vt.vreplication": { + Result: &querypb.QueryResult{ + RowsAffected: 0, + }, + }, + }, + }, + }, + }, + target: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + expected: &querypb.QueryResult{ + RowsAffected: 0, + }, + shouldErr: false, + }, + { + name: "error", + plan: QueryPlan{ + ParsedQuery: &sqlparser.ParsedQuery{ + Query: "SELECT id FROM _vt.vreplication", + }, + tmc: 
&testutil.TabletManagerClient{ + VReplicationExecResults: map[string]map[string]struct { + Result *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + "select id from _vt.vreplication": { + Error: assert.AnError, + }, + }, + }, + }, + }, + target: &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "unprepared query", + plan: QueryPlan{ + ParsedQuery: nil, + }, + shouldErr: true, + errKind: ErrUnpreparedQuery, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + qr, err := tt.plan.Execute(ctx, tt.target) + if tt.shouldErr { + assert.Error(t, err) + + if tt.errKind != nil { + assert.True(t, errors.Is(err, tt.errKind), "expected error kind (= %v), got = %v", tt.errKind, err) + } + + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.expected, qr) + }) + } +} + +func TestQueryPlanExecuteScatter(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + plan QueryPlan + targets []*topo.TabletInfo + // This is different from our actual return type because guaranteeing + // exact pointers in this table-driven style is a bit tough. 
+ expected map[string]*querypb.QueryResult + shouldErr bool + errKind error + }{ + { + name: "success", + plan: QueryPlan{ + ParsedQuery: &sqlparser.ParsedQuery{ + Query: "SELECT id FROM _vt.vreplication", + }, + tmc: &testutil.TabletManagerClient{ + VReplicationExecResults: map[string]map[string]struct { + Result *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + "select id from _vt.vreplication": { + Result: &querypb.QueryResult{ + RowsAffected: 10, + }, + }, + }, + "zone1-0000000101": { + "select id from _vt.vreplication": { + Result: &querypb.QueryResult{ + RowsAffected: 5, + }, + }, + }, + }, + }, + }, + targets: []*topo.TabletInfo{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + expected: map[string]*querypb.QueryResult{ + "zone1-0000000100": { + RowsAffected: 10, + }, + "zone1-0000000101": { + RowsAffected: 5, + }, + }, + shouldErr: false, + }, + { + name: "some targets fail", + plan: QueryPlan{ + ParsedQuery: &sqlparser.ParsedQuery{ + Query: "SELECT id FROM _vt.vreplication", + }, + tmc: &testutil.TabletManagerClient{ + VReplicationExecResults: map[string]map[string]struct { + Result *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + "select id from _vt.vreplication": { + Error: assert.AnError, + }, + }, + "zone1-0000000101": { + "select id from _vt.vreplication": { + Result: &querypb.QueryResult{ + RowsAffected: 5, + }, + }, + }, + }, + }, + }, + targets: []*topo.TabletInfo{ + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + shouldErr: true, + }, + { + name: "unprepared query", + plan: QueryPlan{ + ParsedQuery: nil, + }, + shouldErr: true, + errKind: ErrUnpreparedQuery, 
+ }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + results, err := tt.plan.ExecuteScatter(ctx, tt.targets...) + if tt.shouldErr { + assert.Error(t, err) + + if tt.errKind != nil { + assert.True(t, errors.Is(err, tt.errKind), "expected error kind (= %v), got = %v", tt.errKind, err) + } + + return + } + + assert.NoError(t, err) + + resultsByAlias := make(map[string]*querypb.QueryResult, len(results)) + for tablet, qr := range results { + resultsByAlias[tablet.AliasString()] = qr + } + + assert.Equal(t, tt.expected, resultsByAlias) + }) + } +} diff --git a/go/vt/vtctl/workflow/vexec/query_planner.go b/go/vt/vtctl/workflow/vexec/query_planner.go new file mode 100644 index 00000000000..e562c7fc806 --- /dev/null +++ b/go/vt/vtctl/workflow/vexec/query_planner.go @@ -0,0 +1,326 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vexec + +import ( + "errors" + "fmt" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tmclient" +) + +var ( // Query planning errors. + // ErrCannotUpdateImmutableColumn is returned when attempting to plan a + // query that updates a column that should be treated as immutable. 
+ ErrCannotUpdateImmutableColumn = errors.New("cannot update immutable column") + // ErrUnsupportedQueryConstruct is returned when a particular query + // construct is unsupported by a QueryPlanner, despite the more general kind + // of query being supported. + // + // For example, VReplication supports DELETEs, but does not support DELETEs + // with LIMIT clauses, so planning a "DELETE ... LIMIT" will return + // ErrUnsupportedQueryConstruct rather than a "CREATE TABLE", which would + // return an ErrUnsupportedQuery. + ErrUnsupportedQueryConstruct = errors.New("unsupported query construct") +) + +var ( // Query execution errors. + // ErrUnpreparedQuery is returned when attempting to execute an unprepared + // QueryPlan. + ErrUnpreparedQuery = errors.New("attempted to execute unprepared query") +) + +// QueryPlanner defines the interface that VExec uses to build QueryPlans for +// various vexec workflows. A given vexec table, which is to say a table in the +// "_vt" database, will have at most one QueryPlanner implementation, which is +// responsible for defining both what queries are supported for that table, as +// well as how to build plans for those queries. +// +// VReplicationQueryPlanner is a good example implementation to refer to. +type QueryPlanner interface { + // (NOTE:@ajm188) I don't think this method fits on the query planner. To + // me, especially given that it's only implemented by the vrep query planner + // in the old implementation (the schema migration query planner no-ops this + // method), this fits better on our workflow.Manager struct, probably as a + // method called something like "VReplicationExec(ctx, query, Options{DryRun: true})" + // DryRun(ctx context.Context) error + + // PlanQuery constructs and returns a QueryPlan for a given statement. The + // resulting QueryPlan is suitable for repeated, concurrent use. 
+ PlanQuery(stmt sqlparser.Statement) (*QueryPlan, error) + // QueryParams returns a struct of column parameters the QueryPlanner uses. + // It is used primarily to abstract the adding of default WHERE clauses to + // queries by a private function of this package, and may be removed from + // the interface later. + QueryParams() QueryParams +} + +// QueryParams is a struct that QueryPlanner implementations can provide to +// control the addition of default WHERE clauses to their queries. +type QueryParams struct { + // DBName is the value that the column referred to by DBNameColumn should + // equal in a WHERE clause, if set. + DBName string + // DBNameColumn is the name of the column that DBName should equal in a + // WHERE clause, if set. + DBNameColumn string + // Workflow is the value that the column referred to by WorkflowColumn + // should equal in a WHERE clause, if set. + Workflow string + // WorkflowColumn is the name of the column that Workflow should equal in a + // WHERE clause, if set. + WorkflowColumn string +} + +// VReplicationQueryPlanner implements the QueryPlanner interface for queries on +// the _vt.vreplication table. +type VReplicationQueryPlanner struct { + tmc tmclient.TabletManagerClient + + dbname string + workflow string +} + +// NewVReplicationQueryPlanner returns a new VReplicationQueryPlanner. It is +// valid to pass empty strings for both the dbname and workflow parameters. +func NewVReplicationQueryPlanner(tmc tmclient.TabletManagerClient, workflow string, dbname string) *VReplicationQueryPlanner { + return &VReplicationQueryPlanner{ + tmc: tmc, + dbname: dbname, + workflow: workflow, + } +} + +// PlanQuery is part of the QueryPlanner interface. +// +// For vreplication query planners, only SELECT, UPDATE, and DELETE queries are +// supported. +// +// For UPDATE queries, ORDER BY and LIMIT clauses are not supported. Attempting +// to update vreplication.id is an error. 
+// +// For DELETE queries, USING, PARTITION, ORDER BY, and LIMIT clauses are not +// supported. +func (planner *VReplicationQueryPlanner) PlanQuery(stmt sqlparser.Statement) (plan *QueryPlan, err error) { + switch stmt := stmt.(type) { + case *sqlparser.Select: + plan, err = planner.planSelect(stmt) + case *sqlparser.Insert: + err = ErrUnsupportedQuery + case *sqlparser.Update: + plan, err = planner.planUpdate(stmt) + case *sqlparser.Delete: + plan, err = planner.planDelete(stmt) + default: + err = ErrUnsupportedQuery + } + + if err != nil { + return nil, fmt.Errorf("%w: %s", err, sqlparser.String(stmt)) + } + + return plan, nil +} + +// QueryParams is part of the QueryPlanner interface. A VReplicationQueryPlanner +// will attach the following WHERE clauses iff (a) DBName, Workflow are set, +// respectively, and (b) db_name and workflow do not appear in the original +// query's WHERE clause: +// +// WHERE (db_name = {{ .DBName }} AND)? (workflow = {{ .Workflow }} AND)? {{ .OriginalWhere }} +func (planner *VReplicationQueryPlanner) QueryParams() QueryParams { + return QueryParams{ + DBName: planner.dbname, + DBNameColumn: "db_name", + Workflow: planner.workflow, + WorkflowColumn: "workflow", + } +} + +func (planner *VReplicationQueryPlanner) planDelete(del *sqlparser.Delete) (*QueryPlan, error) { + if del.Targets != nil { + return nil, fmt.Errorf( + "%w: DELETE must not have USING clause (have: %v): %v", + ErrUnsupportedQueryConstruct, + del.Targets, + sqlparser.String(del), + ) + } + + if del.Partitions != nil { + return nil, fmt.Errorf( + "%w: DELETE must not have explicit partitions (have: %v): %v", + ErrUnsupportedQueryConstruct, + del.Partitions, + sqlparser.String(del), + ) + } + + if del.OrderBy != nil || del.Limit != nil { + return nil, fmt.Errorf( + "%w: DELETE must not have explicit ordering (have: %v) or limit clauses (have: %v): %v", + ErrUnsupportedQueryConstruct, + del.OrderBy, + del.Limit, + sqlparser.String(del), + ) + } + + del.Where = 
addDefaultWheres(planner, del.Where) + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("%v", del) + + return &QueryPlan{ + ParsedQuery: buf.ParsedQuery(), + workflow: planner.workflow, + tmc: planner.tmc, + }, nil +} + +func (planner *VReplicationQueryPlanner) planSelect(sel *sqlparser.Select) (*QueryPlan, error) { + sel.Where = addDefaultWheres(planner, sel.Where) + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("%v", sel) + + return &QueryPlan{ + ParsedQuery: buf.ParsedQuery(), + workflow: planner.workflow, + tmc: planner.tmc, + }, nil +} + +func (planner *VReplicationQueryPlanner) planUpdate(upd *sqlparser.Update) (*QueryPlan, error) { + if upd.OrderBy != nil || upd.Limit != nil { + return nil, fmt.Errorf( + "%w: UPDATE must not have explicit ordering (have: %v) or limit clauses (have: %v): %v", + ErrUnsupportedQueryConstruct, + upd.OrderBy, + upd.Limit, + sqlparser.String(upd), + ) + } + + // For updates on the _vt.vreplication table, we ban updates to the `id` + // column, and allow updates to all other columns. 
+ for _, expr := range upd.Exprs { + if expr.Name.Name.EqualString("id") { + return nil, fmt.Errorf( + "%w %+v: %v", + ErrCannotUpdateImmutableColumn, + expr.Name.Name, + sqlparser.String(expr), + ) + } + } + + upd.Where = addDefaultWheres(planner, upd.Where) + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("%v", upd) + + return &QueryPlan{ + ParsedQuery: buf.ParsedQuery(), + workflow: planner.workflow, + tmc: planner.tmc, + }, nil +} + +func addDefaultWheres(planner QueryPlanner, where *sqlparser.Where) *sqlparser.Where { + cols := extractWhereComparisonColumns(where) + + params := planner.QueryParams() + hasDBNameCol := false + hasWorkflowCol := false + + for _, col := range cols { + switch col { + case params.DBNameColumn: + hasDBNameCol = true + case params.WorkflowColumn: + hasWorkflowCol = true + } + } + + newWhere := where + + if !hasDBNameCol { + expr := &sqlparser.ComparisonExpr{ + Left: &sqlparser.ColName{ + Name: sqlparser.NewColIdent(params.DBNameColumn), + }, + Operator: sqlparser.EqualOp, + Right: sqlparser.NewStrLiteral(params.DBName), + } + + switch newWhere { + case nil: + newWhere = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: expr, + } + default: + newWhere.Expr = &sqlparser.AndExpr{ + Left: newWhere.Expr, + Right: expr, + } + } + } + + if !hasWorkflowCol && params.Workflow != "" { + expr := &sqlparser.ComparisonExpr{ + Left: &sqlparser.ColName{ + Name: sqlparser.NewColIdent(params.WorkflowColumn), + }, + Operator: sqlparser.EqualOp, + Right: sqlparser.NewStrLiteral(params.Workflow), + } + + newWhere.Expr = &sqlparser.AndExpr{ + Left: newWhere.Expr, + Right: expr, + } + } + + return newWhere +} + +// extractWhereComparisonColumns extracts the column names used in AND-ed +// comparison expressions in a where clause, given the following assumptions: +// - (1) The column name is always the left-hand side of the comparison. +// - (2) There are no compound expressions within the where clause involving OR. 
+func extractWhereComparisonColumns(where *sqlparser.Where) []string { + if where == nil { + return nil + } + + exprs := sqlparser.SplitAndExpression(nil, where.Expr) + cols := make([]string, 0, len(exprs)) + + for _, expr := range exprs { + switch expr := expr.(type) { + case *sqlparser.ComparisonExpr: + if qualifiedName, ok := expr.Left.(*sqlparser.ColName); ok { + cols = append(cols, qualifiedName.Name.String()) + } + } + } + + return cols +} diff --git a/go/vt/vtctl/workflow/vexec/query_planner_test.go b/go/vt/vtctl/workflow/vexec/query_planner_test.go new file mode 100644 index 00000000000..a63fbb96a65 --- /dev/null +++ b/go/vt/vtctl/workflow/vexec/query_planner_test.go @@ -0,0 +1,244 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vexec + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/vtctl/workflow/vexec/testutil" +) + +func TestVReplicationQueryPlanner_PlanQuery(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + query string + err error + }{ + { + name: "basic select", + query: "SELECT id FROM _vt.vreplication", + err: nil, + }, + { + name: "insert not supported", + query: "INSERT INTO _vt.vreplication (id) VALUES (1)", + err: ErrUnsupportedQuery, + }, + { + name: "basic update", + query: "UPDATE _vt.vreplication SET workflow = 'my workflow'", + err: nil, + }, + { + name: "basic delete", + query: "DELETE FROM _vt.vreplication", + err: nil, + }, + { + name: "other query", + query: "CREATE TABLE foo (id INT(11) PRIMARY KEY NOT NULL) ENGINE=InnoDB", + err: ErrUnsupportedQuery, + }, + } + + planner := NewVReplicationQueryPlanner(nil, "", "") + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + stmt := testutil.StatementFromString(t, tt.query) + + _, err := planner.PlanQuery(stmt) + if tt.err != nil { + assert.True(t, errors.Is(err, tt.err), "expected err of type %v, got %v", tt.err, err) + + return + } + + assert.NoError(t, err) + }) + } +} + +func TestVReplicationQueryPlanner_planSelect(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + query string + expectedPlannedQuery string + }{ + { + name: "simple select", + query: "SELECT id FROM _vt.vreplication WHERE id > 10", + expectedPlannedQuery: "SELECT id FROM _vt.vreplication WHERE id > 10 AND db_name = 'vt_testkeyspace' AND workflow = 'testworkflow'", + }, + { + name: "select with workflow and dbname columns already in WHERE", + query: "SELECT id FROM _vt.vreplication WHERE id > 10 AND db_name = 'vt_testkeyspace' AND workflow = 'testworkflow'", + expectedPlannedQuery: "SELECT id FROM _vt.vreplication WHERE id > 10 AND db_name = 'vt_testkeyspace' AND workflow = 'testworkflow'", + 
}, + { + // In this case, the QueryParams for the planner (which have + // workflow = "testworkflow"; db_name = "vt_testkeyspace") are + // ignored because the WHERE clause was explicit. + name: "select with workflow and dbname columns with different values", + query: "SELECT id FROM _vt.vreplication WHERE id > 10 AND db_name = 'different_keyspace' AND workflow = 'otherworkflow'", + expectedPlannedQuery: "SELECT id FROM _vt.vreplication WHERE id > 10 AND db_name = 'different_keyspace' AND workflow = 'otherworkflow'", + }, + } + + planner := NewVReplicationQueryPlanner(nil, "testworkflow", "vt_testkeyspace") + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + stmt := testutil.StatementFromString(t, tt.query) + qp, err := planner.PlanQuery(stmt) + + assert.NoError(t, err) + assert.Equal(t, testutil.ParsedQueryFromString(t, tt.expectedPlannedQuery), qp.ParsedQuery) + }) + } +} + +func TestVReplicationQueryPlanner_planUpdate(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + planner *VReplicationQueryPlanner + query string + expectedPlannedQuery string + expectedErr error + }{ + { + name: "simple update", + planner: NewVReplicationQueryPlanner(nil, "testworkflow", "vt_testkeyspace"), + query: "UPDATE _vt.vreplication SET state = 'Running'", + expectedPlannedQuery: "UPDATE _vt.vreplication SET state = 'Running' WHERE db_name = 'vt_testkeyspace' AND workflow = 'testworkflow'", + expectedErr: nil, + }, + { + name: "including an ORDER BY is an error", + planner: NewVReplicationQueryPlanner(nil, "", ""), + query: "UPDATE _vt.vreplication SET state = 'Running' ORDER BY id DESC", + expectedErr: ErrUnsupportedQueryConstruct, + }, + { + name: "including a LIMIT is an error", + planner: NewVReplicationQueryPlanner(nil, "", ""), + query: "UPDATE _vt.vreplication SET state = 'Running' LIMIT 5", + expectedErr: ErrUnsupportedQueryConstruct, + }, + { + name: "cannot update id column", + planner: 
NewVReplicationQueryPlanner(nil, "", "vt_testkeyspace"), + query: "UPDATE _vt.vreplication SET id = 5", + expectedErr: ErrCannotUpdateImmutableColumn, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + stmt := testutil.StatementFromString(t, tt.query) + + qp, err := tt.planner.PlanQuery(stmt) + if tt.expectedErr != nil { + assert.True(t, errors.Is(err, tt.expectedErr), "expected err of type %q, got %q", tt.expectedErr, err) + + return + } + + assert.Equal(t, testutil.ParsedQueryFromString(t, tt.expectedPlannedQuery), qp.ParsedQuery) + }) + } +} + +func TestVReplicationQueryPlanner_planDelete(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + query string + expectedPlannedQuery string + expectedErr error + }{ + { + name: "simple delete", + query: "DELETE FROM _vt.vreplication WHERE id = 1", + expectedPlannedQuery: "DELETE FROM _vt.vreplication WHERE id = 1 AND db_name = 'vt_testkeyspace'", + expectedErr: nil, + }, + { + name: "DELETE with USING clause is not supported", + query: "DELETE FROM _vt.vreplication, _vt.schema_migrations USING _vt.vreplication INNER JOIN _vt.schema_migrations", + expectedErr: ErrUnsupportedQueryConstruct, + }, + { + name: "DELETE with a PARTITION clause is not supported", + query: "DELETE FROM _vt.vreplication PARTITION (p1)", + expectedErr: ErrUnsupportedQueryConstruct, + }, + { + name: "DELETE with ORDER BY is not supported", + query: "DELETE FROM _vt.vreplication ORDER BY id DESC", + expectedErr: ErrUnsupportedQueryConstruct, + }, + { + name: "DELETE with LIMIT is not supported", + query: "DELETE FROM _vt.vreplication LIMIT 5", + expectedErr: ErrUnsupportedQueryConstruct, + }, + } + + planner := NewVReplicationQueryPlanner(nil, "", "vt_testkeyspace") + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + stmt := testutil.StatementFromString(t, tt.query) + + qp, err := planner.PlanQuery(stmt) + if tt.expectedErr != 
nil { + assert.True(t, errors.Is(err, tt.expectedErr), "expected err of type %q, got %q", tt.expectedErr, err) + + return + } + + assert.Equal(t, testutil.ParsedQueryFromString(t, tt.expectedPlannedQuery), qp.ParsedQuery) + }) + } +} diff --git a/go/vt/vtctl/workflow/vexec/testutil/query.go b/go/vt/vtctl/workflow/vexec/testutil/query.go new file mode 100644 index 00000000000..3988f7a112f --- /dev/null +++ b/go/vt/vtctl/workflow/vexec/testutil/query.go @@ -0,0 +1,48 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// ParsedQueryFromString is a test helper that returns a *sqlparser.ParsedQuery +// from a plain string. It marks the test as a failure if the query cannot be +// parsed. +func ParsedQueryFromString(t *testing.T, query string) *sqlparser.ParsedQuery { + t.Helper() + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("%v", StatementFromString(t, query)) + + return buf.ParsedQuery() +} + +// StatementFromString is a test helper that returns a sqlparser.Statement from +// a plain string. It marks the test as a failure if the query cannot be parsed. 
+func StatementFromString(t *testing.T, query string) sqlparser.Statement { + t.Helper() + + stmt, err := sqlparser.Parse(query) + require.NoError(t, err, "could not parse query %v", query) + + return stmt +} diff --git a/go/vt/vtctl/workflow/vexec/vexec.go b/go/vt/vtctl/workflow/vexec/vexec.go new file mode 100644 index 00000000000..d61bd16ab31 --- /dev/null +++ b/go/vt/vtctl/workflow/vexec/vexec.go @@ -0,0 +1,235 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vexec + +import ( + "context" + "errors" + "fmt" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + querypb "vitess.io/vitess/go/vt/proto/query" +) + +const ( + // VExecTableQualifier is the qualifier that all tables supported by vexec + // are prefixed by. + VExecTableQualifier = "_vt" + + // SchemaMigrationsTableName is the unqualified name of the schema + // migrations table supported by vexec. + SchemaMigrationsTableName = "schema_migrations" + // VReplicationTableName is the unqualified name of the vreplication table + // supported by vexec. + VReplicationTableName = "vreplication" +) + +var ( // Topo lookup errors. + // ErrNoShardPrimary occurs when a shard is found with no serving + // primary. + ErrNoShardPrimary = errors.New("no primary found for shard") + // ErrNoShardsForKeyspace occurs when attempting to run a vexec on an empty + // keyspace. 
+ ErrNoShardsForKeyspace = errors.New("no shards found in keyspace") +) + +var ( // Query parsing and planning errors. + // ErrUnsupportedQuery occurs when attempting to run an unsupported query + // through vexec. + ErrUnsupportedQuery = errors.New("query not supported by vexec") + // ErrUnsupportedTable occurs when attempting to run vexec on an unsupported + // table. At the time of writing, this occurs when attempting to query any + // table other than _vt.vreplication. + ErrUnsupportedTable = errors.New("table not supported by vexec") +) + +// VExec provides the main interface to planning and executing vexec queries +// (normally, queries on tables in the `_vt` database). It currently supports +// some limited vreplication queries; this set of supported behavior will expand +// over time. It may be extended to support schema_migrations queries as well. +type VExec struct { + ts *topo.Server + tmc tmclient.TabletManagerClient + + keyspace string + workflow string + + // (TODO:@ajm188) Consider renaming this field to "targets", and then + // support different Strategy functions for loading target tablets from a + // topo.Server. + // + // For this, I'm currently thinking: + // type TargetStrategy func(ts *topo.Server) ([]*topo.TabletInfo, error) + // + // We _may_ want this if we ever want a vexec query to target anything other + // than "all of the shard primaries in a given keyspace", and I'm not sure + // about potential future usages yet. + primaries []*topo.TabletInfo + // (TODO:@ajm188) Similar to supporting a TargetStrategy for controlling how + // a VExec picks which tablets to query, we may also want an + // ExecutionStrategy (I'm far less sure about whether we would want this at + // all, or what its type definition might look like, than TargetStrategy), + // to support running in modes like: + // - Execute serially rather than concurrently. + // - Only return error if greater than some percentage of the targets fail. 
+} + +// NewVExec returns a new instance suitable for making vexec queries to a given +// keyspace (required) and workflow (optional, omit by providing the empty +// string). The provided topo server is used to look up target tablets for +// queries. A given instance will discover targets exactly once for its +// lifetime, so to force a refresh, create another instance. +func NewVExec(keyspace string, workflow string, ts *topo.Server, tmc tmclient.TabletManagerClient) *VExec { + return &VExec{ + ts: ts, + tmc: tmc, + keyspace: keyspace, + workflow: workflow, + } +} + +// QueryContext executes the given vexec query, returning a mapping of tablet +// to querypb.QueryResult. +// +// On first use, QueryContext will also cause the VExec instance to discover +// target tablets from the topo; that target list will be reused for all future +// queries made by this instance. +// +// For details on query parsing and planning, see GetPlanner and the +// QueryPlanner interface. +func (vx *VExec) QueryContext(ctx context.Context, query string) (map[*topo.TabletInfo]*querypb.QueryResult, error) { + if vx.primaries == nil { + if err := vx.initialize(ctx); err != nil { + return nil, err + } + } + + stmt, err := sqlparser.Parse(query) + if err != nil { + return nil, err + } + + table, err := extractTableName(stmt) + if err != nil { + return nil, err + } + + planner, err := vx.GetPlanner(ctx, table) + if err != nil { + return nil, err + } + + qp, err := planner.PlanQuery(stmt) + if err != nil { + return nil, err + } + + return qp.ExecuteScatter(ctx, vx.primaries...) 
+} + +func (vx *VExec) initialize(ctx context.Context) error { + vx.primaries = nil + + getShardsCtx, getShardsCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer getShardsCancel() + + shards, err := vx.ts.GetShardNames(getShardsCtx, vx.keyspace) + if err != nil { + return err + } + + if len(shards) == 0 { + return fmt.Errorf("%w %s", ErrNoShardsForKeyspace, vx.keyspace) + } + + primaries := make([]*topo.TabletInfo, 0, len(shards)) + + for _, shard := range shards { + ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) + defer cancel() + + si, err := vx.ts.GetShard(ctx, vx.keyspace, shard) + if err != nil { + return err + } + + if si.MasterAlias == nil { + return fmt.Errorf("%w %s/%s", ErrNoShardPrimary, vx.keyspace, shard) + } + + primary, err := vx.ts.GetTablet(ctx, si.MasterAlias) + if err != nil { + return err + } + + if primary == nil { + return fmt.Errorf("%w %s/%s: tablet %v not found", ErrNoShardPrimary, vx.keyspace, shard, topoproto.TabletAliasString(si.MasterAlias)) + } + + primaries = append(primaries, primary) + } + + vx.primaries = primaries + + return nil +} + +// GetPlanner returns an appropriate implementation of a QueryPlanner, depending +// on the table being queried. +// +// On first use, GetPlanner will also cause the VExec instance to discover +// target tablets from the topo; that target list will be reused for all future +// queries made by this instance. +func (vx *VExec) GetPlanner(ctx context.Context, table string) (QueryPlanner, error) { // TODO: private? 
+ if vx.primaries == nil { + if err := vx.initialize(ctx); err != nil { + return nil, fmt.Errorf("error while initializing target list: %w", err) + } + } + + switch table { + case qualifiedTableName(VReplicationTableName): + return NewVReplicationQueryPlanner(vx.tmc, vx.workflow, vx.primaries[0].DbName()), nil + case qualifiedTableName(SchemaMigrationsTableName): + return nil, errors.New("Schema Migrations not yet supported in new workflow package") + default: + return nil, fmt.Errorf("%w: %v", ErrUnsupportedTable, table) + } +} + +func extractTableName(stmt sqlparser.Statement) (string, error) { + switch stmt := stmt.(type) { + case *sqlparser.Update: + return sqlparser.String(stmt.TableExprs), nil + case *sqlparser.Delete: + return sqlparser.String(stmt.TableExprs), nil + case *sqlparser.Insert: + return sqlparser.String(stmt.Table), nil + case *sqlparser.Select: + return sqlparser.String(stmt.From), nil + } + + return "", fmt.Errorf("%w: %+v", ErrUnsupportedQuery, sqlparser.String(stmt)) +} + +func qualifiedTableName(name string) string { + return fmt.Sprintf("%s.%s", VExecTableQualifier, name) +} diff --git a/go/vt/vtctld/schema.go b/go/vt/vtctld/schema.go index 294a3255b2c..ad57158cc7f 100644 --- a/go/vt/vtctld/schema.go +++ b/go/vt/vtctld/schema.go @@ -78,7 +78,7 @@ func reviewMigrationRequest(ctx context.Context, ts *topo.Server, tmClient tmcli if err != nil { return err } - actionStr, err := onlineDDL.GetActionStr() + _, actionStr, err := onlineDDL.GetActionStr() if err != nil { return err } diff --git a/go/vt/vterrors/state.go b/go/vt/vterrors/state.go new file mode 100644 index 00000000000..b3103cd851d --- /dev/null +++ b/go/vt/vterrors/state.go @@ -0,0 +1,71 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +// State is error state +type State int + +// All the error states +const ( + Undefined State = iota + + // invalid argument + BadFieldError + CantUseOptionHere + DataOutOfRange + EmptyQuery + ForbidSchemaChange + IncorrectGlobalLocalVar + NonUniqTable + SyntaxError + WrongGroupField + WrongTypeForVar + WrongValueForVar + LockOrActiveTransaction + + // failed precondition + NoDB + InnodbReadOnly + WrongNumberOfColumnsInSelect + + // not found + BadDb + DbDropExists + NoSuchTable + SPDoesNotExist + UnknownSystemVariable + UnknownTable + + // already exists + DbCreateExists + + // resource exhausted + NetPacketTooLarge + + // cancelled + QueryInterrupted + + // unimplemented + NotSupportedYet + UnsupportedPS + + // permission denied + AccessDeniedError + + // No state should be added below NumOfStates + NumOfStates +) diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index dc3a19a11b1..4bcdb95a9b2 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -113,31 +113,34 @@ func New(code vtrpcpb.Code, message string) error { } } -// NewWithoutCode returns an error when no applicable error code is available -// It will record the stack trace when creating the error -func NewWithoutCode(message string) error { +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. 
+func Errorf(code vtrpcpb.Code, format string, args ...interface{}) error { return &fundamental{ - msg: message, - code: vtrpcpb.Code_UNKNOWN, + msg: fmt.Sprintf(format, args...), + code: code, stack: callers(), } } -// Errorf formats according to a format specifier and returns the string +// NewErrorf formats according to a format specifier and returns the string // as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(code vtrpcpb.Code, format string, args ...interface{}) error { +// NewErrorf also records the stack trace at the point it was called. +func NewErrorf(code vtrpcpb.Code, state State, format string, args ...interface{}) error { return &fundamental{ msg: fmt.Sprintf(format, args...), code: code, + state: state, stack: callers(), } } // fundamental is an error that has a message and a stack, but no caller. type fundamental struct { - msg string - code vtrpcpb.Code + msg string + code vtrpcpb.Code + state State *stack } @@ -185,6 +188,24 @@ func Code(err error) vtrpcpb.Code { return vtrpcpb.Code_UNKNOWN } +// ErrState returns the error state if it's a vtError. +// If err is nil, it returns Undefined. +func ErrState(err error) State { + if err == nil { + return Undefined + } + if err, ok := err.(*fundamental); ok { + return err.state + } + + cause := Cause(err) + if cause != err && cause != nil { + // If we did not find an error state at the outer level, let's find the cause and check it's state + return ErrState(cause) + } + return Undefined +} + // Wrap returns an error annotating err with a stack trace // at the point Wrap is called, and the supplied message. // If err is nil, Wrap returns nil. 
diff --git a/go/vt/vtexplain/testdata/multi-output/comments-output.txt b/go/vt/vtexplain/testdata/multi-output/comments-output.txt index 922dec1543a..db6c63b250a 100644 --- a/go/vt/vtexplain/testdata/multi-output/comments-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/comments-output.txt @@ -1,41 +1,41 @@ ---------------------------------------------------------------------- SELECT * from user -1 ks_sharded/-40: select * from user limit 10001 -1 ks_sharded/40-80: select * from user limit 10001 -1 ks_sharded/80-c0: select * from user limit 10001 -1 ks_sharded/c0-: select * from user limit 10001 +1 ks_sharded/-40: select * from `user` limit 10001 +1 ks_sharded/40-80: select * from `user` limit 10001 +1 ks_sharded/80-c0: select * from `user` limit 10001 +1 ks_sharded/c0-: select * from `user` limit 10001 ---------------------------------------------------------------------- select /* ; */ 1 from user -1 ks_sharded/-40: select /* ; */ 1 from user limit 10001 -1 ks_sharded/40-80: select /* ; */ 1 from user limit 10001 -1 ks_sharded/80-c0: select /* ; */ 1 from user limit 10001 -1 ks_sharded/c0-: select /* ; */ 1 from user limit 10001 +1 ks_sharded/-40: select /* ; */ 1 from `user` limit 10001 +1 ks_sharded/40-80: select /* ; */ 1 from `user` limit 10001 +1 ks_sharded/80-c0: select /* ; */ 1 from `user` limit 10001 +1 ks_sharded/c0-: select /* ; */ 1 from `user` limit 10001 ---------------------------------------------------------------------- select 1 from user where x=';' -1 ks_sharded/-40: select 1 from user where x = ';' limit 10001 -1 ks_sharded/40-80: select 1 from user where x = ';' limit 10001 -1 ks_sharded/80-c0: select 1 from user where x = ';' limit 10001 -1 ks_sharded/c0-: select 1 from user where x = ';' limit 10001 +1 ks_sharded/-40: select 1 from `user` where x = ';' limit 10001 +1 ks_sharded/40-80: select 1 from `user` where x = ';' limit 10001 +1 ks_sharded/80-c0: select 1 from `user` where x = ';' limit 10001 +1 ks_sharded/c0-: select 1 from 
`user` where x = ';' limit 10001 ---------------------------------------------------------------------- select 1 from user where x='/* hello */' -1 ks_sharded/-40: select 1 from user where x = '/* hello */' limit 10001 -1 ks_sharded/40-80: select 1 from user where x = '/* hello */' limit 10001 -1 ks_sharded/80-c0: select 1 from user where x = '/* hello */' limit 10001 -1 ks_sharded/c0-: select 1 from user where x = '/* hello */' limit 10001 +1 ks_sharded/-40: select 1 from `user` where x = '/* hello */' limit 10001 +1 ks_sharded/40-80: select 1 from `user` where x = '/* hello */' limit 10001 +1 ks_sharded/80-c0: select 1 from `user` where x = '/* hello */' limit 10001 +1 ks_sharded/c0-: select 1 from `user` where x = '/* hello */' limit 10001 ---------------------------------------------------------------------- select 1 from user where x='/* ; */' -1 ks_sharded/-40: select 1 from user where x = '/* ; */' limit 10001 -1 ks_sharded/40-80: select 1 from user where x = '/* ; */' limit 10001 -1 ks_sharded/80-c0: select 1 from user where x = '/* ; */' limit 10001 -1 ks_sharded/c0-: select 1 from user where x = '/* ; */' limit 10001 +1 ks_sharded/-40: select 1 from `user` where x = '/* ; */' limit 10001 +1 ks_sharded/40-80: select 1 from `user` where x = '/* ; */' limit 10001 +1 ks_sharded/80-c0: select 1 from `user` where x = '/* ; */' limit 10001 +1 ks_sharded/c0-: select 1 from `user` where x = '/* ; */' limit 10001 ---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt b/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt index c3b32309a6e..e6edbcdb0d1 100644 --- a/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt @@ -16,10 +16,10 @@ delete from music_extra where id=1 and extra='abc' delete from user where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: select id, `name` from user 
where id = 1 limit 10001 for update +1 ks_sharded/-40: select id, `name` from `user` where id = 1 limit 10001 for update 2 ks_sharded/40-80: begin 2 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 -3 ks_sharded/-40: delete from user where id = 1 limit 10001 +3 ks_sharded/-40: delete from `user` where id = 1 limit 10001 4 ks_sharded/-40: commit 5 ks_sharded/40-80: commit @@ -29,10 +29,10 @@ delete from user where name='billy' 1 ks_sharded/c0-: begin 1 ks_sharded/c0-: select `name`, user_id from name_user_map where `name` in ('billy') limit 10001 for update 2 ks_sharded/-40: begin -2 ks_sharded/-40: select id, `name` from user where `name` = 'billy' limit 10001 for update +2 ks_sharded/-40: select id, `name` from `user` where `name` = 'billy' limit 10001 for update 3 ks_sharded/40-80: begin 3 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 -4 ks_sharded/-40: delete from user where `name` = 'billy' limit 10001 +4 ks_sharded/-40: delete from `user` where `name` = 'billy' limit 10001 5 ks_sharded/c0-: commit 6 ks_sharded/-40: commit 7 ks_sharded/40-80: commit diff --git a/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt index c43ebac4000..16abe1fd424 100644 --- a/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt @@ -4,7 +4,7 @@ insert into user (id, name) values(1, 'alice') 1 ks_sharded/40-80: begin 1 ks_sharded/40-80: insert into name_user_map(`name`, user_id) values ('alice', 1) 2 ks_sharded/-40: begin -2 ks_sharded/-40: insert into user(id, `name`) values (1, 'alice') +2 ks_sharded/-40: insert into `user`(id, `name`) values (1, 'alice') 3 ks_sharded/40-80: commit 4 ks_sharded/-40: commit @@ -14,7 +14,7 @@ insert into user (id, name) values(2, 'bob') 1 ks_sharded/c0-: begin 1 ks_sharded/c0-: insert into 
name_user_map(`name`, user_id) values ('bob', 2) 2 ks_sharded/-40: begin -2 ks_sharded/-40: insert into user(id, `name`) values (2, 'bob') +2 ks_sharded/-40: insert into `user`(id, `name`) values (2, 'bob') 3 ks_sharded/c0-: commit 4 ks_sharded/-40: commit @@ -25,7 +25,7 @@ insert ignore into user (id, name) values(2, 'bob') 1 ks_sharded/c0-: insert ignore into name_user_map(`name`, user_id) values ('bob', 2) 2 ks_sharded/c0-: select `name` from name_user_map where `name` = 'bob' and user_id = 2 limit 10001 3 ks_sharded/-40: begin -3 ks_sharded/-40: insert ignore into user(id, `name`) values (2, 'bob') +3 ks_sharded/-40: insert ignore into `user`(id, `name`) values (2, 'bob') 4 ks_sharded/c0-: commit 5 ks_sharded/-40: commit @@ -36,7 +36,7 @@ insert ignore into user (id, name, nickname) values(2, 'bob', 'bob') 1 ks_sharded/c0-: insert ignore into name_user_map(`name`, user_id) values ('bob', 2) 2 ks_sharded/c0-: select `name` from name_user_map where `name` = 'bob' and user_id = 2 limit 10001 3 ks_sharded/-40: begin -3 ks_sharded/-40: insert ignore into user(id, `name`, nickname) values (2, 'bob', 'bob') +3 ks_sharded/-40: insert ignore into `user`(id, `name`, nickname) values (2, 'bob', 'bob') 4 ks_sharded/c0-: commit 5 ks_sharded/-40: commit @@ -47,7 +47,7 @@ insert into user (id, name, nickname) values(2, 'bob', 'bobby') on duplicate key 1 ks_sharded/c0-: insert ignore into name_user_map(`name`, user_id) values ('bob', 2) 2 ks_sharded/c0-: select `name` from name_user_map where `name` = 'bob' and user_id = 2 limit 10001 3 ks_sharded/-40: begin -3 ks_sharded/-40: insert into user(id, `name`, nickname) values (2, 'bob', 'bobby') on duplicate key update nickname = 'bobby' +3 ks_sharded/-40: insert into `user`(id, `name`, nickname) values (2, 'bob', 'bobby') on duplicate key update nickname = 'bobby' 4 ks_sharded/c0-: commit 5 ks_sharded/-40: commit @@ -58,7 +58,7 @@ insert into user (id, name, nickname, address) values(2, 'bob', 'bobby', '123 ma 1 ks_sharded/c0-: 
insert ignore into name_user_map(`name`, user_id) values ('bob', 2) 2 ks_sharded/c0-: select `name` from name_user_map where `name` = 'bob' and user_id = 2 limit 10001 3 ks_sharded/-40: begin -3 ks_sharded/-40: insert into user(id, `name`, nickname, address) values (2, 'bob', 'bobby', '123 main st') on duplicate key update nickname = values(nickname), address = values(address) +3 ks_sharded/-40: insert into `user`(id, `name`, nickname, address) values (2, 'bob', 'bobby', '123 main st') on duplicate key update nickname = values(nickname), address = values(address) 4 ks_sharded/c0-: commit 5 ks_sharded/-40: commit diff --git a/go/vt/vtexplain/testdata/multi-output/options-output.txt b/go/vt/vtexplain/testdata/multi-output/options-output.txt index 21db05ab44d..a617e654499 100644 --- a/go/vt/vtexplain/testdata/multi-output/options-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/options-output.txt @@ -1,17 +1,17 @@ ---------------------------------------------------------------------- select * from user where email='null@void.com' -1 ks_sharded/-40: select * from user where email = 'null@void.com' limit 10001 -1 ks_sharded/40-80: select * from user where email = 'null@void.com' limit 10001 -1 ks_sharded/80-c0: select * from user where email = 'null@void.com' limit 10001 -1 ks_sharded/c0-: select * from user where email = 'null@void.com' limit 10001 +1 ks_sharded/-40: select * from `user` where email = 'null@void.com' limit 10001 +1 ks_sharded/40-80: select * from `user` where email = 'null@void.com' limit 10001 +1 ks_sharded/80-c0: select * from `user` where email = 'null@void.com' limit 10001 +1 ks_sharded/c0-: select * from `user` where email = 'null@void.com' limit 10001 ---------------------------------------------------------------------- select * from user where id in (1,2,3,4,5,6,7,8) -1 ks_sharded/-40: select * from user where id in (1, 2) limit 10001 -1 ks_sharded/40-80: select * from user where id in (3, 5) limit 10001 -1 ks_sharded/c0-: select * from 
user where id in (4, 6, 7, 8) limit 10001 +1 ks_sharded/-40: select * from `user` where id in (1, 2) limit 10001 +1 ks_sharded/40-80: select * from `user` where id in (3, 5) limit 10001 +1 ks_sharded/c0-: select * from `user` where id in (4, 6, 7, 8) limit 10001 ---------------------------------------------------------------------- insert into user (id, name) values (2, 'bob') @@ -19,7 +19,7 @@ insert into user (id, name) values (2, 'bob') 1 ks_sharded/c0-: begin 1 ks_sharded/c0-: insert into name_user_map(`name`, user_id) values ('bob', 2) 2 ks_sharded/-40: begin -2 ks_sharded/-40: insert into user(id, `name`) values (2, 'bob') +2 ks_sharded/-40: insert into `user`(id, `name`) values (2, 'bob') 3 ks_sharded/c0-: commit 4 ks_sharded/-40: commit diff --git a/go/vt/vtexplain/testdata/multi-output/select-sharded-8-output.txt b/go/vt/vtexplain/testdata/multi-output/select-sharded-8-output.txt index a50c6fcb067..baf8152ce7a 100644 --- a/go/vt/vtexplain/testdata/multi-output/select-sharded-8-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/select-sharded-8-output.txt @@ -1,18 +1,18 @@ ---------------------------------------------------------------------- select * from user -1 ks_sharded/-20: select * from user limit 10001 -1 ks_sharded/20-40: select * from user limit 10001 -1 ks_sharded/40-60: select * from user limit 10001 -1 ks_sharded/60-80: select * from user limit 10001 -1 ks_sharded/80-a0: select * from user limit 10001 -1 ks_sharded/a0-c0: select * from user limit 10001 -1 ks_sharded/c0-e0: select * from user limit 10001 -1 ks_sharded/e0-: select * from user limit 10001 +1 ks_sharded/-20: select * from `user` limit 10001 +1 ks_sharded/20-40: select * from `user` limit 10001 +1 ks_sharded/40-60: select * from `user` limit 10001 +1 ks_sharded/60-80: select * from `user` limit 10001 +1 ks_sharded/80-a0: select * from `user` limit 10001 +1 ks_sharded/a0-c0: select * from `user` limit 10001 +1 ks_sharded/c0-e0: select * from `user` limit 10001 +1 ks_sharded/e0-: 
select * from `user` limit 10001 ---------------------------------------------------------------------- select * from user where id in (1, 2) -1 ks_sharded/-20: select * from user where id in (1, 2) limit 10001 +1 ks_sharded/-20: select * from `user` where id in (1, 2) limit 10001 ---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt index f17fa13e4cc..6fe933c3759 100644 --- a/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt @@ -1,45 +1,45 @@ ---------------------------------------------------------------------- select * from user /* scatter */ -1 ks_sharded/-40: select * from user limit 10001 /* scatter */ -1 ks_sharded/40-80: select * from user limit 10001 /* scatter */ -1 ks_sharded/80-c0: select * from user limit 10001 /* scatter */ -1 ks_sharded/c0-: select * from user limit 10001 /* scatter */ +1 ks_sharded/-40: select * from `user` limit 10001 /* scatter */ +1 ks_sharded/40-80: select * from `user` limit 10001 /* scatter */ +1 ks_sharded/80-c0: select * from `user` limit 10001 /* scatter */ +1 ks_sharded/c0-: select * from `user` limit 10001 /* scatter */ ---------------------------------------------------------------------- select * from user where id = 1 /* equal unique */ -1 ks_sharded/-40: select * from user where id = 1 limit 10001 /* equal unique */ +1 ks_sharded/-40: select * from `user` where id = 1 limit 10001 /* equal unique */ ---------------------------------------------------------------------- select * from user where id > 100 /* scatter range */ -1 ks_sharded/-40: select * from user where id > 100 limit 10001 /* scatter range */ -1 ks_sharded/40-80: select * from user where id > 100 limit 10001 /* scatter range */ -1 ks_sharded/80-c0: select * from user where id > 100 limit 10001 /* scatter range */ -1 
ks_sharded/c0-: select * from user where id > 100 limit 10001 /* scatter range */ +1 ks_sharded/-40: select * from `user` where id > 100 limit 10001 /* scatter range */ +1 ks_sharded/40-80: select * from `user` where id > 100 limit 10001 /* scatter range */ +1 ks_sharded/80-c0: select * from `user` where id > 100 limit 10001 /* scatter range */ +1 ks_sharded/c0-: select * from `user` where id > 100 limit 10001 /* scatter range */ ---------------------------------------------------------------------- select * from user where name = 'bob' /* vindex lookup */ 1 ks_sharded/c0-: select `name`, user_id from name_user_map where `name` in ('bob') limit 10001 /* vindex lookup */ -2 ks_sharded/-40: select * from user where `name` = 'bob' limit 10001 /* vindex lookup */ +2 ks_sharded/-40: select * from `user` where `name` = 'bob' limit 10001 /* vindex lookup */ ---------------------------------------------------------------------- select * from user where name = 'bob' or nickname = 'bob' /* vindex lookup */ -1 ks_sharded/-40: select * from user where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ -1 ks_sharded/40-80: select * from user where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ -1 ks_sharded/80-c0: select * from user where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ -1 ks_sharded/c0-: select * from user where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ +1 ks_sharded/-40: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ +1 ks_sharded/40-80: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ +1 ks_sharded/80-c0: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ +1 ks_sharded/c0-: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ ---------------------------------------------------------------------- select u.id, 
u.name, u.nickname, n.info from user u join name_info n on u.name = n.name /* join on varchar */ -1 ks_sharded/-40: select u.id, u.`name`, u.nickname from user as u limit 10001 /* join on varchar */ -1 ks_sharded/40-80: select u.id, u.`name`, u.nickname from user as u limit 10001 /* join on varchar */ -1 ks_sharded/80-c0: select u.id, u.`name`, u.nickname from user as u limit 10001 /* join on varchar */ -1 ks_sharded/c0-: select u.id, u.`name`, u.nickname from user as u limit 10001 /* join on varchar */ +1 ks_sharded/-40: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ +1 ks_sharded/40-80: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ +1 ks_sharded/80-c0: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ +1 ks_sharded/c0-: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ 2 ks_sharded/40-80: select n.info from name_info as n where n.`name` = 'name_val_2' limit 10001 /* join on varchar */ 3 ks_sharded/40-80: select n.info from name_info as n where n.`name` = 'name_val_2' limit 10001 /* join on varchar */ 4 ks_sharded/40-80: select n.info from name_info as n where n.`name` = 'name_val_2' limit 10001 /* join on varchar */ @@ -54,68 +54,68 @@ select m.id, m.song, e.extra from music m join music_extra e on m.id = e.id wher ---------------------------------------------------------------------- select count(*) from user where id = 1 /* point aggregate */ -1 ks_sharded/-40: select count(*) from user where id = 1 limit 10001 /* point aggregate */ +1 ks_sharded/-40: select count(*) from `user` where id = 1 limit 10001 /* point aggregate */ ---------------------------------------------------------------------- select count(*) from user where name in ('alice','bob') /* scatter aggregate */ 1 ks_sharded/40-80: select `name`, user_id from name_user_map where `name` in ('alice') limit 10001 /* scatter aggregate */ 2 ks_sharded/c0-: 
select `name`, user_id from name_user_map where `name` in ('bob') limit 10001 /* scatter aggregate */ -3 ks_sharded/-40: select count(*) from user where `name` in ('alice', 'bob') limit 10001 /* scatter aggregate */ +3 ks_sharded/-40: select count(*) from `user` where `name` in ('alice', 'bob') limit 10001 /* scatter aggregate */ ---------------------------------------------------------------------- select name, count(*) from user group by name /* scatter aggregate */ -1 ks_sharded/-40: select `name`, count(*) from user group by `name` limit 10001 /* scatter aggregate */ -1 ks_sharded/40-80: select `name`, count(*) from user group by `name` limit 10001 /* scatter aggregate */ -1 ks_sharded/80-c0: select `name`, count(*) from user group by `name` limit 10001 /* scatter aggregate */ -1 ks_sharded/c0-: select `name`, count(*) from user group by `name` limit 10001 /* scatter aggregate */ +1 ks_sharded/-40: select `name`, count(*) from `user` group by `name` limit 10001 /* scatter aggregate */ +1 ks_sharded/40-80: select `name`, count(*) from `user` group by `name` limit 10001 /* scatter aggregate */ +1 ks_sharded/80-c0: select `name`, count(*) from `user` group by `name` limit 10001 /* scatter aggregate */ +1 ks_sharded/c0-: select `name`, count(*) from `user` group by `name` limit 10001 /* scatter aggregate */ ---------------------------------------------------------------------- select 1, "hello", 3.14, null from user limit 10 /* select constant sql values */ -1 ks_sharded/-40: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */ -1 ks_sharded/40-80: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */ -1 ks_sharded/80-c0: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */ -1 ks_sharded/c0-: select 1, 'hello', 3.14, null from user limit 10 /* select constant sql values */ +1 ks_sharded/-40: select 1, 'hello', 3.14, null from `user` limit 10 /* select constant sql values */ +1 
ks_sharded/40-80: select 1, 'hello', 3.14, null from `user` limit 10 /* select constant sql values */ +1 ks_sharded/80-c0: select 1, 'hello', 3.14, null from `user` limit 10 /* select constant sql values */ +1 ks_sharded/c0-: select 1, 'hello', 3.14, null from `user` limit 10 /* select constant sql values */ ---------------------------------------------------------------------- select * from (select id from user) s /* scatter paren select */ -1 ks_sharded/-40: select * from (select id from user) as s limit 10001 /* scatter paren select */ -1 ks_sharded/40-80: select * from (select id from user) as s limit 10001 /* scatter paren select */ -1 ks_sharded/80-c0: select * from (select id from user) as s limit 10001 /* scatter paren select */ -1 ks_sharded/c0-: select * from (select id from user) as s limit 10001 /* scatter paren select */ +1 ks_sharded/-40: select * from (select id from `user`) as s limit 10001 /* scatter paren select */ +1 ks_sharded/40-80: select * from (select id from `user`) as s limit 10001 /* scatter paren select */ +1 ks_sharded/80-c0: select * from (select id from `user`) as s limit 10001 /* scatter paren select */ +1 ks_sharded/c0-: select * from (select id from `user`) as s limit 10001 /* scatter paren select */ ---------------------------------------------------------------------- select name from user where id = (select id from t1) /* non-correlated subquery as value */ 1 ks_unsharded/-: select id from t1 limit 10001 /* non-correlated subquery as value */ -2 ks_sharded/-40: select `name` from user where id = 1 limit 10001 /* non-correlated subquery as value */ +2 ks_sharded/-40: select `name` from `user` where id = 1 limit 10001 /* non-correlated subquery as value */ ---------------------------------------------------------------------- select name from user where id in (select id from t1) /* non-correlated subquery in IN clause */ 1 ks_unsharded/-: select id from t1 limit 10001 /* non-correlated subquery in IN clause */ -2 ks_sharded/-40: 
select `name` from user where 1 = 1 and id in (1) limit 10001 /* non-correlated subquery in IN clause */ +2 ks_sharded/-40: select `name` from `user` where 1 = 1 and id in (1) limit 10001 /* non-correlated subquery in IN clause */ ---------------------------------------------------------------------- select name from user where id not in (select id from t1) /* non-correlated subquery in NOT IN clause */ 1 ks_unsharded/-: select id from t1 limit 10001 /* non-correlated subquery in NOT IN clause */ -2 ks_sharded/-40: select `name` from user where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ -2 ks_sharded/40-80: select `name` from user where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ -2 ks_sharded/80-c0: select `name` from user where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ -2 ks_sharded/c0-: select `name` from user where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ +2 ks_sharded/-40: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ +2 ks_sharded/40-80: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ +2 ks_sharded/80-c0: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ +2 ks_sharded/c0-: select `name` from `user` where 1 = 0 or id not in (1) limit 10001 /* non-correlated subquery in NOT IN clause */ ---------------------------------------------------------------------- select name from user where exists (select id from t1) /* non-correlated subquery as EXISTS */ 1 ks_unsharded/-: select id from t1 limit 10001 /* non-correlated subquery as EXISTS */ -2 ks_sharded/-40: select `name` from user where 1 limit 10001 /* non-correlated subquery as EXISTS */ -2 ks_sharded/40-80: select `name` from user where 1 limit 10001 /* 
non-correlated subquery as EXISTS */ -2 ks_sharded/80-c0: select `name` from user where 1 limit 10001 /* non-correlated subquery as EXISTS */ -2 ks_sharded/c0-: select `name` from user where 1 limit 10001 /* non-correlated subquery as EXISTS */ +2 ks_sharded/-40: select `name` from `user` where 1 limit 10001 /* non-correlated subquery as EXISTS */ +2 ks_sharded/40-80: select `name` from `user` where 1 limit 10001 /* non-correlated subquery as EXISTS */ +2 ks_sharded/80-c0: select `name` from `user` where 1 limit 10001 /* non-correlated subquery as EXISTS */ +2 ks_sharded/c0-: select `name` from `user` where 1 limit 10001 /* non-correlated subquery as EXISTS */ ---------------------------------------------------------------------- select * from name_info order by info /* select * and order by varchar column */ @@ -128,42 +128,42 @@ select * from name_info order by info /* select * and order by varchar column */ ---------------------------------------------------------------------- select distinct(name) from user where id = 1 /* select distinct */ -1 ks_sharded/-40: select distinct `name` from user where id = 1 limit 10001 /* select distinct */ +1 ks_sharded/-40: select distinct `name` from `user` where id = 1 limit 10001 /* select distinct */ ---------------------------------------------------------------------- select distinct name from user where id = 1 /* select distinct */ -1 ks_sharded/-40: select distinct `name` from user where id = 1 limit 10001 /* select distinct */ +1 ks_sharded/-40: select distinct `name` from `user` where id = 1 limit 10001 /* select distinct */ ---------------------------------------------------------------------- select id, substring(name, 1, -1) from user where id = 123 /* select substring */ -1 ks_sharded/-40: select id, substr(`name`, 1, -1) from user where id = 123 limit 10001 /* select substring */ +1 ks_sharded/-40: select id, substr(`name`, 1, -1) from `user` where id = 123 limit 10001 /* select substring */ 
---------------------------------------------------------------------- select id, substring_index(name, '123456', -1) from user where id = 123 /* select substring_index */ -1 ks_sharded/-40: select id, substring_index(`name`, '123456', -1) from user where id = 123 limit 10001 /* select substring_index */ +1 ks_sharded/-40: select id, substring_index(`name`, '123456', -1) from `user` where id = 123 limit 10001 /* select substring_index */ ---------------------------------------------------------------------- select id, case when name = 'alice' then 'ALICE' when name = 'bob' then 'BOB' end as name from user where id = 1 /* select case */ -1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' end as `name` from user where id = 1 limit 10001 /* select case */ +1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' end as `name` from `user` where id = 1 limit 10001 /* select case */ ---------------------------------------------------------------------- select id, case when name = 'alice' then 'ALICE' when name = 'bob' then 'BOB' else 'OTHER' end as name from user where id = 1 /* select case */ -1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from user where id = 1 limit 10001 /* select case */ +1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from `user` where id = 1 limit 10001 /* select case */ ---------------------------------------------------------------------- select id, case when substr(name, 1, 5) = 'alice' then 'ALICE' when name = 'bob' then 'BOB' else 'OTHER' end as name from user where id = 1 /* select case */ -1 ks_sharded/-40: select id, case when substr(`name`, 1, 5) = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from user where id = 1 limit 10001 /* select case */ +1 
ks_sharded/-40: select id, case when substr(`name`, 1, 5) = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from `user` where id = 1 limit 10001 /* select case */ ---------------------------------------------------------------------- select id, 'abc' as test from user where id = 1 union all select id, 'def' as test from user where id = 1 union all select id, 'ghi' as test from user where id = 1 /* union all */ -1 ks_sharded/-40: select id, 'abc' as test from user where id = 1 union all select id, 'def' as test from user where id = 1 union all select id, 'ghi' as test from user where id = 1 limit 10001 /* union all */ +1 ks_sharded/-40: select id, 'abc' as test from `user` where id = 1 union all select id, 'def' as test from `user` where id = 1 union all select id, 'ghi' as test from `user` where id = 1 limit 10001 /* union all */ ---------------------------------------------------------------------- select id from user where not id in (select col from music where music.user_id = 42) and id in (select col from music where music.user_id = 411) diff --git a/go/vt/vtexplain/testdata/multi-output/target-output.txt b/go/vt/vtexplain/testdata/multi-output/target-output.txt index 4a99a3485e4..3686b86d055 100644 --- a/go/vt/vtexplain/testdata/multi-output/target-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/target-output.txt @@ -1,16 +1,16 @@ ---------------------------------------------------------------------- select * from user where email='null@void.com' -1 ks_sharded/40-80: select * from user where email = 'null@void.com' limit 10001 +1 ks_sharded/40-80: select * from `user` where email = 'null@void.com' limit 10001 ---------------------------------------------------------------------- select * from user where id in (1,2,3,4,5,6,7,8) -1 ks_sharded/40-80: select * from user where id in (1, 2, 3, 4, 5, 6, 7, 8) limit 10001 +1 ks_sharded/40-80: select * from `user` where id in (1, 2, 3, 4, 5, 6, 7, 8) limit 10001 
---------------------------------------------------------------------- insert into user (id, name) values (2, 'bob') -1 ks_sharded/40-80: insert into user(id, `name`) values (2, 'bob') +1 ks_sharded/40-80: insert into `user`(id, `name`) values (2, 'bob') ---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/uneven-keyspace-output.txt b/go/vt/vtexplain/testdata/multi-output/uneven-keyspace-output.txt index 8ed6a239e98..dc74755801e 100644 --- a/go/vt/vtexplain/testdata/multi-output/uneven-keyspace-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/uneven-keyspace-output.txt @@ -1,16 +1,16 @@ ---------------------------------------------------------------------- select * from user -1 ks_sharded/-80: select * from user limit 10001 -1 ks_sharded/80-90: select * from user limit 10001 -1 ks_sharded/90-a0: select * from user limit 10001 -1 ks_sharded/a0-e8: select * from user limit 10001 -1 ks_sharded/e8-: select * from user limit 10001 +1 ks_sharded/-80: select * from `user` limit 10001 +1 ks_sharded/80-90: select * from `user` limit 10001 +1 ks_sharded/90-a0: select * from `user` limit 10001 +1 ks_sharded/a0-e8: select * from `user` limit 10001 +1 ks_sharded/e8-: select * from `user` limit 10001 ---------------------------------------------------------------------- select * from user where id in (10, 17, 42, 100000) -1 ks_sharded/-80: select * from user where id in (10, 17, 42) limit 10001 -1 ks_sharded/80-90: select * from user where id in (100000) limit 10001 +1 ks_sharded/-80: select * from `user` where id in (10, 17, 42) limit 10001 +1 ks_sharded/80-90: select * from `user` where id in (100000) limit 10001 ---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt b/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt index 4d952c6ba3e..75a06f0388b 100644 --- 
a/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt @@ -2,7 +2,7 @@ update user set nickname='alice' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update user set nickname = 'alice' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 1 ks_sharded/-40: commit ---------------------------------------------------------------------- @@ -11,7 +11,7 @@ update user set nickname='alice' where name='alice' 1 ks_sharded/40-80: begin 1 ks_sharded/40-80: select `name`, user_id from name_user_map where `name` in ('alice') limit 10001 for update 2 ks_sharded/-40: begin -2 ks_sharded/-40: update user set nickname = 'alice' where `name` = 'alice' limit 10001 +2 ks_sharded/-40: update `user` set nickname = 'alice' where `name` = 'alice' limit 10001 3 ks_sharded/40-80: commit 4 ks_sharded/-40: commit @@ -19,19 +19,19 @@ update user set nickname='alice' where name='alice' update user set pet='fido' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update user set pet = 'fido' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set pet = 'fido' where id = 1 limit 10001 1 ks_sharded/-40: commit ---------------------------------------------------------------------- update user set name='alicia' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: select id, `name`, `name` = 'alicia' from user where id = 1 limit 10001 for update +1 ks_sharded/-40: select id, `name`, `name` = 'alicia' from `user` where id = 1 limit 10001 for update 2 ks_sharded/40-80: begin 2 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 3 ks_sharded/c0-: begin 3 ks_sharded/c0-: insert into name_user_map(`name`, user_id) values ('alicia', 1) -4 ks_sharded/-40: update user set `name` = 'alicia' where id = 1 limit 10001 +4 ks_sharded/-40: update `user` set `name` = 'alicia' where id = 1 limit 10001 5 ks_sharded/-40: 
commit 6 ks_sharded/40-80: commit 7 ks_sharded/c0-: commit @@ -42,11 +42,11 @@ update user set name='alicia' where name='alice' 1 ks_sharded/40-80: begin 1 ks_sharded/40-80: select `name`, user_id from name_user_map where `name` in ('alice') limit 10001 for update 2 ks_sharded/-40: begin -2 ks_sharded/-40: select id, `name`, `name` = 'alicia' from user where `name` = 'alice' limit 10001 for update +2 ks_sharded/-40: select id, `name`, `name` = 'alicia' from `user` where `name` = 'alice' limit 10001 for update 3 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 4 ks_sharded/c0-: begin 4 ks_sharded/c0-: insert into name_user_map(`name`, user_id) values ('alicia', 1) -5 ks_sharded/-40: update user set `name` = 'alicia' where `name` = 'alice' limit 10001 +5 ks_sharded/-40: update `user` set `name` = 'alicia' where `name` = 'alice' limit 10001 6 ks_sharded/40-80: commit 7 ks_sharded/-40: commit 8 ks_sharded/c0-: commit @@ -73,7 +73,7 @@ update user set pet='rover' where name='alice' 1 ks_sharded/40-80: begin 1 ks_sharded/40-80: select `name`, user_id from name_user_map where `name` in ('alice') limit 10001 for update 2 ks_sharded/-40: begin -2 ks_sharded/-40: update user set pet = 'rover' where `name` = 'alice' limit 10001 +2 ks_sharded/-40: update `user` set pet = 'rover' where `name` = 'alice' limit 10001 3 ks_sharded/40-80: commit 4 ks_sharded/-40: commit @@ -85,12 +85,12 @@ begin update user set nickname='alice' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update user set nickname = 'alice' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 ---------------------------------------------------------------------- update user set nickname='bob' where id=1 -2 ks_sharded/-40: update user set nickname = 'bob' where id = 1 limit 10001 +2 ks_sharded/-40: update `user` set nickname = 'bob' where id = 1 limit 10001 
---------------------------------------------------------------------- commit @@ -105,13 +105,13 @@ begin update user set nickname='alice' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update user set nickname = 'alice' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 ---------------------------------------------------------------------- update user set nickname='bob' where id=3 2 ks_sharded/40-80: begin -2 ks_sharded/40-80: update user set nickname = 'bob' where id = 3 limit 10001 +2 ks_sharded/40-80: update `user` set nickname = 'bob' where id = 3 limit 10001 ---------------------------------------------------------------------- commit diff --git a/go/vt/vtexplain/vtexplain_flaky_test.go b/go/vt/vtexplain/vtexplain_flaky_test.go index d2d9262dd1d..f75b49ae924 100644 --- a/go/vt/vtexplain/vtexplain_flaky_test.go +++ b/go/vt/vtexplain/vtexplain_flaky_test.go @@ -224,7 +224,7 @@ func TestJSONOutput(t *testing.T) { "ks_sharded/-40": { "MysqlQueries": [ { - "SQL": "select 1 from user where id = 1 limit 10001", + "SQL": "select 1 from ` + "`user`" + ` where id = 1 limit 10001", "Time": 1 } ], @@ -234,7 +234,7 @@ func TestJSONOutput(t *testing.T) { "#maxLimit": "10001", "vtg1": "1" }, - "SQL": "select :vtg1 from user where id = :vtg1", + "SQL": "select :vtg1 from ` + "`user`" + ` where id = :vtg1", "Time": 1 } ] diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index ccdce29a55c..2f5c540b338 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -20,13 +20,13 @@ limitations under the License. 
package vtexplain import ( + "context" "fmt" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" - "context" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/json2" @@ -69,8 +69,7 @@ func initVtgateExecutor(vSchemaStr, ksShardMapStr string, opts *Options) error { vtgateSession.TargetString = opts.Target streamSize := 10 - queryPlanCacheSize := int64(10) - vtgateExecutor = vtgate.NewExecutor(context.Background(), explainTopo, vtexplainCell, resolver, opts.Normalize, streamSize, queryPlanCacheSize) + vtgateExecutor = vtgate.NewExecutor(context.Background(), explainTopo, vtexplainCell, resolver, opts.Normalize, false /*do not warn for sharded only*/, streamSize, cache.DefaultConfig) return nil } @@ -201,11 +200,12 @@ func vtgateExecute(sql string) ([]*engine.Plan, map[string]*TabletActions, error } var plans []*engine.Plan - for _, item := range planCache.Items() { - plan := item.Value.(*engine.Plan) + planCache.ForEach(func(value interface{}) bool { + plan := value.(*engine.Plan) plan.ExecTime = 0 plans = append(plans, plan) - } + return true + }) planCache.Clear() tabletActions := make(map[string]*TabletActions) diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index 34773864c79..c552c616982 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -366,6 +366,13 @@ func initTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) error { }}, Rows: [][]sqltypes.Value{}, }, + mysql.ShowRowsRead: sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "Variable_name|value", + "varchar|uint64", + ), + "Innodb_rows|0", + ), } showTableRows := make([][]sqltypes.Value, 0, 4) @@ -382,7 +389,7 @@ func initTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) error { } showTableRows = append(showTableRows, mysql.BaseShowTablesRow(table, false, options)) } - schemaQueries[mysql.BaseShowTables] = &sqltypes.Result{ + 
schemaQueries[mysql.TablesWithSize57] = &sqltypes.Result{ Fields: mysql.BaseShowTablesFields, Rows: showTableRows, } @@ -523,7 +530,7 @@ func (t *explainTablet) HandleQuery(c *mysql.Conn, query string, callback func(* for _, val := range values { switch v := val.(type) { case *sqlparser.Literal: - inVal = append(inVal, v.Val) + inVal = append(inVal, v.Bytes()) } } rowCount = len(inVal) diff --git a/go/vt/vtgate/autocommit_test.go b/go/vt/vtgate/autocommit_test.go index 87b0c3d3512..519607e800e 100644 --- a/go/vt/vtgate/autocommit_test.go +++ b/go/vt/vtgate/autocommit_test.go @@ -42,7 +42,7 @@ func TestAutocommitUpdateSharded(t *testing.T) { require.NoError(t, err) testQueries(t, "sbc1", sbc1, []*querypb.BoundQuery{{ - Sql: "update user set a = 2 where id = 1", + Sql: "update `user` set a = 2 where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }}) testCommitCount(t, "sbc1", sbc1, 0) @@ -271,7 +271,7 @@ func TestAutocommitInsertLookup(t *testing.T) { testCommitCount(t, "sbclookup", sbclookup, 1) testQueries(t, "sbc1", sbc1, []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_0, 2, :_name_0)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), @@ -386,7 +386,7 @@ func TestAutocommitTransactionStarted(t *testing.T) { require.NoError(t, err) testQueries(t, "sbc1", sbc1, []*querypb.BoundQuery{{ - Sql: "update user set a = 2 where id = 1", + Sql: "update `user` set a = 2 where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }}) testCommitCount(t, "sbc1", sbc1, 0) diff --git a/go/vt/vtgate/discoverygateway.go b/go/vt/vtgate/discoverygateway.go index e70d83f389d..167ddd51266 100644 --- a/go/vt/vtgate/discoverygateway.go +++ b/go/vt/vtgate/discoverygateway.go @@ -245,7 +245,6 @@ func (dg *DiscoveryGateway) CacheStatus() TabletCacheStatusList { // a resharding 
event, and set the re-resolve bit and let the upper layers // re-resolve and retry. func (dg *DiscoveryGateway) withRetry(ctx context.Context, target *querypb.Target, unused queryservice.QueryService, name string, inTransaction bool, inner func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService) (bool, error)) error { - var tabletLastUsed *topodatapb.Tablet var err error invalidTablets := make(map[string]bool) @@ -300,7 +299,7 @@ func (dg *DiscoveryGateway) withRetry(ctx context.Context, target *querypb.Targe if len(tablets) == 0 { // fail fast if there is no tablet - err = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "no valid tablet") + err = vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no healthy tablet available for '%s'", target.String()) break } shuffleTablets(dg.localCell, tablets) @@ -322,7 +321,6 @@ func (dg *DiscoveryGateway) withRetry(ctx context.Context, target *querypb.Targe } // execute - tabletLastUsed = ts.Tablet conn := dg.hc.GetConnection(ts.Key) if conn == nil { err = vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no connection for key %v tablet %+v", ts.Key, ts.Tablet) @@ -340,7 +338,7 @@ func (dg *DiscoveryGateway) withRetry(ctx context.Context, target *querypb.Targe } break } - return NewShardError(err, target, tabletLastUsed) + return NewShardError(err, target) } func shuffleTablets(cell string, tablets []discovery.LegacyTabletStats) { diff --git a/go/vt/vtgate/discoverygateway_test.go b/go/vt/vtgate/discoverygateway_test.go index 7acb8741f80..5612d4a7d9f 100644 --- a/go/vt/vtgate/discoverygateway_test.go +++ b/go/vt/vtgate/discoverygateway_test.go @@ -30,7 +30,6 @@ import ( "vitess.io/vitess/go/vt/srvtopo/srvtopotest" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/topotools" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/topodata" @@ -366,7 +365,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, f func(dg *DiscoveryGateway, targ // no tablet 
hc.Reset() dg.tsc.ResetForTesting() - want := []string{"target: ks.0.replica", "no valid tablet"} + want := []string{"target: ks.0.replica", `no healthy tablet available for 'keyspace:"ks" shard:"0" tablet_type:REPLICA`} err := f(dg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) @@ -391,14 +390,9 @@ func testDiscoveryGatewayGeneric(t *testing.T, f func(dg *DiscoveryGateway, targ sc2 := hc.AddTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - ep1 := sc1.Tablet() - ep2 := sc2.Tablet() err = f(dg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) - verifyShardErrorEither(t, err, - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep1)), - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep2))) // fatal error hc.Reset() @@ -407,22 +401,16 @@ func testDiscoveryGatewayGeneric(t *testing.T, f func(dg *DiscoveryGateway, targ sc2 = hc.AddTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - ep1 = sc1.Tablet() - ep2 = sc2.Tablet() err = f(dg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) - verifyShardErrorEither(t, err, - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep1)), - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep2))) // server error - no retry hc.Reset() dg.tsc.ResetForTesting() sc1 = hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - ep1 = sc1.Tablet() err = f(dg, target) - verifyContainsError(t, err, fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep1)), vtrpcpb.Code_INVALID_ARGUMENT) + verifyContainsError(t, err, "target: ks.0.replica", 
vtrpcpb.Code_INVALID_ARGUMENT) // no failure hc.Reset() @@ -453,23 +441,15 @@ func testDiscoveryGatewayTransact(t *testing.T, f func(dg *DiscoveryGateway, tar sc2 := hc.AddTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - ep1 := sc1.Tablet() - ep2 := sc2.Tablet() err := f(dg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) - format := `used tablet: %s` - verifyShardErrorEither(t, err, - fmt.Sprintf(format, topotools.TabletIdent(ep1)), - fmt.Sprintf(format, topotools.TabletIdent(ep2))) // server error - no retry hc.Reset() dg.tsc.ResetForTesting() sc1 = hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - ep1 = sc1.Tablet() err = f(dg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_INVALID_ARGUMENT) - verifyContainsError(t, err, fmt.Sprintf(format, topotools.TabletIdent(ep1)), vtrpcpb.Code_INVALID_ARGUMENT) } diff --git a/go/vt/vtgate/endtoend/database_func_test.go b/go/vt/vtgate/endtoend/misc_test.go similarity index 58% rename from go/vt/vtgate/endtoend/database_func_test.go rename to go/vt/vtgate/endtoend/misc_test.go index 3a3b27c65f8..4ad9200956a 100644 --- a/go/vt/vtgate/endtoend/database_func_test.go +++ b/go/vt/vtgate/endtoend/misc_test.go @@ -21,20 +21,30 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" ) func TestDatabaseFunc(t *testing.T) { ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer conn.Close() exec(t, conn, "use ks") qr := exec(t, conn, "select database()") - if got, want := fmt.Sprintf("%v", qr.Rows), `[[VARBINARY("ks")]]`; got != want { - 
t.Errorf("select:\n%v want\n%v", got, want) - } + require.Equal(t, `[[VARBINARY("ks")]]`, fmt.Sprintf("%v", qr.Rows)) +} + +func TestSysNumericPrecisionScale(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + qr := exec(t, conn, "select numeric_precision, numeric_scale from information_schema.columns where table_schema = 'ks' and table_name = 't1'") + assert.True(t, qr.Fields[0].Type == qr.Rows[0][0].Type()) + assert.True(t, qr.Fields[1].Type == qr.Rows[0][1].Type()) } diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go index e11088045eb..f5eec59be3e 100644 --- a/go/vt/vtgate/endtoend/vstream_test.go +++ b/go/vt/vtgate/endtoend/vstream_test.go @@ -23,6 +23,8 @@ import ( "sync" "testing" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" @@ -79,7 +81,8 @@ func TestVStream(t *testing.T) { Match: "/.*/", }}, } - reader, err := gconn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter) + flags := &vtgatepb.VStreamFlags{} + reader, err := gconn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter, flags) if err != nil { t.Fatal(err) } @@ -194,7 +197,8 @@ func TestVStreamCopyBasic(t *testing.T) { Filter: "select * from t1", }}, } - reader, err := gconn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter) + flags := &vtgatepb.VStreamFlags{} + reader, err := gconn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter, flags) _, _ = conn, mconn if err != nil { t.Fatal(err) @@ -247,7 +251,8 @@ func TestVStreamCurrent(t *testing.T) { Filter: "select * from t1", }}, } - reader, err := gconn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter) + flags := &vtgatepb.VStreamFlags{} + reader, err := gconn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter, flags) _, _ = conn, mconn if err != nil { t.Fatal(err) diff --git 
a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go new file mode 100644 index 00000000000..50721c548f3 --- /dev/null +++ b/go/vt/vtgate/engine/cached_size.go @@ -0,0 +1,882 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package engine + +import ( + "math" + "reflect" + "unsafe" +) + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *AggregateParams) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Alias string + size += int64(len(cached.Alias)) + return size +} +func (cached *AlterVSchema) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field AlterVschemaDDL *vitess.io/vitess/go/vt/sqlparser.AlterVschema + size += cached.AlterVschemaDDL.CachedSize(true) + return size +} +func (cached *Concatenate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Sources []vitess.io/vitess/go/vt/vtgate/engine.Primitive + { + size += int64(cap(cached.Sources)) * int64(16) + for _, elem := range cached.Sources { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } 
+ } + } + return size +} +func (cached *DBDDL) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *DDL) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(58) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field SQL string + size += int64(len(cached.SQL)) + // field DDL vitess.io/vitess/go/vt/sqlparser.DDLStatement + if cc, ok := cached.DDL.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field NormalDDL *vitess.io/vitess/go/vt/vtgate/engine.Send + size += cached.NormalDDL.CachedSize(true) + // field OnlineDDL *vitess.io/vitess/go/vt/vtgate/engine.OnlineDDL + size += cached.OnlineDDL.CachedSize(true) + return size +} +func (cached *DML) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(144) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TargetDestination vitess.io/vitess/go/vt/key.Destination + if cc, ok := cached.TargetDestination.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Query string + size += int64(len(cached.Query)) + // field Vindex vitess.io/vitess/go/vt/vtgate/vindexes.SingleColumn + if cc, ok := cached.Vindex.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Values []vitess.io/vitess/go/sqltypes.PlanValue + { + size += int64(cap(cached.Values)) * int64(88) + for _, elem := range cached.Values { + size += elem.CachedSize(false) + } + } + // field KsidVindex vitess.io/vitess/go/vt/vtgate/vindexes.SingleColumn + if cc, ok := cached.KsidVindex.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Table 
*vitess.io/vitess/go/vt/vtgate/vindexes.Table + size += cached.Table.CachedSize(true) + // field OwnedVindexQuery string + size += int64(len(cached.OwnedVindexQuery)) + return size +} +func (cached *Delete) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(144) + } + // field DML vitess.io/vitess/go/vt/vtgate/engine.DML + size += cached.DML.CachedSize(false) + return size +} +func (cached *Distinct) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Source vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Source.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Generate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(112) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field Query string + size += int64(len(cached.Query)) + // field Values vitess.io/vitess/go/sqltypes.PlanValue + size += cached.Values.CachedSize(false) + return size +} +func (cached *Insert) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(144) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field Query string + size += int64(len(cached.Query)) + // field VindexValues []vitess.io/vitess/go/sqltypes.PlanValue + { + size += int64(cap(cached.VindexValues)) * int64(88) + for _, elem := range cached.VindexValues { + size += elem.CachedSize(false) + } + } + // field Table *vitess.io/vitess/go/vt/vtgate/vindexes.Table + size += cached.Table.CachedSize(true) + // field Generate *vitess.io/vitess/go/vt/vtgate/engine.Generate + size += cached.Generate.CachedSize(true) + // field Prefix string + size += 
int64(len(cached.Prefix)) + // field Mid []string + { + size += int64(cap(cached.Mid)) * int64(16) + for _, elem := range cached.Mid { + size += int64(len(elem)) + } + } + // field Suffix string + size += int64(len(cached.Suffix)) + return size +} + +//go:nocheckptr +func (cached *Join) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(72) + } + // field Left vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Right vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Cols []int + { + size += int64(cap(cached.Cols)) * int64(8) + } + // field Vars map[string]int + if cached.Vars != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.Vars) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 208) + if len(cached.Vars) > 0 || numBuckets > 1 { + size += int64(numBuckets * 208) + } + for k := range cached.Vars { + size += int64(len(k)) + } + } + return size +} +func (cached *Limit) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(192) + } + // field Count vitess.io/vitess/go/sqltypes.PlanValue + size += cached.Count.CachedSize(false) + // field Offset vitess.io/vitess/go/sqltypes.PlanValue + size += cached.Offset.CachedSize(false) + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Lock) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Keyspace 
*vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TargetDestination vitess.io/vitess/go/vt/key.Destination + if cc, ok := cached.TargetDestination.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Query string + size += int64(len(cached.Query)) + return size +} +func (cached *MemorySort) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field UpperLimit vitess.io/vitess/go/sqltypes.PlanValue + size += cached.UpperLimit.CachedSize(false) + // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderbyParams + { + size += int64(cap(cached.OrderBy)) * int64(17) + } + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *MergeSort) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Primitives []vitess.io/vitess/go/vt/vtgate/engine.StreamExecutor + { + size += int64(cap(cached.Primitives)) * int64(16) + for _, elem := range cached.Primitives { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderbyParams + { + size += int64(cap(cached.OrderBy)) * int64(17) + } + return size +} +func (cached *OnlineDDL) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(72) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field DDL vitess.io/vitess/go/vt/sqlparser.DDLStatement + if cc, ok := cached.DDL.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field SQL string + size += int64(len(cached.SQL)) + // field Strategy vitess.io/vitess/go/vt/schema.DDLStrategy + size += 
int64(len(cached.Strategy)) + // field Options string + size += int64(len(cached.Options)) + return size +} +func (cached *OrderedAggregate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field Aggregates []vitess.io/vitess/go/vt/vtgate/engine.AggregateParams + { + size += int64(cap(cached.Aggregates)) * int64(32) + for _, elem := range cached.Aggregates { + size += elem.CachedSize(false) + } + } + // field Keys []int + { + size += int64(cap(cached.Keys)) * int64(8) + } + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Plan) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(120) + } + // field Original string + size += int64(len(cached.Original)) + // field Instructions vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Instructions.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field BindVarNeeds *vitess.io/vitess/go/vt/sqlparser.BindVarNeeds + size += cached.BindVarNeeds.CachedSize(true) + // field Warnings []*vitess.io/vitess/go/vt/proto/query.QueryWarning + { + size += int64(cap(cached.Warnings)) * int64(8) + for _, elem := range cached.Warnings { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *Projection) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Cols []string + { + size += int64(cap(cached.Cols)) * int64(16) + for _, elem := range cached.Cols { + size += int64(len(elem)) + } + } + // field Exprs []vitess.io/vitess/go/vt/vtgate/evalengine.Expr + { + size += int64(cap(cached.Exprs)) * int64(16) + for _, elem := range cached.Exprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field 
Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *PulloutSubquery) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(72) + } + // field SubqueryResult string + size += int64(len(cached.SubqueryResult)) + // field HasValues string + size += int64(len(cached.HasValues)) + // field Subquery vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Subquery.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Underlying vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Underlying.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *RenameFields) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Cols []string + { + size += int64(cap(cached.Cols)) * int64(16) + for _, elem := range cached.Cols { + size += int64(len(elem)) + } + } + // field Indices []int + { + size += int64(cap(cached.Indices)) * int64(8) + } + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ReplaceVariables) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *RevertMigration) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field Stmt 
*vitess.io/vitess/go/vt/sqlparser.RevertMigration + size += cached.Stmt.CachedSize(true) + // field Query string + size += int64(len(cached.Query)) + return size +} +func (cached *Route) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(208) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TargetDestination vitess.io/vitess/go/vt/key.Destination + if cc, ok := cached.TargetDestination.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Query string + size += int64(len(cached.Query)) + // field TableName string + size += int64(len(cached.TableName)) + // field FieldQuery string + size += int64(len(cached.FieldQuery)) + // field Vindex vitess.io/vitess/go/vt/vtgate/vindexes.SingleColumn + if cc, ok := cached.Vindex.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Values []vitess.io/vitess/go/sqltypes.PlanValue + { + size += int64(cap(cached.Values)) * int64(88) + for _, elem := range cached.Values { + size += elem.CachedSize(false) + } + } + // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderbyParams + { + size += int64(cap(cached.OrderBy)) * int64(17) + } + // field SysTableTableSchema vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.SysTableTableSchema.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field SysTableTableName vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.SysTableTableName.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Rows) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field rows [][]vitess.io/vitess/go/sqltypes.Value + { + size += int64(cap(cached.rows)) * int64(24) + for _, elem := range cached.rows { + { + size += int64(cap(elem)) * int64(32) + for _, elem := range elem { + size += 
elem.CachedSize(false) + } + } + } + } + // field fields []*vitess.io/vitess/go/vt/proto/query.Field + { + size += int64(cap(cached.fields)) * int64(8) + for _, elem := range cached.fields { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *SQLCalcFoundRows) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field LimitPrimitive vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.LimitPrimitive.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field CountPrimitive vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.CountPrimitive.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Send) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(43) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TargetDestination vitess.io/vitess/go/vt/key.Destination + if cc, ok := cached.TargetDestination.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Query string + size += int64(len(cached.Query)) + return size +} +func (cached *Set) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Ops []vitess.io/vitess/go/vt/vtgate/engine.SetOp + { + size += int64(cap(cached.Ops)) * int64(16) + for _, elem := range cached.Ops { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Subquery) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(40) + } + // field Cols []int + { + size += 
int64(cap(cached.Cols)) * int64(8) + } + // field Subquery vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Subquery.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *SysVarCheckAndIgnore) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Name string + size += int64(len(cached.Name)) + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TargetDestination vitess.io/vitess/go/vt/key.Destination + if cc, ok := cached.TargetDestination.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Expr string + size += int64(len(cached.Expr)) + return size +} +func (cached *SysVarIgnore) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Name string + size += int64(len(cached.Name)) + // field Expr string + size += int64(len(cached.Expr)) + return size +} +func (cached *SysVarReservedConn) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Name string + size += int64(len(cached.Name)) + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TargetDestination vitess.io/vitess/go/vt/key.Destination + if cc, ok := cached.TargetDestination.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Expr string + size += int64(len(cached.Expr)) + return size +} +func (cached *SysVarSetAware) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Name string + size += int64(len(cached.Name)) + // field Expr vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += 
cc.CachedSize(true) + } + return size +} + +//go:nocheckptr +func (cached *Update) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(152) + } + // field DML vitess.io/vitess/go/vt/vtgate/engine.DML + size += cached.DML.CachedSize(false) + // field ChangedVindexValues map[string]*vitess.io/vitess/go/vt/vtgate/engine.VindexValues + if cached.ChangedVindexValues != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.ChangedVindexValues) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 208) + if len(cached.ChangedVindexValues) > 0 || numBuckets > 1 { + size += int64(numBuckets * 208) + } + for k, v := range cached.ChangedVindexValues { + size += int64(len(k)) + size += v.CachedSize(true) + } + } + return size +} +func (cached *UpdateTarget) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Target string + size += int64(len(cached.Target)) + return size +} +func (cached *UserDefinedVariable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Name string + size += int64(len(cached.Name)) + // field Expr vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *VindexFunc) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(160) + } + // field Fields []*vitess.io/vitess/go/vt/proto/query.Field + { + size += int64(cap(cached.Fields)) * int64(8) + for _, elem := range cached.Fields { + size += elem.CachedSize(true) + } + } + // field Cols []int + { + size += int64(cap(cached.Cols)) * int64(8) 
+ } + // field Vindex vitess.io/vitess/go/vt/vtgate/vindexes.SingleColumn + if cc, ok := cached.Vindex.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Value vitess.io/vitess/go/sqltypes.PlanValue + size += cached.Value.CachedSize(false) + return size +} + +//go:nocheckptr +func (cached *VindexValues) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field PvMap map[string]vitess.io/vitess/go/sqltypes.PlanValue + if cached.PvMap != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.PvMap) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 848) + if len(cached.PvMap) > 0 || numBuckets > 1 { + size += int64(numBuckets * 848) + } + for k, v := range cached.PvMap { + size += int64(len(k)) + size += v.CachedSize(false) + } + } + return size +} + +//go:nocheckptr +func (cached *shardRoute) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field query string + size += int64(len(cached.query)) + // field rs *vitess.io/vitess/go/vt/srvtopo.ResolvedShard + size += cached.rs.CachedSize(true) + // field bv map[string]*vitess.io/vitess/go/vt/proto/query.BindVariable + if cached.bv != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.bv) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 208) + if len(cached.bv) > 0 || numBuckets > 1 { + size += int64(numBuckets * 208) + } + for k, v := range cached.bv { + size += int64(len(k)) + size += v.CachedSize(true) + } + } + return size +} diff --git a/go/vt/vtgate/engine/comparer.go b/go/vt/vtgate/engine/comparer.go new file 
mode 100644 index 00000000000..a685c4816a2 --- /dev/null +++ b/go/vt/vtgate/engine/comparer.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vtgate/evalengine" +) + +// comparer is the struct that has the logic for comparing two rows in the result set +type comparer struct { + orderBy, weightString int + desc bool +} + +// compare compares two rows given the comparer and returns which one should be earlier in the result set +// -1 if the first row should be earlier +// 1 is the second row should be earlier +// 0 if both the rows have equal ordering +func (c *comparer) compare(r1, r2 []sqltypes.Value) (int, error) { + cmp, err := evalengine.NullsafeCompare(r1[c.orderBy], r2[c.orderBy]) + if err != nil { + _, isComparisonErr := err.(evalengine.UnsupportedComparisonError) + if !(isComparisonErr && c.weightString != -1) { + return 0, err + } + // in case of a comparison error switch to using the weight string column for ordering + c.orderBy = c.weightString + c.weightString = -1 + cmp, err = evalengine.NullsafeCompare(r1[c.orderBy], r2[c.orderBy]) + if err != nil { + return 0, err + } + } + // change the result if descending ordering is required + if c.desc { + cmp = -cmp + } + return cmp, nil +} + +// extractSlices extracts the three fields of OrderbyParams into a slice of comparers +func extractSlices(input []OrderbyParams) []*comparer { + 
var result []*comparer + for _, order := range input { + result = append(result, &comparer{ + orderBy: order.Col, + weightString: order.WeightStringCol, + desc: order.Desc, + }) + } + return result +} diff --git a/go/vt/vtgate/engine/comparer_test.go b/go/vt/vtgate/engine/comparer_test.go new file mode 100644 index 00000000000..c1be2c25e82 --- /dev/null +++ b/go/vt/vtgate/engine/comparer_test.go @@ -0,0 +1,114 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" +) + +func TestComparer(t *testing.T) { + tests := []struct { + comparer comparer + row1 []sqltypes.Value + row2 []sqltypes.Value + output int + }{ + { + comparer: comparer{ + orderBy: 0, + weightString: -1, + desc: true, + }, + row1: []sqltypes.Value{ + sqltypes.NewInt64(23), + }, + row2: []sqltypes.Value{ + sqltypes.NewInt64(34), + }, + output: 1, + }, { + comparer: comparer{ + orderBy: 0, + weightString: -1, + desc: false, + }, + row1: []sqltypes.Value{ + sqltypes.NewInt64(23), + }, + row2: []sqltypes.Value{ + sqltypes.NewInt64(23), + }, + output: 0, + }, { + comparer: comparer{ + orderBy: 0, + weightString: -1, + desc: false, + }, + row1: []sqltypes.Value{ + sqltypes.NewInt64(23), + }, + row2: []sqltypes.Value{ + sqltypes.NewInt64(12), + }, + output: 1, + }, { + comparer: comparer{ + orderBy: 1, + weightString: 0, + desc: false, + }, + row1: 
[]sqltypes.Value{ + sqltypes.NewInt64(23), + sqltypes.NewVarChar("b"), + }, + row2: []sqltypes.Value{ + sqltypes.NewInt64(34), + sqltypes.NewVarChar("a"), + }, + output: -1, + }, { + comparer: comparer{ + orderBy: 1, + weightString: 0, + desc: true, + }, + row1: []sqltypes.Value{ + sqltypes.NewInt64(23), + sqltypes.NewVarChar("A"), + }, + row2: []sqltypes.Value{ + sqltypes.NewInt64(23), + sqltypes.NewVarChar("a"), + }, + output: 0, + }, + } + + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + got, err := test.comparer.compare(test.row1, test.row2) + require.NoError(t, err) + require.Equal(t, test.output, got) + }) + } +} diff --git a/go/vt/vtgate/engine/concatenate.go b/go/vt/vtgate/engine/concatenate.go index f54feffac68..9143a149802 100644 --- a/go/vt/vtgate/engine/concatenate.go +++ b/go/vt/vtgate/engine/concatenate.go @@ -19,8 +19,6 @@ package engine import ( "sync" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -42,12 +40,20 @@ func (c *Concatenate) RouteType() string { // GetKeyspaceName specifies the Keyspace that this primitive routes to func (c *Concatenate) GetKeyspaceName() string { - return formatTwoOptionsNicely(c.Sources[0].GetKeyspaceName(), c.Sources[1].GetKeyspaceName()) + res := c.Sources[0].GetKeyspaceName() + for i := 1; i < len(c.Sources); i++ { + res = formatTwoOptionsNicely(res, c.Sources[i].GetKeyspaceName()) + } + return res } // GetTableName specifies the table that this primitive routes to. 
func (c *Concatenate) GetTableName() string { - return formatTwoOptionsNicely(c.Sources[0].GetTableName(), c.Sources[1].GetTableName()) + res := c.Sources[0].GetTableName() + for i := 1; i < len(c.Sources); i++ { + res = formatTwoOptionsNicely(res, c.Sources[i].GetTableName()) + } + return res } func formatTwoOptionsNicely(a, b string) string { @@ -57,49 +63,62 @@ func formatTwoOptionsNicely(a, b string) string { return a + "_" + b } +var errWrongNumberOfColumnsInSelect = vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.WrongNumberOfColumnsInSelect, "The used SELECT statements have a different number of columns") + // Execute performs a non-streaming exec. func (c *Concatenate) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - lhs, rhs, err := c.execSources(vcursor, bindVars, wantfields) + res, err := c.execSources(vcursor, bindVars, wantfields) if err != nil { return nil, err } - fields, err := c.getFields(lhs.Fields, rhs.Fields) + fields, err := c.getFields(res) if err != nil { return nil, err } - if len(lhs.Rows) > 0 && - len(rhs.Rows) > 0 && - len(lhs.Rows[0]) != len(rhs.Rows[0]) { - return nil, mysql.NewSQLError(mysql.ERWrongNumberOfColumnsInSelect, "21000", "The used SELECT statements have a different number of columns") + var rowsAffected uint64 = 0 + var rows [][]sqltypes.Value + + for _, r := range res { + rowsAffected += r.RowsAffected + + if len(rows) > 0 && + len(r.Rows) > 0 && + len(rows[0]) != len(r.Rows[0]) { + return nil, errWrongNumberOfColumnsInSelect + } + + rows = append(rows, r.Rows...) 
} return &sqltypes.Result{ Fields: fields, - RowsAffected: lhs.RowsAffected + rhs.RowsAffected, - Rows: append(lhs.Rows, rhs.Rows...), + RowsAffected: rowsAffected, + Rows: rows, }, nil } -func (c *Concatenate) getFields(a, b []*querypb.Field) ([]*querypb.Field, error) { - switch { - case a != nil && b != nil: - err := compareFields(a, b) +func (c *Concatenate) getFields(res []*sqltypes.Result) ([]*querypb.Field, error) { + var resFields []*querypb.Field + for _, r := range res { + fields := r.Fields + if fields == nil { + continue + } + if resFields == nil { + resFields = fields + continue + } + err := compareFields(fields, resFields) if err != nil { return nil, err } - return a, nil - case a != nil: - return a, nil - case b != nil: - return b, nil } - - return nil, nil + return resFields, nil } -func (c *Concatenate) execSources(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, *sqltypes.Result, error) { - results := make([]*sqltypes.Result, 2) +func (c *Concatenate) execSources(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) { + results := make([]*sqltypes.Result, len(c.Sources)) g, restoreCtx := vcursor.ErrorGroupCancellableContext() defer restoreCtx() for i, source := range c.Sources { @@ -115,9 +134,9 @@ func (c *Concatenate) execSources(vcursor VCursor, bindVars map[string]*querypb. } if err := g.Wait(); err != nil { - return nil, nil, vterrors.Wrap(err, "Concatenate.Execute") + return nil, err } - return results[0], results[1], nil + return results, nil } // StreamExecute performs a streaming exec. @@ -177,25 +196,32 @@ func (c *Concatenate) StreamExecute(vcursor VCursor, bindVars map[string]*queryp // GetFields fetches the field info. 
func (c *Concatenate) GetFields(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - lhs, err := c.Sources[0].GetFields(vcursor, bindVars) - if err != nil { - return nil, err - } - rhs, err := c.Sources[1].GetFields(vcursor, bindVars) - if err != nil { - return nil, err - } - err = compareFields(lhs.Fields, rhs.Fields) + res, err := c.Sources[0].GetFields(vcursor, bindVars) if err != nil { return nil, err } - return lhs, nil + for i := 1; i < len(c.Sources); i++ { + result, err := c.Sources[i].GetFields(vcursor, bindVars) + if err != nil { + return nil, err + } + err = compareFields(result.Fields, res.Fields) + if err != nil { + return nil, err + } + } + return res, nil } //NeedsTransaction returns whether a transaction is needed for this primitive func (c *Concatenate) NeedsTransaction() bool { - return c.Sources[0].NeedsTransaction() || c.Sources[1].NeedsTransaction() + for _, source := range c.Sources { + if source.NeedsTransaction() { + return true + } + } + return false } // Inputs returns the input primitives for this @@ -209,12 +235,12 @@ func (c *Concatenate) description() PrimitiveDescription { func compareFields(fields1 []*querypb.Field, fields2 []*querypb.Field) error { if len(fields1) != len(fields2) { - return mysql.NewSQLError(mysql.ERWrongNumberOfColumnsInSelect, "21000", "The used SELECT statements have a different number of columns") + return errWrongNumberOfColumnsInSelect } for i, field2 := range fields2 { field1 := fields1[i] if field1.Type != field2.Type { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "column field type does not match for name: (%v, %v) types: (%v, %v)", field1.Name, field2.Name, field1.Type, field2.Type) + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "merging field of different types is not supported, name: (%v, %v) types: (%v, %v)", field1.Name, field2.Name, field1.Type, field2.Type) } } return nil diff --git a/go/vt/vtgate/engine/concatenate_test.go 
b/go/vt/vtgate/engine/concatenate_test.go index 4b24562dd10..8e2a5134d81 100644 --- a/go/vt/vtgate/engine/concatenate_test.go +++ b/go/vt/vtgate/engine/concatenate_test.go @@ -46,6 +46,7 @@ func TestConcatenate_NoErrors(t *testing.T) { inputs: []*sqltypes.Result{ r("id1|col11|col12", "int64|varbinary|varbinary"), r("id2|col21|col22", "int64|varbinary|varbinary"), + r("id3|col31|col32", "int64|varbinary|varbinary"), }, expectedResult: r("id1|col11|col12", "int64|varbinary|varbinary"), }, { @@ -53,18 +54,22 @@ func TestConcatenate_NoErrors(t *testing.T) { inputs: []*sqltypes.Result{ r("myid|mycol1|mycol2", "int64|varchar|varbinary", "11|m1|n1", "22|m2|n2"), r("id|col1|col2", "int64|varchar|varbinary", "1|a1|b1", "2|a2|b2"), + r("id2|col2|col3", "int64|varchar|varbinary", "3|a3|b3"), + r("id2|col2|col3", "int64|varchar|varbinary", "4|a4|b4"), }, - expectedResult: r("myid|mycol1|mycol2", "int64|varchar|varbinary", "11|m1|n1", "22|m2|n2", "1|a1|b1", "2|a2|b2"), + expectedResult: r("myid|mycol1|mycol2", "int64|varchar|varbinary", "11|m1|n1", "22|m2|n2", "1|a1|b1", "2|a2|b2", "3|a3|b3", "4|a4|b4"), }, { testName: "mismatch field type", inputs: []*sqltypes.Result{ + r("id|col1|col2", "int64|varbinary|varbinary", "1|a1|b1", "2|a2|b2"), r("id|col1|col2", "int64|varbinary|varbinary", "1|a1|b1", "2|a2|b2"), r("id|col3|col4", "int64|varchar|varbinary", "1|a1|b1", "2|a2|b2"), }, - expectedError: "column field type does not match for name", + expectedError: "merging field of different types is not supported", }, { testName: "input source has different column count", inputs: []*sqltypes.Result{ + r("id|col1|col2", "int64|varchar|varchar", "1|a1|b1", "2|a2|b2"), r("id|col1|col2", "int64|varchar|varchar", "1|a1|b1", "2|a2|b2"), r("id|col3|col4|col5", "int64|varchar|varchar|int32", "1|a1|b1|5", "2|a2|b2|6"), }, @@ -79,12 +84,13 @@ func TestConcatenate_NoErrors(t *testing.T) { }} for _, tc := range testCases { - require.Equal(t, 2, len(tc.inputs)) + var sources []Primitive + for _, 
input := range tc.inputs { + // input is added twice, since the first one is used by execute and the next by stream execute + sources = append(sources, &fakePrimitive{results: []*sqltypes.Result{input, input}}) + } concatenate := &Concatenate{ - Sources: []Primitive{ - &fakePrimitive{results: []*sqltypes.Result{tc.inputs[0], tc.inputs[0]}}, - &fakePrimitive{results: []*sqltypes.Result{tc.inputs[1], tc.inputs[1]}}, - }, + Sources: sources, } t.Run(tc.testName+"-Execute", func(t *testing.T) { @@ -113,18 +119,18 @@ func TestConcatenate_NoErrors(t *testing.T) { func TestConcatenate_WithErrors(t *testing.T) { strFailed := "failed" - errString := "Concatenate.Execute: " + strFailed fake := r("id|col1|col2", "int64|varchar|varbinary", "1|a1|b1", "2|a2|b2") concatenate := &Concatenate{ Sources: []Primitive{ + &fakePrimitive{results: []*sqltypes.Result{fake, fake}}, &fakePrimitive{results: []*sqltypes.Result{nil, nil}, sendErr: errors.New(strFailed)}, &fakePrimitive{results: []*sqltypes.Result{fake, fake}}, }, } ctx := context.Background() _, err := concatenate.Execute(&noopVCursor{ctx: ctx}, nil, true) - require.EqualError(t, err, errString) + require.EqualError(t, err, strFailed) _, err = wrapStreamExecute(concatenate, &noopVCursor{ctx: ctx}, nil, true) require.EqualError(t, err, strFailed) @@ -133,10 +139,11 @@ func TestConcatenate_WithErrors(t *testing.T) { Sources: []Primitive{ &fakePrimitive{results: []*sqltypes.Result{fake, fake}}, &fakePrimitive{results: []*sqltypes.Result{nil, nil}, sendErr: errors.New(strFailed)}, + &fakePrimitive{results: []*sqltypes.Result{fake, fake}}, }, } _, err = concatenate.Execute(&noopVCursor{ctx: ctx}, nil, true) - require.EqualError(t, err, errString) + require.EqualError(t, err, strFailed) _, err = wrapStreamExecute(concatenate, &noopVCursor{ctx: ctx}, nil, true) require.EqualError(t, err, strFailed) } diff --git a/go/vt/vtgate/engine/dbddl.go b/go/vt/vtgate/engine/dbddl.go new file mode 100644 index 00000000000..f06c8150c2b --- 
/dev/null +++ b/go/vt/vtgate/engine/dbddl.go @@ -0,0 +1,203 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + "strings" + "time" + + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/srvtopo" + + "vitess.io/vitess/go/vt/log" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ Primitive = (*DBDDL)(nil) + +//goland:noinspection GoVarAndConstTypeMayBeOmitted +var databaseCreatorPlugins = map[string]DBDDLPlugin{} + +// DBDDLRegister registers a dbDDL plugin under the specified name. +// A duplicate plugin will generate a panic. 
+func DBDDLRegister(name string, plugin DBDDLPlugin) { + if _, ok := databaseCreatorPlugins[name]; ok { + panic(fmt.Sprintf("%s is already registered", name)) + } + databaseCreatorPlugins[name] = plugin +} + +// DBDDLPlugin is the interface that you need to implement to add a custom CREATE/DROP DATABASE handler +type DBDDLPlugin interface { + CreateDatabase(ctx context.Context, name string) error + DropDatabase(ctx context.Context, name string) error +} + +// DBDDL is just a container around custom database provisioning plugins +// The default behaviour is to just return an error +type DBDDL struct { + name string + create bool + queryTimeout int + + noInputs + noTxNeeded +} + +// NewDBDDL creates the engine primitive +// `create` will be true for CREATE, and false for DROP +func NewDBDDL(dbName string, create bool, timeout int) *DBDDL { + return &DBDDL{ + name: dbName, + create: create, + queryTimeout: timeout, + } +} + +// RouteType implements the Primitive interface +func (c *DBDDL) RouteType() string { + if c.create { + return "CreateDB" + } + return "DropDB" +} + +// GetKeyspaceName implements the Primitive interface +func (c *DBDDL) GetKeyspaceName() string { + return c.name +} + +// GetTableName implements the Primitive interface +func (c *DBDDL) GetTableName() string { + return "" +} + +// Execute implements the Primitive interface +func (c *DBDDL) Execute(vcursor VCursor, _ map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { + name := vcursor.GetDBDDLPluginName() + plugin, ok := databaseCreatorPlugins[name] + if !ok { + log.Errorf("'%s' database ddl plugin is not registered. 
Falling back to default plugin", name) + plugin = databaseCreatorPlugins[defaultDBDDLPlugin] + } + if c.queryTimeout != 0 { + cancel := vcursor.SetContextTimeout(time.Duration(c.queryTimeout) * time.Millisecond) + defer cancel() + } + + if c.create { + return c.createDatabase(vcursor, plugin) + } + + return c.dropDatabase(vcursor, plugin) +} + +func (c *DBDDL) createDatabase(vcursor VCursor, plugin DBDDLPlugin) (*sqltypes.Result, error) { + ctx := vcursor.Context() + err := plugin.CreateDatabase(ctx, c.name) + if err != nil { + return nil, err + } + var destinations []*srvtopo.ResolvedShard + for { + // loop until we have found a valid shard + destinations, _, err = vcursor.ResolveDestinations(c.name, nil, []key.Destination{key.DestinationAllShards{}}) + if err == nil { + break + } + select { + case <-ctx.Done(): //context cancelled + return nil, vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "could not validate create database: destination not resolved") + case <-time.After(500 * time.Millisecond): //timeout + } + } + var queries []*querypb.BoundQuery + for range destinations { + queries = append(queries, &querypb.BoundQuery{ + Sql: "select 42 from dual where null", + BindVariables: nil, + }) + } + + for { + _, errors := vcursor.ExecuteMultiShard(destinations, queries, false, true) + + noErr := true + for _, err := range errors { + if err != nil { + noErr = false + select { + case <-ctx.Done(): //context cancelled + return nil, vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "could not validate create database: tablets not healthy") + case <-time.After(500 * time.Millisecond): //timeout + } + break + } + } + if noErr { + break + } + } + return &sqltypes.Result{RowsAffected: 1}, nil +} + +func (c *DBDDL) dropDatabase(vcursor VCursor, plugin DBDDLPlugin) (*sqltypes.Result, error) { + ctx := vcursor.Context() + err := plugin.DropDatabase(ctx, c.name) + if err != nil { + return nil, err + } + for vcursor.KeyspaceAvailable(c.name) { + select { + case <-ctx.Done(): 
//context cancelled + return nil, vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "could not validate drop database: keyspace still available in vschema") + case <-time.After(500 * time.Millisecond): //timeout + } + } + + return &sqltypes.Result{StatusFlags: sqltypes.ServerStatusDbDropped}, nil +} + +// StreamExecute implements the Primitive interface +func (c *DBDDL) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + res, err := c.Execute(vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(res) +} + +// GetFields implements the Primitive interface +func (c *DBDDL) GetFields(VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return &sqltypes.Result{}, nil +} + +// description implements the Primitive interface +func (c *DBDDL) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: strings.ToUpper(c.RouteType()), + Keyspace: &vindexes.Keyspace{Name: c.name}, + } +} diff --git a/go/vt/vtgate/engine/dbddl_plugin.go b/go/vt/vtgate/engine/dbddl_plugin.go new file mode 100644 index 00000000000..1b132f330a2 --- /dev/null +++ b/go/vt/vtgate/engine/dbddl_plugin.go @@ -0,0 +1,59 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +type failDBDDL struct{} + +// CreateDatabase implements the DropCreateDB interface +func (failDBDDL) CreateDatabase(context.Context, string) error { + return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "create database is not supported") +} + +// DropDatabase implements the DropCreateDB interface +func (failDBDDL) DropDatabase(context.Context, string) error { + return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "drop database is not supported") +} + +type noOp struct{} + +// CreateDatabase implements the DropCreateDB interface +func (noOp) CreateDatabase(context.Context, string) error { + return nil +} + +// DropDatabase implements the DropCreateDB interface +func (noOp) DropDatabase(context.Context, string) error { + return nil +} + +const ( + faildbDDL = "fail" + noOpdbDDL = "noop" + defaultDBDDLPlugin = faildbDDL +) + +func init() { + DBDDLRegister(faildbDDL, failDBDDL{}) + DBDDLRegister(noOpdbDDL, noOp{}) +} diff --git a/go/vt/vtgate/engine/dbddl_test.go b/go/vt/vtgate/engine/dbddl_test.go new file mode 100644 index 00000000000..dac22baeeb1 --- /dev/null +++ b/go/vt/vtgate/engine/dbddl_test.go @@ -0,0 +1,84 @@ +package engine + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" +) + +type dbddlTestFake struct { + createCalled, dropCalled bool + sleep int +} + +func (d *dbddlTestFake) CreateDatabase(ctx context.Context, name string) error { + if d.sleep > 0 { + time.Sleep(time.Duration(d.sleep) * time.Second) + } + d.createCalled = true + return nil +} + +func (d *dbddlTestFake) DropDatabase(ctx context.Context, name string) error { + if d.sleep > 0 { + time.Sleep(time.Duration(d.sleep) * time.Second) + } + d.dropCalled = true + return nil +} + +var _ DBDDLPlugin = (*dbddlTestFake)(nil) + +func TestDBDDLCreateExecute(t *testing.T) { + 
pluginName := "createFake" + plugin := &dbddlTestFake{} + DBDDLRegister(pluginName, plugin) + + primitive := &DBDDL{ + name: "ks", + create: true, + } + + vc := &loggingVCursor{dbDDLPlugin: pluginName} + + _, err := primitive.Execute(vc, nil, false) + require.NoError(t, err) + require.True(t, plugin.createCalled) + require.False(t, plugin.dropCalled) +} + +func TestDBDDLDropExecute(t *testing.T) { + pluginName := "dropFake" + plugin := &dbddlTestFake{} + DBDDLRegister(pluginName, plugin) + + primitive := &DBDDL{name: "ks"} + + vc := &loggingVCursor{dbDDLPlugin: pluginName, ksAvailable: false} + + _, err := primitive.Execute(vc, nil, false) + require.NoError(t, err) + require.False(t, plugin.createCalled) + require.True(t, plugin.dropCalled) +} + +func TestDBDDLTimeout(t *testing.T) { + pluginName := "timeoutFake" + plugin := &dbddlTestFake{sleep: 2} + DBDDLRegister(pluginName, plugin) + + primitive := &DBDDL{name: "ks", create: true, queryTimeout: 100} + vc := &loggingVCursor{dbDDLPlugin: pluginName, shardErr: fmt.Errorf("db not available")} + _, err := primitive.Execute(vc, nil, false) + assert.EqualError(t, err, "could not validate create database: destination not resolved") + + primitive = &DBDDL{name: "ks", queryTimeout: 100} + vc = &loggingVCursor{dbDDLPlugin: pluginName, ksAvailable: true} + _, err = primitive.Execute(vc, nil, false) + assert.EqualError(t, err, "could not validate drop database: keyspace still available in vschema") +} diff --git a/go/vt/vtgate/engine/ddl.go b/go/vt/vtgate/engine/ddl.go index f936027836c..bb82ae6446f 100644 --- a/go/vt/vtgate/engine/ddl.go +++ b/go/vt/vtgate/engine/ddl.go @@ -37,64 +37,81 @@ type DDL struct { NormalDDL *Send OnlineDDL *OnlineDDL + OnlineDDLEnabled bool + + CreateTempTable bool + noTxNeeded noInputs } -func (v *DDL) description() PrimitiveDescription { +func (ddl *DDL) description() PrimitiveDescription { + other := map[string]interface{}{ + "Query": ddl.SQL, + } + if ddl.CreateTempTable { + other["TempTable"] 
= true + } return PrimitiveDescription{ OperatorType: "DDL", - Keyspace: v.Keyspace, - Other: map[string]interface{}{ - "Query": v.SQL, - }, + Keyspace: ddl.Keyspace, + Other: other, } } // RouteType implements the Primitive interface -func (v *DDL) RouteType() string { +func (ddl *DDL) RouteType() string { return "DDL" } // GetKeyspaceName implements the Primitive interface -func (v *DDL) GetKeyspaceName() string { - return v.Keyspace.Name +func (ddl *DDL) GetKeyspaceName() string { + return ddl.Keyspace.Name } // GetTableName implements the Primitive interface -func (v *DDL) GetTableName() string { - return v.DDL.GetTable().Name.String() +func (ddl *DDL) GetTableName() string { + return ddl.DDL.GetTable().Name.String() } // IsOnlineSchemaDDL returns true if the query is an online schema change DDL -func (v *DDL) isOnlineSchemaDDL() bool { - switch v.DDL.GetAction() { +func (ddl *DDL) isOnlineSchemaDDL() bool { + switch ddl.DDL.GetAction() { case sqlparser.CreateDDLAction, sqlparser.DropDDLAction, sqlparser.AlterDDLAction: - return !v.OnlineDDL.Strategy.IsDirect() + return !ddl.OnlineDDL.Strategy.IsDirect() } return false } // Execute implements the Primitive interface -func (v *DDL) Execute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool) (result *sqltypes.Result, err error) { +func (ddl *DDL) Execute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool) (result *sqltypes.Result, err error) { + if ddl.CreateTempTable { + vcursor.Session().HasCreatedTempTable() + vcursor.Session().NeedsReservedConn() + return ddl.NormalDDL.Execute(vcursor, bindVars, wantfields) + } + strategy, options, err := schema.ParseDDLStrategy(vcursor.Session().GetDDLStrategy()) if err != nil { return nil, err } - v.OnlineDDL.Strategy = strategy - v.OnlineDDL.Options = options - - if v.isOnlineSchemaDDL() { - return v.OnlineDDL.Execute(vcursor, bindVars, wantfields) + ddl.OnlineDDL.Strategy = strategy + ddl.OnlineDDL.Options = options + + if 
ddl.isOnlineSchemaDDL() { + if !ddl.OnlineDDLEnabled { + return nil, schema.ErrOnlineDDLDisabled + } + return ddl.OnlineDDL.Execute(vcursor, bindVars, wantfields) } - return v.NormalDDL.Execute(vcursor, bindVars, wantfields) + return ddl.NormalDDL.Execute(vcursor, bindVars, wantfields) } // StreamExecute implements the Primitive interface -func (v *DDL) StreamExecute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - results, err := v.Execute(vcursor, bindVars, wantfields) +func (ddl *DDL) StreamExecute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + results, err := ddl.Execute(vcursor, bindVars, wantfields) if err != nil { return err } @@ -102,6 +119,6 @@ func (v *DDL) StreamExecute(vcursor VCursor, bindVars map[string]*query.BindVari } // GetFields implements the Primitive interface -func (v *DDL) GetFields(vcursor VCursor, bindVars map[string]*query.BindVariable) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "not reachable") +func (ddl *DDL) GetFields(vcursor VCursor, bindVars map[string]*query.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields in not reachable") } diff --git a/go/vt/vtgate/engine/delete.go b/go/vt/vtgate/engine/delete.go index 8aa40f6d7d1..c2f245fba6c 100644 --- a/go/vt/vtgate/engine/delete.go +++ b/go/vt/vtgate/engine/delete.go @@ -105,10 +105,10 @@ func (del *Delete) GetFields(VCursor, map[string]*querypb.BindVariable) (*sqltyp func (del *Delete) execDeleteUnsharded(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { rss, _, err := vcursor.ResolveDestinations(del.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) if err != nil { - return nil, vterrors.Wrap(err, "execDeleteUnsharded") + return nil, err } if len(rss) != 1 { - return nil, 
vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot send query to multiple shards for un-sharded database: %v", rss) } err = allowOnlyMaster(rss...) if err != nil { @@ -120,11 +120,11 @@ func (del *Delete) execDeleteUnsharded(vcursor VCursor, bindVars map[string]*que func (del *Delete) execDeleteEqual(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { key, err := del.Values[0].ResolveValue(bindVars) if err != nil { - return nil, vterrors.Wrap(err, "execDeleteEqual") + return nil, err } rs, ksid, err := resolveSingleShard(vcursor, del.Vindex, del.Keyspace, key) if err != nil { - return nil, vterrors.Wrap(err, "execDeleteEqual") + return nil, err } err = allowOnlyMaster(rs) if err != nil { @@ -137,7 +137,7 @@ func (del *Delete) execDeleteEqual(vcursor VCursor, bindVars map[string]*querypb if del.OwnedVindexQuery != "" { err = del.deleteVindexEntries(vcursor, bindVars, []*srvtopo.ResolvedShard{rs}) if err != nil { - return nil, vterrors.Wrap(err, "execDeleteEqual") + return nil, err } } return execShard(vcursor, del.Query, bindVars, rs, true /* rollbackOnError */, true /* canAutocommit */) @@ -155,7 +155,7 @@ func (del *Delete) execDeleteIn(vcursor VCursor, bindVars map[string]*querypb.Bi if del.OwnedVindexQuery != "" { if err := del.deleteVindexEntries(vcursor, bindVars, rss); err != nil { - return nil, vterrors.Wrap(err, "execDeleteIn") + return nil, err } } return execMultiShard(vcursor, rss, queries, del.MultiShardAutocommit) @@ -164,7 +164,7 @@ func (del *Delete) execDeleteIn(vcursor VCursor, bindVars map[string]*querypb.Bi func (del *Delete) execDeleteByDestination(vcursor VCursor, bindVars map[string]*querypb.BindVariable, dest key.Destination) (*sqltypes.Result, error) { rss, _, err := vcursor.ResolveDestinations(del.Keyspace.Name, nil, []key.Destination{dest}) if err != nil { - return nil, 
vterrors.Wrap(err, "execDeleteScatter") + return nil, err } err = allowOnlyMaster(rss...) if err != nil { @@ -198,7 +198,7 @@ func (del *Delete) deleteVindexEntries(vcursor VCursor, bindVars map[string]*que subQueryResults, errors := vcursor.ExecuteMultiShard(rss, queries, false, false) for _, err := range errors { if err != nil { - return vterrors.Wrap(err, "deleteVindexEntries") + return err } } diff --git a/go/vt/vtgate/engine/delete_test.go b/go/vt/vtgate/engine/delete_test.go index 47c074fb7b8..01502b314ba 100644 --- a/go/vt/vtgate/engine/delete_test.go +++ b/go/vt/vtgate/engine/delete_test.go @@ -51,11 +51,11 @@ func TestDeleteUnsharded(t *testing.T) { // Failure cases vc = &loggingVCursor{shardErr: errors.New("shard_error")} _, err = del.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execDeleteUnsharded: shard_error") + require.EqualError(t, err, "shard_error") vc = &loggingVCursor{} _, err = del.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "Keyspace does not have exactly one shard: []") + require.EqualError(t, err, "cannot send query to multiple shards for un-sharded database: []") } func TestDeleteEqual(t *testing.T) { @@ -84,7 +84,7 @@ func TestDeleteEqual(t *testing.T) { // Failure case del.Values = []sqltypes.PlanValue{{Key: "aa"}} _, err = del.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execDeleteEqual: missing bind var aa") + require.EqualError(t, err, "missing bind var aa") } func TestDeleteEqualNoRoute(t *testing.T) { @@ -137,7 +137,7 @@ func TestDeleteEqualNoScatter(t *testing.T) { vc := newDMLTestVCursor("0") _, err := del.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execDeleteEqual: cannot map vindex to unique keyspace id: DestinationKeyRange(-)") + require.EqualError(t, err, "cannot map vindex to unique keyspace id: DestinationKeyRange(-)") } func TestDeleteOwnedVindex(t 
*testing.T) { @@ -245,13 +245,13 @@ func TestDeleteSharded(t *testing.T) { // Failure case vc = &loggingVCursor{shardErr: errors.New("shard_error")} _, err = del.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execDeleteScatter: shard_error") + require.EqualError(t, err, "shard_error") } func TestDeleteNoStream(t *testing.T) { del := &Delete{} err := del.StreamExecute(nil, nil, false, nil) - expectError(t, "StreamExecute", err, `query "" cannot be used for streaming`) + require.EqualError(t, err, `query "" cannot be used for streaming`) } func TestDeleteScatterOwnedVindex(t *testing.T) { diff --git a/go/vt/vtgate/engine/distinct.go b/go/vt/vtgate/engine/distinct.go index afd6f2ccb54..9bf260869ae 100644 --- a/go/vt/vtgate/engine/distinct.go +++ b/go/vt/vtgate/engine/distinct.go @@ -112,8 +112,6 @@ func (d *Distinct) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVar } } - result.RowsAffected = uint64(len(result.Rows)) - return result, err } diff --git a/go/vt/vtgate/engine/dml.go b/go/vt/vtgate/engine/dml.go index 9fe6360bb2f..9689074155f 100644 --- a/go/vt/vtgate/engine/dml.go +++ b/go/vt/vtgate/engine/dml.go @@ -103,11 +103,11 @@ func (op DMLOpcode) String() string { func resolveMultiValueShards(vcursor VCursor, keyspace *vindexes.Keyspace, query string, bindVars map[string]*querypb.BindVariable, pv sqltypes.PlanValue, vindex vindexes.SingleColumn) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { keys, err := pv.ResolveList(bindVars) if err != nil { - return nil, nil, vterrors.Wrap(err, "execDeleteIn") + return nil, nil, err } rss, err := resolveMultiShard(vcursor, vindex, keyspace, keys) if err != nil { - return nil, nil, vterrors.Wrap(err, "execDeleteIn") + return nil, nil, err } queries := make([]*querypb.BoundQuery, len(rss)) for i := range rss { diff --git a/go/vt/vtgate/engine/fake_primitive_test.go b/go/vt/vtgate/engine/fake_primitive_test.go index 39c4d209540..d1186e4491c 100644 --- 
a/go/vt/vtgate/engine/fake_primitive_test.go +++ b/go/vt/vtgate/engine/fake_primitive_test.go @@ -136,9 +136,6 @@ func wrapStreamExecute(prim Primitive, vcursor VCursor, bindVars map[string]*que } return nil }) - if result != nil { - result.RowsAffected = uint64(len(result.Rows)) - } return result, err } diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index c9f40b4d383..bccb8394813 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -57,165 +57,191 @@ type noopVCursor struct { ctx context.Context } -func (t noopVCursor) SetDDLStrategy(strategy string) { +func (t *noopVCursor) KeyspaceAvailable(ks string) bool { panic("implement me") } -func (t noopVCursor) GetDDLStrategy() string { +func (t *noopVCursor) SetDDLStrategy(strategy string) { panic("implement me") } -func (t noopVCursor) GetSessionUUID() string { +func (t *noopVCursor) GetDDLStrategy() string { panic("implement me") } -func (t noopVCursor) SetReadAfterWriteGTID(s string) { +func (t *noopVCursor) GetSessionUUID() string { panic("implement me") } -func (t noopVCursor) SetReadAfterWriteTimeout(f float64) { +func (t *noopVCursor) SetReadAfterWriteGTID(s string) { panic("implement me") } -func (t noopVCursor) SetSessionTrackGTIDs(b bool) { +func (t *noopVCursor) SetSessionEnableSystemSettings(allow bool) error { panic("implement me") } -func (t noopVCursor) LookupRowLockShardSession() vtgatepb.CommitOrder { +func (t *noopVCursor) GetSessionEnableSystemSettings() bool { panic("implement me") } -func (t noopVCursor) SetFoundRows(u uint64) { +func (t *noopVCursor) SetReadAfterWriteTimeout(f float64) { panic("implement me") } -func (t noopVCursor) InTransactionAndIsDML() bool { +func (t *noopVCursor) SetSessionTrackGTIDs(b bool) { panic("implement me") } -func (t noopVCursor) FindRoutedTable(sqlparser.TableName) (*vindexes.Table, error) { +func (t *noopVCursor) HasCreatedTempTable() { panic("implement me") } 
-func (t noopVCursor) ExecuteLock(rs *srvtopo.ResolvedShard, query *querypb.BoundQuery) (*sqltypes.Result, error) { +func (t *noopVCursor) LookupRowLockShardSession() vtgatepb.CommitOrder { panic("implement me") } -func (t noopVCursor) NeedsReservedConn() { +func (t *noopVCursor) SetFoundRows(u uint64) { + panic("implement me") +} + +func (t *noopVCursor) InTransactionAndIsDML() bool { + panic("implement me") +} + +func (t *noopVCursor) FindRoutedTable(sqlparser.TableName) (*vindexes.Table, error) { + panic("implement me") +} + +func (t *noopVCursor) ExecuteLock(rs *srvtopo.ResolvedShard, query *querypb.BoundQuery) (*sqltypes.Result, error) { + panic("implement me") +} + +func (t *noopVCursor) NeedsReservedConn() { } -func (t noopVCursor) SetUDV(key string, value interface{}) error { +func (t *noopVCursor) SetUDV(key string, value interface{}) error { panic("implement me") } -func (t noopVCursor) SetSysVar(name string, expr string) { +func (t *noopVCursor) SetSysVar(name string, expr string) { //panic("implement me") } -func (t noopVCursor) InReservedConn() bool { +func (t *noopVCursor) InReservedConn() bool { panic("implement me") } -func (t noopVCursor) ShardSession() []*srvtopo.ResolvedShard { +func (t *noopVCursor) ShardSession() []*srvtopo.ResolvedShard { panic("implement me") } -func (t noopVCursor) ExecuteVSchema(keyspace string, vschemaDDL *sqlparser.AlterVschema) error { +func (t *noopVCursor) ExecuteVSchema(keyspace string, vschemaDDL *sqlparser.AlterVschema) error { panic("implement me") } -func (t noopVCursor) Session() SessionActions { +func (t *noopVCursor) Session() SessionActions { return t } -func (t noopVCursor) SetAutocommit(bool) error { +func (t *noopVCursor) SetAutocommit(bool) error { panic("implement me") } -func (t noopVCursor) SetClientFoundRows(bool) error { +func (t *noopVCursor) SetClientFoundRows(bool) error { panic("implement me") } -func (t noopVCursor) SetSkipQueryPlanCache(bool) error { +func (t *noopVCursor) 
SetSkipQueryPlanCache(bool) error { panic("implement me") } -func (t noopVCursor) SetSQLSelectLimit(int64) error { +func (t *noopVCursor) SetSQLSelectLimit(int64) error { panic("implement me") } -func (t noopVCursor) SetTransactionMode(vtgatepb.TransactionMode) { +func (t *noopVCursor) SetTransactionMode(vtgatepb.TransactionMode) { panic("implement me") } -func (t noopVCursor) SetWorkload(querypb.ExecuteOptions_Workload) { +func (t *noopVCursor) SetWorkload(querypb.ExecuteOptions_Workload) { panic("implement me") } -func (t noopVCursor) SetTarget(string) error { +func (t *noopVCursor) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) { panic("implement me") } -func (t noopVCursor) Context() context.Context { +func (t *noopVCursor) SetTarget(string) error { + panic("implement me") +} + +func (t *noopVCursor) Context() context.Context { if t.ctx == nil { return context.Background() } return t.ctx } -func (t noopVCursor) MaxMemoryRows() int { +func (t *noopVCursor) MaxMemoryRows() int { return testMaxMemoryRows } -func (t noopVCursor) ExceedsMaxMemoryRows(numRows int) bool { +func (t *noopVCursor) ExceedsMaxMemoryRows(numRows int) bool { return !testIgnoreMaxMemoryRows && numRows > testMaxMemoryRows } -func (t noopVCursor) GetKeyspace() string { +func (t *noopVCursor) GetKeyspace() string { return "" } -func (t noopVCursor) SetContextTimeout(timeout time.Duration) context.CancelFunc { - return func() {} +func (t *noopVCursor) SetContextTimeout(timeout time.Duration) context.CancelFunc { + ctx, cancel := context.WithTimeout(t.Context(), timeout) + t.ctx = ctx + return cancel } -func (t noopVCursor) ErrorGroupCancellableContext() (*errgroup.Group, func()) { +func (t *noopVCursor) ErrorGroupCancellableContext() (*errgroup.Group, func()) { g, ctx := errgroup.WithContext(t.ctx) t.ctx = ctx return g, func() {} } -func (t noopVCursor) RecordWarning(warning *querypb.QueryWarning) { +func (t *noopVCursor) RecordWarning(warning *querypb.QueryWarning) { } -func (t 
noopVCursor) Execute(method string, query string, bindvars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error) { +func (t *noopVCursor) Execute(method string, query string, bindvars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error) { panic("unimplemented") } -func (t noopVCursor) ExecuteMultiShard(rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) { +func (t *noopVCursor) ExecuteMultiShard(rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) { panic("unimplemented") } -func (t noopVCursor) AutocommitApproval() bool { +func (t *noopVCursor) AutocommitApproval() bool { panic("unimplemented") } -func (t noopVCursor) ExecuteStandalone(query string, bindvars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) { +func (t *noopVCursor) ExecuteStandalone(query string, bindvars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) { panic("unimplemented") } -func (t noopVCursor) StreamExecuteMulti(query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, callback func(reply *sqltypes.Result) error) error { +func (t *noopVCursor) StreamExecuteMulti(query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, callback func(reply *sqltypes.Result) error) error { panic("unimplemented") } -func (t noopVCursor) ExecuteKeyspaceID(keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) { +func (t *noopVCursor) ExecuteKeyspaceID(keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) { panic("unimplemented") } -func (t 
noopVCursor) ResolveDestinations(keyspace string, ids []*querypb.Value, destinations []key.Destination) ([]*srvtopo.ResolvedShard, [][]*querypb.Value, error) { +func (t *noopVCursor) ResolveDestinations(keyspace string, ids []*querypb.Value, destinations []key.Destination) ([]*srvtopo.ResolvedShard, [][]*querypb.Value, error) { panic("unimplemented") } -func (t noopVCursor) SubmitOnlineDDL(onlineDDl *schema.OnlineDDL) error { +func (t *noopVCursor) SubmitOnlineDDL(onlineDDl *schema.OnlineDDL) error { + panic("unimplemented") +} + +func (t *noopVCursor) GetDBDDLPluginName() string { panic("unimplemented") } @@ -247,12 +273,18 @@ type loggingVCursor struct { resolvedTargetTabletType topodatapb.TabletType tableRoutes tableRoutes + dbDDLPlugin string + ksAvailable bool } type tableRoutes struct { tbl *vindexes.Table } +func (f *loggingVCursor) KeyspaceAvailable(ks string) bool { + return f.ksAvailable +} + func (f *loggingVCursor) SetFoundRows(u uint64) { panic("implement me") } @@ -299,11 +331,16 @@ func (f *loggingVCursor) SetTarget(target string) error { } func (f *loggingVCursor) Context() context.Context { - return context.Background() + if f.ctx == nil { + return context.Background() + } + return f.ctx } -func (f *loggingVCursor) SetContextTimeout(time.Duration) context.CancelFunc { - return func() {} +func (f *loggingVCursor) SetContextTimeout(timeout time.Duration) context.CancelFunc { + ctx, cancel := context.WithTimeout(f.Context(), timeout) + f.ctx = ctx + return cancel } func (f *loggingVCursor) ErrorGroupCancellableContext() (*errgroup.Group, func()) { @@ -483,11 +520,19 @@ func (f *loggingVCursor) SetWorkload(querypb.ExecuteOptions_Workload) { panic("implement me") } +func (f *loggingVCursor) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) { + panic("implement me") +} + func (f *loggingVCursor) FindRoutedTable(tbl sqlparser.TableName) (*vindexes.Table, error) { f.log = append(f.log, fmt.Sprintf("FindTable(%s)", sqlparser.String(tbl))) return 
f.tableRoutes.tbl, nil } +func (f *loggingVCursor) GetDBDDLPluginName() string { + return f.dbDDLPlugin +} + func (f *loggingVCursor) nextResult() (*sqltypes.Result, error) { if f.results == nil || f.curResult >= len(f.results) { return &sqltypes.Result{}, f.resultErr @@ -501,13 +546,6 @@ func (f *loggingVCursor) nextResult() (*sqltypes.Result, error) { return r, nil } -func expectError(t *testing.T, msg string, err error, want string) { - t.Helper() - if err == nil || err.Error() != want { - t.Errorf("%s: %v, want %s", msg, err, want) - } -} - func expectResult(t *testing.T, msg string, result, want *sqltypes.Result) { t.Helper() if !reflect.DeepEqual(result, want) { diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index a1babe74f13..d09be708467 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -212,18 +212,18 @@ func (ins *Insert) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.B // GetFields fetches the field info. 
func (ins *Insert) GetFields(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unreachable code for %q", ins.Query) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable code for %q", ins.Query) } func (ins *Insert) execInsertUnsharded(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { insertID, err := ins.processGenerate(vcursor, bindVars) if err != nil { - return nil, vterrors.Wrap(err, "execInsertUnsharded") + return nil, err } rss, _, err := vcursor.ResolveDestinations(ins.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) if err != nil { - return nil, vterrors.Wrap(err, "execInsertUnsharded") + return nil, err } if len(rss) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) @@ -234,7 +234,7 @@ func (ins *Insert) execInsertUnsharded(vcursor VCursor, bindVars map[string]*que } result, err := execShard(vcursor, ins.Query, bindVars, rss[0], true, true /* canAutocommit */) if err != nil { - return nil, vterrors.Wrap(err, "execInsertUnsharded") + return nil, err } // If processGenerate generated new values, it supercedes @@ -250,11 +250,11 @@ func (ins *Insert) execInsertUnsharded(vcursor VCursor, bindVars map[string]*que func (ins *Insert) execInsertSharded(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { insertID, err := ins.processGenerate(vcursor, bindVars) if err != nil { - return nil, vterrors.Wrap(err, "execInsertSharded") + return nil, err } rss, queries, err := ins.getInsertShardedRoute(vcursor, bindVars) if err != nil { - return nil, vterrors.Wrap(err, "execInsertSharded") + return nil, err } autocommit := (len(rss) == 1 || ins.MultiShardAutocommit) && vcursor.AutocommitApproval() @@ -264,7 +264,7 @@ func (ins *Insert) execInsertSharded(vcursor VCursor, bindVars map[string]*query } 
result, errs := vcursor.ExecuteMultiShard(rss, queries, true /* rollbackOnError */, autocommit) if errs != nil { - return nil, vterrors.Wrap(vterrors.Aggregate(errs), "execInsertSharded") + return nil, vterrors.Aggregate(errs) } if insertID != 0 { @@ -301,7 +301,7 @@ func (ins *Insert) processGenerate(vcursor VCursor, bindVars map[string]*querypb // keep track of where they should be filled. resolved, err := ins.Generate.Values.ResolveList(bindVars) if err != nil { - return 0, vterrors.Wrap(err, "processGenerate") + return 0, err } count := int64(0) for _, val := range resolved { @@ -314,10 +314,10 @@ func (ins *Insert) processGenerate(vcursor VCursor, bindVars map[string]*querypb if count != 0 { rss, _, err := vcursor.ResolveDestinations(ins.Generate.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) if err != nil { - return 0, vterrors.Wrap(err, "processGenerate") + return 0, err } if len(rss) != 1 { - return 0, vterrors.Wrapf(err, "processGenerate len(rss)=%v", len(rss)) + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss)) } bindVars := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(count)} qr, err := vcursor.ExecuteStandalone(ins.Generate.Query, bindVars, rss[0]) @@ -363,23 +363,23 @@ func (ins *Insert) getInsertShardedRoute(vcursor VCursor, bindVars map[string]*q rowCount := 0 for vIdx, vColValues := range ins.VindexValues { if len(vColValues.Values) != len(ins.Table.ColumnVindexes[vIdx].Columns) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: supplied vindex column values don't match vschema: %v %v", vColValues, ins.Table.ColumnVindexes[vIdx].Columns) + return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] supplied vindex column values don't match vschema: %v %v", vColValues, ins.Table.ColumnVindexes[vIdx].Columns) } for colIdx, colValues := range vColValues.Values { rowsResolvedValues, 
err := colValues.ResolveList(bindVars) if err != nil { - return nil, nil, vterrors.Wrap(err, "getInsertShardedRoute") + return nil, nil, err } // This is the first iteration: allocate for transpose. if colIdx == 0 { if len(rowsResolvedValues) == 0 { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: rowcount is zero for inserts: %v", rowsResolvedValues) + return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] rowcount is zero for inserts: %v", rowsResolvedValues) } if rowCount == 0 { rowCount = len(rowsResolvedValues) } if rowCount != len(rowsResolvedValues) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: uneven row values for inserts: %d %d", rowCount, len(rowsResolvedValues)) + return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] uneven row values for inserts: %d %d", rowCount, len(rowsResolvedValues)) } vindexRowsValues[vIdx] = make([][]sqltypes.Value, rowCount) } @@ -396,7 +396,7 @@ func (ins *Insert) getInsertShardedRoute(vcursor VCursor, bindVars map[string]*q // id is returned as nil, which is used later to drop the corresponding rows. 
keyspaceIDs, err := ins.processPrimary(vcursor, vindexRowsValues[0], ins.Table.ColumnVindexes[0]) if err != nil { - return nil, nil, vterrors.Wrap(err, "getInsertShardedRoute") + return nil, nil, err } for vIdx := 1; vIdx < len(ins.Table.ColumnVindexes); vIdx++ { @@ -408,7 +408,7 @@ func (ins *Insert) getInsertShardedRoute(vcursor VCursor, bindVars map[string]*q err = ins.processUnowned(vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) } if err != nil { - return nil, nil, vterrors.Wrap(err, "getInsertShardedRoute") + return nil, nil, err } } @@ -449,7 +449,7 @@ func (ins *Insert) getInsertShardedRoute(vcursor VCursor, bindVars map[string]*q rss, indexesPerRss, err := vcursor.ResolveDestinations(ins.Keyspace.Name, indexes, destinations) if err != nil { - return nil, nil, vterrors.Wrap(err, "getInsertShardedRoute") + return nil, nil, err } queries := make([]*querypb.BoundQuery, len(rss)) diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index ec59c2a70a1..c3411a98fe3 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -20,6 +20,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -55,11 +57,11 @@ func TestInsertUnsharded(t *testing.T) { // Failure cases vc = &loggingVCursor{shardErr: errors.New("shard_error")} _, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execInsertUnsharded: shard_error") + require.EqualError(t, err, `shard_error`) vc = &loggingVCursor{} _, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "Keyspace does not have exactly one shard: []") + require.EqualError(t, err, `Keyspace does not have exactly one shard: []`) } func TestInsertUnshardedGenerate(t *testing.T) { @@ -368,7 +370,7 @@ func TestInsertShardedFail(t *testing.T) { // The lookup will fail to map to a 
keyspace id. _, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execInsertSharded: getInsertShardedRoute: could not map [INT64(1)] to a keyspace id") + require.EqualError(t, err, `could not map [INT64(1)] to a keyspace id`) } func TestInsertShardedGenerate(t *testing.T) { @@ -910,7 +912,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { " suffix", ) - ksid0_lookup := sqltypes.MakeTestResult( + ksid0Lookup := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "from|to", "int64|varbinary", @@ -931,7 +933,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { vc.shardForKsid = []string{"20-", "-20"} vc.results = []*sqltypes.Result{ // primary vindex lookups: fail row 2. - ksid0_lookup, + ksid0Lookup, // insert lkp2 noresult, // fail one verification (row 3) @@ -1409,7 +1411,7 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { vc := newDMLTestVCursor("-20", "20-") _, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execInsertSharded: getInsertShardedRoute: values [[INT64(2)]] for column [c3] does not map to keyspace ids") + require.EqualError(t, err, `values [[INT64(2)]] for column [c3] does not map to keyspace ids`) } func TestInsertShardedUnownedReverseMap(t *testing.T) { @@ -1619,5 +1621,5 @@ func TestInsertShardedUnownedReverseMapFail(t *testing.T) { vc := newDMLTestVCursor("-20", "20-") _, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execInsertSharded: getInsertShardedRoute: value must be supplied for column [c3]") + require.EqualError(t, err, `value must be supplied for column [c3]`) } diff --git a/go/vt/vtgate/engine/join.go b/go/vt/vtgate/engine/join.go index 29d61e9706c..22b472ff470 100644 --- a/go/vt/vtgate/engine/join.go +++ b/go/vt/vtgate/engine/join.go @@ -84,9 +84,6 @@ func (jn *Join) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariab } if jn.Opcode == LeftJoin && 
len(rresult.Rows) == 0 { result.Rows = append(result.Rows, joinRows(lrow, nil, jn.Cols)) - result.RowsAffected++ - } else { - result.RowsAffected += uint64(len(rresult.Rows)) } if vcursor.ExceedsMaxMemoryRows(len(result.Rows)) { return nil, fmt.Errorf("in-memory row count exceeded allowed limit of %d", vcursor.MaxMemoryRows()) @@ -244,6 +241,7 @@ func (jn *Join) GetTableName() string { return jn.Left.GetTableName() + "_" + jn.Right.GetTableName() } +// NeedsTransaction implements the Primitive interface func (jn *Join) NeedsTransaction() bool { return jn.Right.NeedsTransaction() || jn.Left.NeedsTransaction() } diff --git a/go/vt/vtgate/engine/join_test.go b/go/vt/vtgate/engine/join_test.go index 47d857fc5cc..3c18bfc2361 100644 --- a/go/vt/vtgate/engine/join_test.go +++ b/go/vt/vtgate/engine/join_test.go @@ -76,7 +76,7 @@ func TestJoinExecute(t *testing.T) { "bv": 1, }, } - r, err := jn.Execute(noopVCursor{}, bv, true) + r, err := jn.Execute(&noopVCursor{}, bv, true) if err != nil { t.Fatal(err) } @@ -103,7 +103,7 @@ func TestJoinExecute(t *testing.T) { leftPrim.rewind() rightPrim.rewind() jn.Opcode = LeftJoin - r, err = jn.Execute(noopVCursor{}, bv, true) + r, err = jn.Execute(&noopVCursor{}, bv, true) if err != nil { t.Fatal(err) } @@ -194,7 +194,7 @@ func TestJoinExecuteMaxMemoryRows(t *testing.T) { }, } testIgnoreMaxMemoryRows = test.ignoreMaxMemoryRows - _, err := jn.Execute(noopVCursor{}, bv, true) + _, err := jn.Execute(&noopVCursor{}, bv, true) if testIgnoreMaxMemoryRows { require.NoError(t, err) } else { @@ -235,7 +235,7 @@ func TestJoinExecuteNoResult(t *testing.T) { "bv": 1, }, } - r, err := jn.Execute(noopVCursor{}, map[string]*querypb.BindVariable{}, true) + r, err := jn.Execute(&noopVCursor{}, map[string]*querypb.BindVariable{}, true) if err != nil { t.Fatal(err) } @@ -265,8 +265,8 @@ func TestJoinExecuteErrors(t *testing.T) { Opcode: NormalJoin, Left: leftPrim, } - _, err := jn.Execute(noopVCursor{}, map[string]*querypb.BindVariable{}, true) - 
expectError(t, "jn.Execute", err, "left err") + _, err := jn.Execute(&noopVCursor{}, map[string]*querypb.BindVariable{}, true) + require.EqualError(t, err, "left err") // Error on right query leftPrim = &fakePrimitive{ @@ -295,8 +295,8 @@ func TestJoinExecuteErrors(t *testing.T) { "bv": 1, }, } - _, err = jn.Execute(noopVCursor{}, map[string]*querypb.BindVariable{}, true) - expectError(t, "jn.Execute", err, "right err") + _, err = jn.Execute(&noopVCursor{}, map[string]*querypb.BindVariable{}, true) + require.EqualError(t, err, "right err") // Error on right getfields leftPrim = &fakePrimitive{ @@ -322,8 +322,8 @@ func TestJoinExecuteErrors(t *testing.T) { "bv": 1, }, } - _, err = jn.Execute(noopVCursor{}, map[string]*querypb.BindVariable{}, true) - expectError(t, "jn.Execute", err, "right err") + _, err = jn.Execute(&noopVCursor{}, map[string]*querypb.BindVariable{}, true) + require.EqualError(t, err, "right err") } func TestJoinStreamExecute(t *testing.T) { @@ -502,7 +502,7 @@ func TestGetFieldsErrors(t *testing.T) { }, } _, err := jn.GetFields(nil, map[string]*querypb.BindVariable{}) - expectError(t, "jn.GetFields", err, "left err") + require.EqualError(t, err, "left err") jn.Left = &fakePrimitive{ results: []*sqltypes.Result{ @@ -515,5 +515,5 @@ func TestGetFieldsErrors(t *testing.T) { }, } _, err = jn.GetFields(nil, map[string]*querypb.BindVariable{}) - expectError(t, "jn.GetFields", err, "right err") + require.EqualError(t, err, "right err") } diff --git a/go/vt/vtgate/engine/limit.go b/go/vt/vtgate/engine/limit.go index e712c182dc5..1ec801124ed 100644 --- a/go/vt/vtgate/engine/limit.go +++ b/go/vt/vtgate/engine/limit.go @@ -73,18 +73,15 @@ func (l *Limit) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariab // There are more rows in the response than limit + offset if count+offset <= len(result.Rows) { result.Rows = result.Rows[offset : count+offset] - result.RowsAffected = uint64(count) return result, nil } // Remove extra rows from response if 
offset <= len(result.Rows) { result.Rows = result.Rows[offset:] - result.RowsAffected = uint64(len(result.Rows)) return result, nil } // offset is beyond the result set result.Rows = nil - result.RowsAffected = 0 return result, nil } diff --git a/go/vt/vtgate/engine/lock.go b/go/vt/vtgate/engine/lock.go index 5da8c242d4d..794dbc7084e 100644 --- a/go/vt/vtgate/engine/lock.go +++ b/go/vt/vtgate/engine/lock.go @@ -65,7 +65,7 @@ func (l *Lock) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariabl return nil, err } if len(rss) != 1 { - return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "lock query cannot be routed to vttablet: %v", rss) + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "lock query can be routed to single shard only: %v", rss) } query := &querypb.BoundQuery{ diff --git a/go/vt/vtgate/engine/memory_sort.go b/go/vt/vtgate/engine/memory_sort.go index 8c8badbd3f8..560b029356e 100644 --- a/go/vt/vtgate/engine/memory_sort.go +++ b/go/vt/vtgate/engine/memory_sort.go @@ -76,8 +76,8 @@ func (ms *MemorySort) Execute(vcursor VCursor, bindVars map[string]*querypb.Bind return nil, err } sh := &sortHeap{ - rows: result.Rows, - orderBy: ms.OrderBy, + rows: result.Rows, + comparers: extractSlices(ms.OrderBy), } sort.Sort(sh) if sh.err != nil { @@ -86,7 +86,6 @@ func (ms *MemorySort) Execute(vcursor VCursor, bindVars map[string]*querypb.Bind result.Rows = sh.rows if len(result.Rows) > count { result.Rows = result.Rows[:count] - result.RowsAffected = uint64(count) } return result.Truncate(ms.TruncateColumnCount), nil } @@ -105,8 +104,8 @@ func (ms *MemorySort) StreamExecute(vcursor VCursor, bindVars map[string]*queryp // You have to reverse the ordering because the highest values // must be dropped once the upper limit is reached. 
sh := &sortHeap{ - orderBy: ms.OrderBy, - reverse: true, + comparers: extractSlices(ms.OrderBy), + reverse: true, } err = ms.Input.StreamExecute(vcursor, bindVars, wantfields, func(qr *sqltypes.Result) error { if len(qr.Fields) != 0 { @@ -116,9 +115,11 @@ func (ms *MemorySort) StreamExecute(vcursor VCursor, bindVars map[string]*queryp } for _, row := range qr.Rows { heap.Push(sh, row) - } - for len(sh.rows) > count { - _ = heap.Pop(sh) + // Remove the highest element from the heap if the size is more than the count + // This optimization means that the maximum size of the heap is going to be (count + 1) + for len(sh.rows) > count { + _ = heap.Pop(sh) + } } if vcursor.ExceedsMaxMemoryRows(len(sh.rows)) { return fmt.Errorf("in-memory row count exceeded allowed limit of %d", vcursor.MaxMemoryRows()) @@ -151,6 +152,7 @@ func (ms *MemorySort) Inputs() []Primitive { return []Primitive{ms.Input} } +// NeedsTransaction implements the Primitive interface func (ms *MemorySort) NeedsTransaction() bool { return ms.Input.NeedsTransaction() } @@ -215,10 +217,10 @@ func GenericJoin(input interface{}, f func(interface{}) string) string { // sortHeap is sorted based on the orderBy params. // Implementation is similar to scatterHeap type sortHeap struct { - rows [][]sqltypes.Value - orderBy []OrderbyParams - reverse bool - err error + rows [][]sqltypes.Value + comparers []*comparer + reverse bool + err error } // Len satisfies sort.Interface and heap.Interface. @@ -228,11 +230,11 @@ func (sh *sortHeap) Len() int { // Less satisfies sort.Interface and heap.Interface. 
func (sh *sortHeap) Less(i, j int) bool { - for _, order := range sh.orderBy { + for _, c := range sh.comparers { if sh.err != nil { return true } - cmp, err := evalengine.NullsafeCompare(sh.rows[i][order.Col], sh.rows[j][order.Col]) + cmp, err := c.compare(sh.rows[i], sh.rows[j]) if err != nil { sh.err = err return true @@ -240,17 +242,7 @@ func (sh *sortHeap) Less(i, j int) bool { if cmp == 0 { continue } - // This is equivalent to: - //if !sh.reverse { - // if order.Desc { - // cmp = -cmp - // } - //} else { - // if !order.Desc { - // cmp = -cmp - // } - //} - if sh.reverse != order.Desc { + if sh.reverse { cmp = -cmp } return cmp < 0 diff --git a/go/vt/vtgate/engine/memory_sort_test.go b/go/vt/vtgate/engine/memory_sort_test.go index 0ffda4cb002..1a39208f83c 100644 --- a/go/vt/vtgate/engine/memory_sort_test.go +++ b/go/vt/vtgate/engine/memory_sort_test.go @@ -17,10 +17,10 @@ limitations under the License. package engine import ( - "encoding/json" - "reflect" "testing" + "vitess.io/vitess/go/test/utils" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -37,7 +37,7 @@ func TestMemorySortExecute(t *testing.T) { results: []*sqltypes.Result{sqltypes.MakeTestResult( fields, "a|1", - "b|2", + "g|2", "a|1", "c|4", "c|3", @@ -46,50 +46,164 @@ func TestMemorySortExecute(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderbyParams{{ - Col: 1, + WeightStringCol: -1, + Col: 1, }}, Input: fp, } result, err := ms.Execute(nil, nil, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, "a|1", "a|1", - "b|2", + "g|2", "c|3", "c|4", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + utils.MustMatch(t, wantResult, result) fp.rewind() - upperlimit, err := sqlparser.NewPlanValue(sqlparser.NewArgument([]byte(":__upper_limit"))) - if err != nil { - t.Fatal(err) - } + upperlimit, err := 
sqlparser.NewPlanValue(sqlparser.NewArgument(":__upper_limit")) + require.NoError(t, err) ms.UpperLimit = upperlimit bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.Execute(nil, bv, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResult = sqltypes.MakeTestResult( fields, "a|1", "a|1", - "b|2", + "g|2", + ) + utils.MustMatch(t, wantResult, result) +} + +func TestMemorySortStreamExecuteWeightString(t *testing.T) { + fields := sqltypes.MakeTestFields( + "weightString|normal", + "varbinary|varchar", + ) + fp := &fakePrimitive{ + results: []*sqltypes.Result{sqltypes.MakeTestResult( + fields, + "null|x", + "g|d", + "a|a", + "c|t", + "f|p", + )}, + } + + ms := &MemorySort{ + OrderBy: []OrderbyParams{{ + WeightStringCol: 0, + Col: 1, + }}, + Input: fp, + } + var results []*sqltypes.Result + + t.Run("order by weight string", func(t *testing.T) { + + err := ms.StreamExecute(&noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { + results = append(results, qr) + return nil + }) + require.NoError(t, err) + + wantResults := sqltypes.MakeTestStreamingResults( + fields, + "null|x", + "a|a", + "c|t", + "f|p", + "g|d", + ) + utils.MustMatch(t, wantResults, results) + }) + + t.Run("Limit test", func(t *testing.T) { + fp.rewind() + upperlimit, err := sqlparser.NewPlanValue(sqlparser.NewArgument(":__upper_limit")) + require.NoError(t, err) + ms.UpperLimit = upperlimit + bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} + + results = nil + err = ms.StreamExecute(&noopVCursor{}, bv, false, func(qr *sqltypes.Result) error { + results = append(results, qr) + return nil + }) + require.NoError(t, err) + + wantResults := sqltypes.MakeTestStreamingResults( + fields, + "null|x", + "a|a", + "c|t", + ) + utils.MustMatch(t, wantResults, results) + }) +} + +func TestMemorySortExecuteWeightString(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c1|c2", + 
"varchar|varbinary", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) + fp := &fakePrimitive{ + results: []*sqltypes.Result{sqltypes.MakeTestResult( + fields, + "a|1", + "g|2", + "a|1", + "c|4", + "c|3", + )}, + } + + ms := &MemorySort{ + OrderBy: []OrderbyParams{{ + WeightStringCol: 1, + Col: 0, + }}, + Input: fp, } + + result, err := ms.Execute(nil, nil, false) + require.NoError(t, err) + + wantResult := sqltypes.MakeTestResult( + fields, + "a|1", + "a|1", + "g|2", + "c|3", + "c|4", + ) + utils.MustMatch(t, wantResult, result) + + fp.rewind() + upperlimit, err := sqlparser.NewPlanValue(sqlparser.NewArgument(":__upper_limit")) + require.NoError(t, err) + ms.UpperLimit = upperlimit + bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} + + result, err = ms.Execute(nil, bv, false) + require.NoError(t, err) + + wantResult = sqltypes.MakeTestResult( + fields, + "a|1", + "a|1", + "g|2", + ) + utils.MustMatch(t, wantResult, result) } func TestMemorySortStreamExecute(t *testing.T) { @@ -101,7 +215,7 @@ func TestMemorySortStreamExecute(t *testing.T) { results: []*sqltypes.Result{sqltypes.MakeTestResult( fields, "a|1", - "b|2", + "g|2", "a|1", "c|4", "c|3", @@ -110,60 +224,49 @@ func TestMemorySortStreamExecute(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderbyParams{{ - Col: 1, + WeightStringCol: -1, + Col: 1, }}, Input: fp, } var results []*sqltypes.Result - err := ms.StreamExecute(noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { + err := ms.StreamExecute(&noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { results = append(results, qr) return nil }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( fields, "a|1", "a|1", - "b|2", + "g|2", "c|3", "c|4", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("oa.Execute:\n%v, want\n%v", results, wantResults) - } + utils.MustMatch(t, 
wantResults, results) fp.rewind() - upperlimit, err := sqlparser.NewPlanValue(sqlparser.NewArgument([]byte(":__upper_limit"))) - if err != nil { - t.Fatal(err) - } + upperlimit, err := sqlparser.NewPlanValue(sqlparser.NewArgument(":__upper_limit")) + require.NoError(t, err) ms.UpperLimit = upperlimit bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} results = nil - err = ms.StreamExecute(noopVCursor{}, bv, false, func(qr *sqltypes.Result) error { + err = ms.StreamExecute(&noopVCursor{}, bv, false, func(qr *sqltypes.Result) error { results = append(results, qr) return nil }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResults = sqltypes.MakeTestStreamingResults( fields, "a|1", "a|1", - "b|2", + "g|2", ) - if !reflect.DeepEqual(results, wantResults) { - r, _ := json.Marshal(results) - w, _ := json.Marshal(wantResults) - t.Errorf("oa.Execute:\n%s, want\n%s", r, w) - } + utils.MustMatch(t, wantResults, results) } func TestMemorySortGetFields(t *testing.T) { @@ -178,12 +281,8 @@ func TestMemorySortGetFields(t *testing.T) { ms := &MemorySort{Input: fp} got, err := ms.GetFields(nil, nil) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, result) { - t.Errorf("l.GetFields:\n%v, want\n%v", got, result) - } + require.NoError(t, err) + utils.MustMatch(t, result, got) } func TestMemorySortExecuteTruncate(t *testing.T) { @@ -195,7 +294,7 @@ func TestMemorySortExecuteTruncate(t *testing.T) { results: []*sqltypes.Result{sqltypes.MakeTestResult( fields, "a|1|1", - "b|2|1", + "g|2|1", "a|1|1", "c|4|1", "c|3|1", @@ -204,28 +303,25 @@ func TestMemorySortExecuteTruncate(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderbyParams{{ - Col: 1, + WeightStringCol: -1, + Col: 1, }}, Input: fp, TruncateColumnCount: 2, } result, err := ms.Execute(nil, nil, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields[:2], "a|1", "a|1", - "b|2", + "g|2", "c|3", 
"c|4", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + utils.MustMatch(t, wantResult, result) } func TestMemorySortStreamExecuteTruncate(t *testing.T) { @@ -237,7 +333,7 @@ func TestMemorySortStreamExecuteTruncate(t *testing.T) { results: []*sqltypes.Result{sqltypes.MakeTestResult( fields, "a|1|1", - "b|2|1", + "g|2|1", "a|1|1", "c|4|1", "c|3|1", @@ -246,32 +342,29 @@ func TestMemorySortStreamExecuteTruncate(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderbyParams{{ - Col: 1, + WeightStringCol: -1, + Col: 1, }}, Input: fp, TruncateColumnCount: 2, } var results []*sqltypes.Result - err := ms.StreamExecute(noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { + err := ms.StreamExecute(&noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { results = append(results, qr) return nil }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( fields[:2], "a|1", "a|1", - "b|2", + "g|2", "c|3", "c|4", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("oa.Execute:\n%v, want\n%v", results, wantResults) - } + utils.MustMatch(t, wantResults, results) } func TestMemorySortMultiColumn(t *testing.T) { @@ -292,18 +385,18 @@ func TestMemorySortMultiColumn(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderbyParams{{ - Col: 1, + Col: 1, + WeightStringCol: -1, }, { - Col: 0, - Desc: true, + Col: 0, + WeightStringCol: -1, + Desc: true, }}, Input: fp, } result, err := ms.Execute(nil, nil, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -313,22 +406,16 @@ func TestMemorySortMultiColumn(t *testing.T) { "c|3", "c|4", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + utils.MustMatch(t, wantResult, result) fp.rewind() - upperlimit, err := sqlparser.NewPlanValue(sqlparser.NewArgument([]byte(":__upper_limit"))) - 
if err != nil { - t.Fatal(err) - } + upperlimit, err := sqlparser.NewPlanValue(sqlparser.NewArgument(":__upper_limit")) + require.NoError(t, err) ms.UpperLimit = upperlimit bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.Execute(nil, bv, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResult = sqltypes.MakeTestResult( fields, @@ -336,9 +423,7 @@ func TestMemorySortMultiColumn(t *testing.T) { "a|1", "b|2", ) - if !reflect.DeepEqual(result, wantResult) { - t.Errorf("oa.Execute:\n%v, want\n%v", result, wantResult) - } + utils.MustMatch(t, wantResult, result) } func TestMemorySortMaxMemoryRows(t *testing.T) { @@ -375,13 +460,14 @@ func TestMemorySortMaxMemoryRows(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderbyParams{{ - Col: 1, + WeightStringCol: -1, + Col: 1, }}, Input: fp, } testIgnoreMaxMemoryRows = test.ignoreMaxMemoryRows - err := ms.StreamExecute(noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { + err := ms.StreamExecute(&noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { return nil }) if testIgnoreMaxMemoryRows { @@ -410,7 +496,8 @@ func TestMemorySortExecuteNoVarChar(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderbyParams{{ - Col: 0, + WeightStringCol: -1, + Col: 0, }}, Input: fp, } @@ -422,7 +509,7 @@ func TestMemorySortExecuteNoVarChar(t *testing.T) { } fp.rewind() - err = ms.StreamExecute(noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { + err = ms.StreamExecute(&noopVCursor{}, nil, false, func(qr *sqltypes.Result) error { return nil }) if err == nil || err.Error() != want { diff --git a/go/vt/vtgate/engine/merge_sort.go b/go/vt/vtgate/engine/merge_sort.go index 59d3c1ab3bb..a93b2ba4515 100644 --- a/go/vt/vtgate/engine/merge_sort.go +++ b/go/vt/vtgate/engine/merge_sort.go @@ -20,8 +20,6 @@ import ( "container/heap" "io" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "context" "vitess.io/vitess/go/sqltypes" @@ -66,12 +64,12 @@ func (ms 
*MergeSort) GetTableName() string { return "" } // Execute is not supported. func (ms *MergeSort) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Execute is not supported") + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] Execute is not reachable") } // GetFields is not supported. func (ms *MergeSort) GetFields(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "GetFields is not supported") + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields is not reachable") } // StreamExecute performs a streaming exec. @@ -96,9 +94,10 @@ func (ms *MergeSort) StreamExecute(vcursor VCursor, bindVars map[string]*querypb return err } + comparers := extractSlices(ms.OrderBy) sh := &scatterHeap{ - rows: make([]streamRow, 0, len(handles)), - orderBy: ms.OrderBy, + rows: make([]streamRow, 0, len(handles)), + comparers: comparers, } // Prime the heap. One element must be pulled from @@ -235,9 +234,9 @@ type streamRow struct { // yielded an error, err is set. This must be checked // after every heap operation. type scatterHeap struct { - rows []streamRow - orderBy []OrderbyParams - err error + rows []streamRow + err error + comparers []*comparer } // Len satisfies sort.Interface and heap.Interface. @@ -247,11 +246,12 @@ func (sh *scatterHeap) Len() int { // Less satisfies sort.Interface and heap.Interface. 
func (sh *scatterHeap) Less(i, j int) bool { - for _, order := range sh.orderBy { + for _, c := range sh.comparers { if sh.err != nil { return true } - cmp, err := evalengine.NullsafeCompare(sh.rows[i].row[order.Col], sh.rows[j].row[order.Col]) + // First try to compare the columns that we want to order + cmp, err := c.compare(sh.rows[i].row, sh.rows[j].row) if err != nil { sh.err = err return true @@ -259,9 +259,6 @@ func (sh *scatterHeap) Less(i, j int) bool { if cmp == 0 { continue } - if order.Desc { - cmp = -cmp - } return cmp < 0 } return true diff --git a/go/vt/vtgate/engine/merge_sort_test.go b/go/vt/vtgate/engine/merge_sort_test.go index 4351d732889..ede93d784b2 100644 --- a/go/vt/vtgate/engine/merge_sort_test.go +++ b/go/vt/vtgate/engine/merge_sort_test.go @@ -18,9 +18,10 @@ package engine import ( "errors" - "reflect" "testing" + "vitess.io/vitess/go/test/utils" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -56,7 +57,8 @@ func TestMergeSortNormal(t *testing.T) { ), }} orderBy := []OrderbyParams{{ - Col: 0, + WeightStringCol: -1, + Col: 0, }} var results []*sqltypes.Result @@ -84,9 +86,65 @@ func TestMergeSortNormal(t *testing.T) { "---", "8|h", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("MergeSort:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults)) - } + utils.MustMatch(t, wantResults, results) +} + +func TestMergeSortWeightString(t *testing.T) { + idColFields := sqltypes.MakeTestFields("id|col", "varbinary|varchar") + shardResults := []*shardResult{{ + results: sqltypes.MakeTestStreamingResults(idColFields, + "1|a", + "7|g", + ), + }, { + results: sqltypes.MakeTestStreamingResults(idColFields, + "2|b", + "---", + "3|c", + ), + }, { + results: sqltypes.MakeTestStreamingResults(idColFields, + "4|d", + "6|f", + ), + }, { + results: sqltypes.MakeTestStreamingResults(idColFields, + "4|d", + "---", + "8|h", + ), + }} + orderBy := []OrderbyParams{{ + WeightStringCol: 0, + 
Col: 1, + }} + + var results []*sqltypes.Result + err := testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { + results = append(results, qr) + return nil + }) + require.NoError(t, err) + + // Results are returned one row at a time. + wantResults := sqltypes.MakeTestStreamingResults(idColFields, + "1|a", + "---", + "2|b", + "---", + "3|c", + "---", + "4|d", + "---", + "4|d", + "---", + "6|f", + "---", + "7|g", + "---", + "8|h", + ) + utils.MustMatch(t, wantResults, results) } // TestMergeSortDescending tests the normal flow of a merge @@ -117,8 +175,9 @@ func TestMergeSortDescending(t *testing.T) { ), }} orderBy := []OrderbyParams{{ - Col: 0, - Desc: true, + WeightStringCol: -1, + Col: 0, + Desc: true, }} var results []*sqltypes.Result @@ -146,9 +205,7 @@ func TestMergeSortDescending(t *testing.T) { "---", "1|a", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("MergeSort:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults)) - } + utils.MustMatch(t, wantResults, results) } func TestMergeSortEmptyResults(t *testing.T) { @@ -169,7 +226,8 @@ func TestMergeSortEmptyResults(t *testing.T) { results: sqltypes.MakeTestStreamingResults(idColFields), }} orderBy := []OrderbyParams{{ - Col: 0, + WeightStringCol: -1, + Col: 0, }} var results []*sqltypes.Result @@ -189,16 +247,15 @@ func TestMergeSortEmptyResults(t *testing.T) { "---", "7|g", ) - if !reflect.DeepEqual(results, wantResults) { - t.Errorf("MergeSort:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults)) - } + utils.MustMatch(t, wantResults, results) } // TestMergeSortResultFailures tests failures at various // stages of result return. func TestMergeSortResultFailures(t *testing.T) { orderBy := []OrderbyParams{{ - Col: 0, + WeightStringCol: -1, + Col: 0, }} // Test early error. 
@@ -207,9 +264,7 @@ func TestMergeSortResultFailures(t *testing.T) { }} err := testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) want := "early error" - if err == nil || err.Error() != want { - t.Errorf("MergeSort(): %v, want %v", err, want) - } + require.EqualError(t, err, want) // Test fail after fields. idFields := sqltypes.MakeTestFields("id", "int32") @@ -219,9 +274,7 @@ func TestMergeSortResultFailures(t *testing.T) { }} err = testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) want = "fail after fields" - if err == nil || err.Error() != want { - t.Errorf("MergeSort(): %v, want %v", err, want) - } + require.EqualError(t, err, want) // Test fail after first row. shardResults = []*shardResult{{ @@ -230,9 +283,7 @@ func TestMergeSortResultFailures(t *testing.T) { }} err = testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) want = "fail after first row" - if err == nil || err.Error() != want { - t.Errorf("MergeSort(): %v, want %v", err, want) - } + require.EqualError(t, err, want) } func TestMergeSortDataFailures(t *testing.T) { @@ -249,14 +300,13 @@ func TestMergeSortDataFailures(t *testing.T) { ), }} orderBy := []OrderbyParams{{ - Col: 0, + WeightStringCol: -1, + Col: 0, }} err := testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) want := `strconv.ParseInt: parsing "2.1": invalid syntax` - if err == nil || err.Error() != want { - t.Errorf("MergeSort(): %v, want %v", err, want) - } + require.EqualError(t, err, want) // Create a new VCursor because the previous MergeSort will still // have lingering goroutines that can cause data race. 
@@ -272,9 +322,7 @@ func TestMergeSortDataFailures(t *testing.T) { }} err = testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) want = `strconv.ParseInt: parsing "1.1": invalid syntax` - if err == nil || err.Error() != want { - t.Errorf("MergeSort(): %v, want %v", err, want) - } + require.EqualError(t, err, want) } func testMergeSort(shardResults []*shardResult, orderBy []OrderbyParams, callback func(qr *sqltypes.Result) error) error { @@ -286,7 +334,7 @@ func testMergeSort(shardResults []*shardResult, orderBy []OrderbyParams, callbac Primitives: prims, OrderBy: orderBy, } - return ms.StreamExecute(noopVCursor{}, nil, true, callback) + return ms.StreamExecute(&noopVCursor{}, nil, true, callback) } type shardResult struct { diff --git a/go/vt/vtgate/engine/online_ddl.go b/go/vt/vtgate/engine/online_ddl.go index 39ee6bbfda0..a6778c04bd0 100644 --- a/go/vt/vtgate/engine/online_ddl.go +++ b/go/vt/vtgate/engine/online_ddl.go @@ -96,8 +96,7 @@ func (v *OnlineDDL) Execute(vcursor VCursor, bindVars map[string]*query.BindVari Type: sqltypes.VarChar, }, }, - Rows: rows, - RowsAffected: uint64(len(rows)), + Rows: rows, } return result, err } @@ -113,5 +112,5 @@ func (v *OnlineDDL) StreamExecute(vcursor VCursor, bindVars map[string]*query.Bi //GetFields implements the Primitive interface func (v *OnlineDDL) GetFields(vcursor VCursor, bindVars map[string]*query.BindVariable) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "not reachable") + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields is not reachable") } diff --git a/go/vt/vtgate/engine/ordered_aggregate.go b/go/vt/vtgate/engine/ordered_aggregate.go index bee3cc52de5..803a4223a32 100644 --- a/go/vt/vtgate/engine/ordered_aggregate.go +++ b/go/vt/vtgate/engine/ordered_aggregate.go @@ -206,7 +206,6 @@ func (oa *OrderedAggregate) execute(vcursor VCursor, bindVars map[string]*queryp if current != nil { out.Rows = append(out.Rows, current) } - 
out.RowsAffected = uint64(len(out.Rows)) return out, nil } @@ -324,6 +323,7 @@ func (oa *OrderedAggregate) Inputs() []Primitive { return []Primitive{oa.Input} } +// NeedsTransaction implements the Primitive interface func (oa *OrderedAggregate) NeedsTransaction() bool { return oa.Input.NeedsTransaction() } diff --git a/go/vt/vtgate/engine/ordered_aggregate_test.go b/go/vt/vtgate/engine/ordered_aggregate_test.go index 9158d27e575..5fb44b84e0a 100644 --- a/go/vt/vtgate/engine/ordered_aggregate_test.go +++ b/go/vt/vtgate/engine/ordered_aggregate_test.go @@ -613,7 +613,6 @@ func TestOrderedAggregateMergeFail(t *testing.T) { sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("1")), }, }, - RowsAffected: 1, } res, err := oa.Execute(nil, nil, false) diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index 5bc98d7481c..6825f077a80 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -18,7 +18,7 @@ package engine import ( "encoding/json" - "sync" + "sync/atomic" "time" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -98,6 +98,12 @@ type ( LookupRowLockShardSession() vtgatepb.CommitOrder FindRoutedTable(tablename sqlparser.TableName) (*vindexes.Table, error) + + // GetDBDDLPlugin gets the configured plugin for DROP/CREATE DATABASE + GetDBDDLPluginName() string + + // KeyspaceAvailable returns true when a keyspace is visible from vtgate + KeyspaceAvailable(ks string) bool } //SessionActions gives primitives ability to interact with the session state @@ -126,6 +132,7 @@ type ( SetSQLSelectLimit(int64) error SetTransactionMode(vtgatepb.TransactionMode) SetWorkload(querypb.ExecuteOptions_Workload) + SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) SetFoundRows(uint64) SetDDLStrategy(string) @@ -133,10 +140,16 @@ type ( GetSessionUUID() string + SetSessionEnableSystemSettings(bool) error + GetSessionEnableSystemSettings() bool + // SetReadAfterWriteGTID sets the GTID that the user expects a replica to have caught 
up with before answering a query SetReadAfterWriteGTID(string) SetReadAfterWriteTimeout(float64) SetSessionTrackGTIDs(bool) + + // HasCreatedTempTable will mark the session as having created temp tables + HasCreatedTempTable() } // Plan represents the execution strategy for a given query. @@ -149,13 +162,14 @@ type ( Original string // Original is the original query. Instructions Primitive // Instructions contains the instructions needed to fulfil the query. BindVarNeeds *sqlparser.BindVarNeeds // Stores BindVars needed to be provided as part of expression rewriting - - mu sync.Mutex // Mutex to protect the fields below - ExecCount uint64 // Count of times this plan was executed - ExecTime time.Duration // Total execution time - ShardQueries uint64 // Total number of shard queries - Rows uint64 // Total number of rows - Errors uint64 // Total number of errors + Warnings []*querypb.QueryWarning // Warnings that need to be yielded every time this query runs + + ExecCount uint64 // Count of times this plan was executed + ExecTime uint64 // Total execution time + ShardQueries uint64 // Total number of shard queries + RowsReturned uint64 // Total number of rows + RowsAffected uint64 // Total number of rows + Errors uint64 // Total number of errors } // Match is used to check if a Primitive matches @@ -193,25 +207,23 @@ type ( ) // AddStats updates the plan execution statistics -func (p *Plan) AddStats(execCount uint64, execTime time.Duration, shardQueries, rows, errors uint64) { - p.mu.Lock() - p.ExecCount += execCount - p.ExecTime += execTime - p.ShardQueries += shardQueries - p.Rows += rows - p.Errors += errors - p.mu.Unlock() +func (p *Plan) AddStats(execCount uint64, execTime time.Duration, shardQueries, rowsAffected, rowsReturned, errors uint64) { + atomic.AddUint64(&p.ExecCount, execCount) + atomic.AddUint64(&p.ExecTime, uint64(execTime)) + atomic.AddUint64(&p.ShardQueries, shardQueries) + atomic.AddUint64(&p.RowsAffected, rowsAffected) + 
atomic.AddUint64(&p.RowsReturned, rowsReturned) + atomic.AddUint64(&p.Errors, errors) } // Stats returns a copy of the plan execution statistics -func (p *Plan) Stats() (execCount uint64, execTime time.Duration, shardQueries, rows, errors uint64) { - p.mu.Lock() - execCount = p.ExecCount - execTime = p.ExecTime - shardQueries = p.ShardQueries - rows = p.Rows - errors = p.Errors - p.mu.Unlock() +func (p *Plan) Stats() (execCount uint64, execTime time.Duration, shardQueries, rowsAffected, rowsReturned, errors uint64) { + execCount = atomic.LoadUint64(&p.ExecCount) + execTime = time.Duration(atomic.LoadUint64(&p.ExecTime)) + shardQueries = atomic.LoadUint64(&p.ShardQueries) + rowsAffected = atomic.LoadUint64(&p.RowsAffected) + rowsReturned = atomic.LoadUint64(&p.RowsReturned) + errors = atomic.LoadUint64(&p.Errors) return } @@ -234,13 +246,6 @@ func Exists(m Match, p Primitive) bool { return Find(m, p) != nil } -// Size is defined so that Plan can be given to a cache.LRUCache. -// VTGate needs to maintain a cache of plans. It uses LRUCache, which -// in turn requires its objects to define a Size function. -func (p *Plan) Size() int { - return 1 -} - //MarshalJSON serializes the plan into a JSON representation. 
func (p *Plan) MarshalJSON() ([]byte, error) { var instructions *PrimitiveDescription @@ -256,17 +261,19 @@ func (p *Plan) MarshalJSON() ([]byte, error) { ExecCount uint64 `json:",omitempty"` ExecTime time.Duration `json:",omitempty"` ShardQueries uint64 `json:",omitempty"` - Rows uint64 `json:",omitempty"` + RowsAffected uint64 `json:",omitempty"` + RowsReturned uint64 `json:",omitempty"` Errors uint64 `json:",omitempty"` }{ QueryType: p.Type.String(), Original: p.Original, Instructions: instructions, - ExecCount: p.ExecCount, - ExecTime: p.ExecTime, - ShardQueries: p.ShardQueries, - Rows: p.Rows, - Errors: p.Errors, + ExecCount: atomic.LoadUint64(&p.ExecCount), + ExecTime: time.Duration(atomic.LoadUint64(&p.ExecTime)), + ShardQueries: atomic.LoadUint64(&p.ShardQueries), + RowsAffected: atomic.LoadUint64(&p.RowsAffected), + RowsReturned: atomic.LoadUint64(&p.RowsReturned), + Errors: atomic.LoadUint64(&p.Errors), } return json.Marshal(marshalPlan) } diff --git a/go/vt/vtgate/engine/pullout_subquery.go b/go/vt/vtgate/engine/pullout_subquery.go index 90d53d9d801..bbf614ddebd 100644 --- a/go/vt/vtgate/engine/pullout_subquery.go +++ b/go/vt/vtgate/engine/pullout_subquery.go @@ -100,10 +100,16 @@ func (ps *PulloutSubquery) GetFields(vcursor VCursor, bindVars map[string]*query return ps.Underlying.GetFields(vcursor, combinedVars) } +// NeedsTransaction implements the Primitive interface func (ps *PulloutSubquery) NeedsTransaction() bool { return ps.Subquery.NeedsTransaction() || ps.Underlying.NeedsTransaction() } +var ( + errSqRow = vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "subquery returned more than one row") + errSqColumn = vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "subquery returned more than one column") +) + func (ps *PulloutSubquery) execSubquery(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (map[string]*querypb.BindVariable, error) { result, err := ps.Subquery.Execute(vcursor, bindVars, false) if err != nil { @@ -120,11 +126,11 @@ func (ps 
*PulloutSubquery) execSubquery(vcursor VCursor, bindVars map[string]*qu combinedVars[ps.SubqueryResult] = sqltypes.NullBindVariable case 1: if len(result.Rows[0]) != 1 { - return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "subquery returned more than one column") + return nil, errSqColumn } combinedVars[ps.SubqueryResult] = sqltypes.ValueBindVariable(result.Rows[0][0]) default: - return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "subquery returned more than one row") + return nil, errSqRow } case PulloutIn, PulloutNotIn: switch len(result.Rows) { @@ -137,7 +143,7 @@ func (ps *PulloutSubquery) execSubquery(vcursor VCursor, bindVars map[string]*qu } default: if len(result.Rows[0]) != 1 { - return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "subquery returned more than one column") + return nil, errSqColumn } combinedVars[ps.HasValues] = sqltypes.Int64BindVariable(1) values := &querypb.BindVariable{ diff --git a/go/vt/vtgate/engine/pullout_subquery_test.go b/go/vt/vtgate/engine/pullout_subquery_test.go index a24a2de7526..7c1c08e64f7 100644 --- a/go/vt/vtgate/engine/pullout_subquery_test.go +++ b/go/vt/vtgate/engine/pullout_subquery_test.go @@ -110,7 +110,7 @@ func TestPulloutSubqueryValueBadColumns(t *testing.T) { } _, err := ps.Execute(nil, make(map[string]*querypb.BindVariable), false) - expectError(t, "ps.Execute", err, "subquery returned more than one column") + require.EqualError(t, err, "subquery returned more than one column") } func TestPulloutSubqueryValueBadRows(t *testing.T) { @@ -132,7 +132,7 @@ func TestPulloutSubqueryValueBadRows(t *testing.T) { } _, err := ps.Execute(nil, make(map[string]*querypb.BindVariable), false) - expectError(t, "ps.Execute", err, "subquery returned more than one row") + require.EqualError(t, err, "subquery returned more than one row") } func TestPulloutSubqueryInNotinGood(t *testing.T) { @@ -217,7 +217,7 @@ func TestPulloutSubqueryInBadColumns(t *testing.T) { } _, err := ps.Execute(nil, 
make(map[string]*querypb.BindVariable), false) - expectError(t, "ps.Execute", err, "subquery returned more than one column") + require.EqualError(t, err, "subquery returned more than one column") } func TestPulloutSubqueryExists(t *testing.T) { @@ -282,7 +282,7 @@ func TestPulloutSubqueryError(t *testing.T) { } _, err := ps.Execute(nil, make(map[string]*querypb.BindVariable), false) - expectError(t, "ps.Execute", err, "err") + require.EqualError(t, err, "err") } func TestPulloutSubqueryStream(t *testing.T) { diff --git a/go/vt/vtgate/engine/rename_fields.go b/go/vt/vtgate/engine/rename_fields.go new file mode 100644 index 00000000000..9983c5036e1 --- /dev/null +++ b/go/vt/vtgate/engine/rename_fields.go @@ -0,0 +1,118 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +var _ Primitive = (*RenameFields)(nil) + +// RenameFields is a primitive that renames the fields +type RenameFields struct { + Cols []string + Indices []int + Input Primitive + noTxNeeded +} + +// NewRenameField creates a new rename field +func NewRenameField(cols []string, indices []int, input Primitive) (*RenameFields, error) { + if len(cols) != len(indices) { + return nil, vterrors.New(vtrpc.Code_INTERNAL, "Unequal length of columns and indices in RenameField primitive") + } + return &RenameFields{ + Cols: cols, + Indices: indices, + Input: input, + }, nil +} + +// RouteType implements the primitive interface +func (r *RenameFields) RouteType() string { + return r.Input.RouteType() +} + +// GetKeyspaceName implements the primitive interface +func (r *RenameFields) GetKeyspaceName() string { + return r.Input.GetKeyspaceName() +} + +// GetTableName implements the primitive interface +func (r *RenameFields) GetTableName() string { + return r.Input.GetTableName() +} + +// Execute implements the primitive interface +func (r *RenameFields) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + qr, err := r.Input.Execute(vcursor, bindVars, wantfields) + if err != nil { + return nil, err + } + if wantfields { + r.renameFields(qr) + } + return qr, nil +} + +func (r *RenameFields) renameFields(qr *sqltypes.Result) { + for ind, index := range r.Indices { + colName := r.Cols[ind] + qr.Fields[index].Name = colName + } +} + +// StreamExecute implements the primitive interface +func (r *RenameFields) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + if wantfields { + innerCallback := callback + callback = func(result *sqltypes.Result) 
error { + r.renameFields(result) + return innerCallback(result) + } + } + return r.Input.StreamExecute(vcursor, bindVars, wantfields, callback) +} + +// GetFields implements the primitive interface +func (r *RenameFields) GetFields(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + qr, err := r.Input.GetFields(vcursor, bindVars) + if err != nil { + return nil, err + } + r.renameFields(qr) + return qr, nil +} + +// Inputs implements the primitive interface +func (r *RenameFields) Inputs() []Primitive { + return []Primitive{r.Input} +} + +// description implements the primitive interface +func (r *RenameFields) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: "RenameFields", + Other: map[string]interface{}{ + "Indices": r.Indices, + "Columns": r.Cols, + }, + } +} diff --git a/go/vt/vtgate/engine/replace_variables.go b/go/vt/vtgate/engine/replace_variables.go new file mode 100644 index 00000000000..c0397e26403 --- /dev/null +++ b/go/vt/vtgate/engine/replace_variables.go @@ -0,0 +1,97 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +var _ Primitive = (*ReplaceVariables)(nil) + +// ReplaceVariables is used in SHOW VARIABLES statements so that it replaces the values for vitess-aware variables +type ReplaceVariables struct { + Input Primitive + noTxNeeded +} + +// NewReplaceVariables is used to create a new ReplaceVariables primitive +func NewReplaceVariables(input Primitive) *ReplaceVariables { + return &ReplaceVariables{Input: input} +} + +// RouteType implements the Primitive interface +func (r *ReplaceVariables) RouteType() string { + return r.Input.RouteType() +} + +// GetKeyspaceName implements the Primitive interface +func (r *ReplaceVariables) GetKeyspaceName() string { + return r.Input.GetKeyspaceName() +} + +// GetTableName implements the Primitive interface +func (r *ReplaceVariables) GetTableName() string { + return r.Input.GetTableName() +} + +// Execute implements the Primitive interface +func (r *ReplaceVariables) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + qr, err := r.Input.Execute(vcursor, bindVars, wantfields) + if err != nil { + return nil, err + } + replaceVariables(qr, bindVars) + return qr, nil +} + +// StreamExecute implements the Primitive interface +func (r *ReplaceVariables) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + innerCallback := callback + callback = func(result *sqltypes.Result) error { + replaceVariables(result, bindVars) + return innerCallback(result) + } + return r.Input.StreamExecute(vcursor, bindVars, wantfields, callback) +} + +// GetFields implements the Primitive interface +func (r *ReplaceVariables) GetFields(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return r.Input.GetFields(vcursor, bindVars) +} + +// Inputs implements the 
Primitive interface +func (r *ReplaceVariables) Inputs() []Primitive { + return []Primitive{r.Input} +} + +// description implements the Primitive interface +func (r *ReplaceVariables) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: "ReplaceVariables", + } +} + +func replaceVariables(qr *sqltypes.Result, bindVars map[string]*querypb.BindVariable) { + for i, row := range qr.Rows { + variableName := row[0].ToString() + res, found := bindVars["__vt"+variableName] + if found { + qr.Rows[i][1] = sqltypes.NewVarChar(string(res.GetValue())) + } + } +} diff --git a/go/vt/vtgate/engine/revert_migration.go b/go/vt/vtgate/engine/revert_migration.go new file mode 100644 index 00000000000..a2facc7c201 --- /dev/null +++ b/go/vt/vtgate/engine/revert_migration.go @@ -0,0 +1,109 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "fmt" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/proto/query" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ Primitive = (*RevertMigration)(nil) + +//RevertMigration represents the instructions to perform an online schema change via vtctld +type RevertMigration struct { + Keyspace *vindexes.Keyspace + Stmt *sqlparser.RevertMigration + Query string + + noTxNeeded + + noInputs +} + +func (v *RevertMigration) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: "RevertMigration", + Keyspace: v.Keyspace, + Other: map[string]interface{}{ + "query": v.Query, + }, + } +} + +// RouteType implements the Primitive interface +func (v *RevertMigration) RouteType() string { + return "RevertMigration" +} + +// GetKeyspaceName implements the Primitive interface +func (v *RevertMigration) GetKeyspaceName() string { + return v.Keyspace.Name +} + +// GetTableName implements the Primitive interface +func (v *RevertMigration) GetTableName() string { + return "" +} + +// Execute implements the Primitive interface +func (v *RevertMigration) Execute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool) (result *sqltypes.Result, err error) { + sql := fmt.Sprintf("revert %s", v.Stmt.UUID) + onlineDDL, err := schema.NewOnlineDDL(v.GetKeyspaceName(), "", sql, schema.DDLStrategyOnline, "", fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID())) + if err != nil { + return result, err + } + err = vcursor.SubmitOnlineDDL(onlineDDL) + if err != nil { + return result, err + } + rows := [][]sqltypes.Value{} + rows = append(rows, []sqltypes.Value{ + sqltypes.NewVarChar(onlineDDL.UUID), + }) + result = &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "uuid", + Type: 
sqltypes.VarChar, + }, + }, + Rows: rows, + } + return result, err +} + +//StreamExecute implements the Primitive interface +func (v *RevertMigration) StreamExecute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + results, err := v.Execute(vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(results) +} + +//GetFields implements the Primitive interface +func (v *RevertMigration) GetFields(vcursor VCursor, bindVars map[string]*query.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields is not reachable") +} diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index 598d3e3af47..b799863964e 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -94,8 +94,8 @@ type Route struct { ScatterErrorsAsWarnings bool // The following two fields are used when routing information_schema queries - SysTableTableSchema []evalengine.Expr - SysTableTableName []evalengine.Expr + SysTableTableSchema evalengine.Expr + SysTableTableName evalengine.Expr // Route does not take inputs noInputs @@ -125,8 +125,11 @@ func NewRoute(opcode RouteOpcode, keyspace *vindexes.Keyspace, query, fieldQuery // OrderbyParams specifies the parameters for ordering. // This is used for merge-sorting scatter queries. type OrderbyParams struct { - Col int - Desc bool + Col int + // WeightStringCol is the weight_string column that will be used for sorting. 
+ // It is set to -1 if such a column is not added to the query + WeightStringCol int + Desc bool } func (obp OrderbyParams) String() string { @@ -383,7 +386,7 @@ func (route *Route) GetFields(vcursor VCursor, bindVars map[string]*querypb.Bind func (route *Route) paramsAllShards(vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { rss, _, err := vcursor.ResolveDestinations(route.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsAllShards") + return nil, nil, err } multiBindVars := make([]map[string]*querypb.BindVariable, len(rss)) for i := range multiBindVars { @@ -408,7 +411,7 @@ func (route *Route) routeInfoSchemaQuery(vcursor VCursor, bindVars map[string]*q return destinations, vterrors.Wrapf(err, "failed to find information about keyspace `%s`", ks) } - if len(route.SysTableTableName) == 0 && len(route.SysTableTableSchema) == 0 { + if route.SysTableTableName == nil && route.SysTableTableSchema == nil { return defaultRoute() } @@ -418,38 +421,22 @@ func (route *Route) routeInfoSchemaQuery(vcursor VCursor, bindVars map[string]*q } var specifiedKS string - for _, tableSchema := range route.SysTableTableSchema { - result, err := tableSchema.Evaluate(env) + if route.SysTableTableSchema != nil { + result, err := route.SysTableTableSchema.Evaluate(env) if err != nil { return nil, err } - ks := result.Value().ToString() - if specifiedKS == "" { - specifiedKS = ks - } - if specifiedKS != ks { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "specifying two different database in the query is not supported") - } - } - if specifiedKS != "" { + specifiedKS = result.Value().ToString() bindVars[sqltypes.BvSchemaName] = sqltypes.StringBindVariable(specifiedKS) } var tableName string - for _, sysTableName := range route.SysTableTableName { - val, err := sysTableName.Evaluate(env) + if route.SysTableTableName != nil 
{ + val, err := route.SysTableTableName.Evaluate(env) if err != nil { return nil, err } - tabName := val.Value().ToString() - if tableName == "" { - tableName = tabName - } - if tableName != tabName { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "two predicates for table_name not supported") - } - } - if tableName != "" { + tableName = val.Value().ToString() bindVars[BvTableName] = sqltypes.StringBindVariable(tableName) } @@ -523,7 +510,7 @@ func setReplaceSchemaName(bindVars map[string]*querypb.BindVariable) { func (route *Route) paramsAnyShard(vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { rss, _, err := vcursor.ResolveDestinations(route.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsAnyShard") + return nil, nil, err } multiBindVars := make([]map[string]*querypb.BindVariable, len(rss)) for i := range multiBindVars { @@ -535,11 +522,11 @@ func (route *Route) paramsAnyShard(vcursor VCursor, bindVars map[string]*querypb func (route *Route) paramsSelectEqual(vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { key, err := route.Values[0].ResolveValue(bindVars) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsSelectEqual") + return nil, nil, err } rss, _, err := resolveShards(vcursor, route.Vindex, route.Keyspace, []sqltypes.Value{key}) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsSelectEqual") + return nil, nil, err } multiBindVars := make([]map[string]*querypb.BindVariable, len(rss)) for i := range multiBindVars { @@ -551,11 +538,11 @@ func (route *Route) paramsSelectEqual(vcursor VCursor, bindVars map[string]*quer func (route *Route) paramsSelectIn(vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { keys, 
err := route.Values[0].ResolveList(bindVars) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsSelectIn") + return nil, nil, err } rss, values, err := resolveShards(vcursor, route.Vindex, route.Keyspace, keys) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsSelectIn") + return nil, nil, err } return rss, shardVars(bindVars, values), nil } @@ -563,11 +550,11 @@ func (route *Route) paramsSelectIn(vcursor VCursor, bindVars map[string]*querypb func (route *Route) paramsSelectMultiEqual(vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { keys, err := route.Values[0].ResolveList(bindVars) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsSelectIn") + return nil, nil, err } rss, _, err := resolveShards(vcursor, route.Vindex, route.Keyspace, keys) if err != nil { - return nil, nil, vterrors.Wrap(err, "paramsSelectIn") + return nil, nil, err } multiBindVars := make([]map[string]*querypb.BindVariable, len(rss)) for i := range multiBindVars { @@ -605,27 +592,26 @@ func (route *Route) sort(in *sqltypes.Result) (*sqltypes.Result, error) { InsertID: in.InsertID, } + comparers := extractSlices(route.OrderBy) + sort.Slice(out.Rows, func(i, j int) bool { + var cmp int + if err != nil { + return true + } // If there are any errors below, the function sets // the external err and returns true. Once err is set, // all subsequent calls return true. This will make // Slice think that all elements are in the correct // order and return more quickly. 
- for _, order := range route.OrderBy { - if err != nil { - return true - } - var cmp int - cmp, err = evalengine.NullsafeCompare(out.Rows[i][order.Col], out.Rows[j][order.Col]) + for _, c := range comparers { + cmp, err = c.compare(out.Rows[i], out.Rows[j]) if err != nil { return true } if cmp == 0 { continue } - if order.Desc { - cmp = -cmp - } return cmp < 0 } return true @@ -726,7 +712,7 @@ func shardVars(bv map[string]*querypb.BindVariable, mapVals [][]*querypb.Value) func allowOnlyMaster(rss ...*srvtopo.ResolvedShard) error { for _, rs := range rss { if rs != nil && rs.Target.TabletType != topodatapb.TabletType_MASTER { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "supported only for master tablet type, current type: %v", topoproto.TabletTypeLString(rs.Target.TabletType)) + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "supported only for master tablet type, current type: %v", topoproto.TabletTypeLString(rs.Target.TabletType)) } } return nil @@ -744,27 +730,11 @@ func (route *Route) description() PrimitiveDescription { if len(route.Values) > 0 { other["Values"] = route.Values } - if len(route.SysTableTableSchema) != 0 { - sysTabSchema := "[" - for idx, tableSchema := range route.SysTableTableSchema { - if idx != 0 { - sysTabSchema += ", " - } - sysTabSchema += tableSchema.String() - } - sysTabSchema += "]" - other["SysTableTableSchema"] = sysTabSchema - } - if len(route.SysTableTableName) != 0 { - sysTableName := "[" - for idx, tableName := range route.SysTableTableName { - if idx != 0 { - sysTableName += ", " - } - sysTableName += tableName.String() - } - sysTableName += "]" - other["SysTableTableName"] = sysTableName + if route.SysTableTableSchema != nil { + other["SysTableTableSchema"] = route.SysTableTableSchema.String() + } + if route.SysTableTableName != nil { + other["SysTableTableName"] = route.SysTableTableName.String() } orderBy := GenericJoin(route.OrderBy, orderByToString) if orderBy != "" { diff --git 
a/go/vt/vtgate/engine/route_test.go b/go/vt/vtgate/engine/route_test.go index 93deb22a727..67afadce4c4 100644 --- a/go/vt/vtgate/engine/route_test.go +++ b/go/vt/vtgate/engine/route_test.go @@ -20,6 +20,9 @@ import ( "errors" "testing" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -56,9 +59,7 @@ func TestSelectUnsharded(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.0: dummy_select {} false false`, @@ -67,9 +68,7 @@ func TestSelectUnsharded(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `StreamExecuteMulti dummy_select ks.0: {} `, @@ -78,24 +77,23 @@ func TestSelectUnsharded(t *testing.T) { } func TestSelectInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { - stringListToExprList := func(in []string) []evalengine.Expr { - var schema []evalengine.Expr - for _, s := range in { - schema = append(schema, evalengine.NewLiteralString([]byte(s))) + stringToExpr := func(in string) evalengine.Expr { + var schema evalengine.Expr + if in != "" { + schema = evalengine.NewLiteralString([]byte(in)) } return schema } type testCase struct { - tableSchema, tableName []string - testName string - expectedLog []string - routed bool + tableSchema, tableName, testName string + expectedLog []string + routed bool } tests := []testCase{{ testName: "both schema and table predicates - routed table", - tableSchema: []string{"schema"}, - tableName: []string{"table"}, + 
tableSchema: "schema", + tableName: "table", routed: true, expectedLog: []string{ "FindTable(`schema`.`table`)", @@ -103,17 +101,8 @@ func TestSelectInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) "ExecuteMultiShard routedKeyspace.1: dummy_select {__replacevtschemaname: type:INT64 value:\"1\" __vttablename: type:VARBINARY value:\"routedTable\" } false false"}, }, { testName: "both schema and table predicates - not routed", - tableSchema: []string{"schema"}, - tableName: []string{"table"}, - routed: false, - expectedLog: []string{ - "FindTable(`schema`.`table`)", - "ResolveDestinations schema [] Destinations:DestinationAnyShard()", - "ExecuteMultiShard schema.1: dummy_select {__replacevtschemaname: type:INT64 value:\"1\" __vttablename: type:VARBINARY value:\"table\" } false false"}, - }, { - testName: "multiple schema and table predicates", - tableSchema: []string{"schema", "schema", "schema"}, - tableName: []string{"table", "table", "table"}, + tableSchema: "schema", + tableName: "table", routed: false, expectedLog: []string{ "FindTable(`schema`.`table`)", @@ -121,7 +110,7 @@ func TestSelectInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) "ExecuteMultiShard schema.1: dummy_select {__replacevtschemaname: type:INT64 value:\"1\" __vttablename: type:VARBINARY value:\"table\" } false false"}, }, { testName: "table name predicate - routed table", - tableName: []string{"tableName"}, + tableName: "tableName", routed: true, expectedLog: []string{ "FindTable(tableName)", @@ -129,7 +118,7 @@ func TestSelectInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) "ExecuteMultiShard routedKeyspace.1: dummy_select {__vttablename: type:VARBINARY value:\"routedTable\" } false false"}, }, { testName: "table name predicate - not routed", - tableName: []string{"tableName"}, + tableName: "tableName", routed: false, expectedLog: []string{ "FindTable(tableName)", @@ -137,13 +126,7 @@ func 
TestSelectInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) "ExecuteMultiShard ks.1: dummy_select {__vttablename: type:VARBINARY value:\"tableName\" } false false"}, }, { testName: "schema predicate", - tableSchema: []string{"myKeyspace"}, - expectedLog: []string{ - "ResolveDestinations myKeyspace [] Destinations:DestinationAnyShard()", - "ExecuteMultiShard myKeyspace.1: dummy_select {__replacevtschemaname: type:INT64 value:\"1\" } false false"}, - }, { - testName: "multiple schema predicates", - tableSchema: []string{"myKeyspace", "myKeyspace", "myKeyspace", "myKeyspace"}, + tableSchema: "myKeyspace", expectedLog: []string{ "ResolveDestinations myKeyspace [] Destinations:DestinationAnyShard()", "ExecuteMultiShard myKeyspace.1: dummy_select {__replacevtschemaname: type:INT64 value:\"1\" } false false"}, @@ -164,8 +147,8 @@ func TestSelectInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) }, Query: "dummy_select", FieldQuery: "dummy_select_field", - SysTableTableSchema: stringListToExprList(tc.tableSchema), - SysTableTableName: stringListToExprList(tc.tableName), + SysTableTableSchema: stringToExpr(tc.tableSchema), + SysTableTableName: stringToExpr(tc.tableName), } vc := &loggingVCursor{ shards: []string{"1"}, @@ -201,9 +184,7 @@ func TestSelectScatter(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, @@ -212,9 +193,7 @@ func TestSelectScatter(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, 
`StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, @@ -241,9 +220,7 @@ func TestSelectEqualUnique(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, @@ -252,9 +229,7 @@ func TestSelectEqualUnique(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, `StreamExecuteMulti dummy_select ks.-20: {} `, @@ -316,9 +291,7 @@ func TestSelectEqualUniqueScatter(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationKeyRange(-)`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, @@ -327,9 +300,7 @@ func TestSelectEqualUniqueScatter(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationKeyRange(-)`, `StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, @@ -370,9 +341,7 @@ func TestSelectEqual(t *testing.T) { }, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc 
from lkp where from in ::from from: type:TUPLE values: false`, `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationKeyspaceIDs(00,80)`, @@ -382,9 +351,7 @@ func TestSelectEqual(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc from lkp where from in ::from from: type:TUPLE values: false`, `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationKeyspaceIDs(00,80)`, @@ -413,9 +380,7 @@ func TestSelectEqualNoRoute(t *testing.T) { vc := &loggingVCursor{shards: []string{"-20", "20-"}} result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc from lkp where from in ::from from: type:TUPLE values: false`, `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationNone()`, @@ -424,9 +389,7 @@ func TestSelectEqualNoRoute(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc from lkp where from in ::from from: type:TUPLE values: false`, `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationNone()`, @@ -462,9 +425,7 @@ func TestSelectINUnique(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4" ] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, `ExecuteMultiShard ` + @@ -476,9 +437,7 @@ func 
TestSelectINUnique(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4" ] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, `StreamExecuteMulti dummy_select ks.-20: {__vals: type:TUPLE values: values: } ks.20-: {__vals: type:TUPLE values: } `, @@ -533,9 +492,7 @@ func TestSelectINNonUnique(t *testing.T) { }, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc from lkp where from in ::from from: type:TUPLE values: values: values: false`, `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4" ] Destinations:DestinationKeyspaceIDs(00,80),DestinationKeyspaceIDs(00),DestinationKeyspaceIDs(80)`, @@ -548,9 +505,7 @@ func TestSelectINNonUnique(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc from lkp where from in ::from from: type:TUPLE values: values: values: false`, `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4" ] Destinations:DestinationKeyspaceIDs(00,80),DestinationKeyspaceIDs(00),DestinationKeyspaceIDs(80)`, @@ -587,9 +542,7 @@ func TestSelectMultiEqual(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4" ] 
Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, @@ -598,9 +551,7 @@ func TestSelectMultiEqual(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4" ] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, `StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, @@ -624,9 +575,7 @@ func TestSelectNext(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, @@ -658,9 +607,7 @@ func TestSelectDBA(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, @@ -692,9 +639,7 @@ func TestSelectReference(t *testing.T) { results: []*sqltypes.Result{defaultSelectResult}, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, @@ -730,9 +675,7 @@ func TestRouteGetFields(t *testing.T) { vc := 
&loggingVCursor{shards: []string{"-20", "20-"}} result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc from lkp where from in ::from from: type:TUPLE values: false`, `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationNone()`, @@ -743,9 +686,7 @@ func TestRouteGetFields(t *testing.T) { vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `Execute select from, toc from lkp where from in ::from from: type:TUPLE values: false`, `ResolveDestinations ks [type:INT64 value:"1" ] Destinations:DestinationNone()`, @@ -766,7 +707,8 @@ func TestRouteSort(t *testing.T) { "dummy_select_field", ) sel.OrderBy = []OrderbyParams{{ - Col: 0, + Col: 0, + WeightStringCol: -1, }} vc := &loggingVCursor{ @@ -785,9 +727,7 @@ func TestRouteSort(t *testing.T) { }, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.0: dummy_select {} false false`, @@ -807,9 +747,7 @@ func TestRouteSort(t *testing.T) { sel.OrderBy[0].Desc = true vc.Rewind() result, err = sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) wantResult = sqltypes.MakeTestResult( sqltypes.MakeTestFields( "id", @@ -837,7 +775,109 @@ func TestRouteSort(t *testing.T) { }, } _, err = sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "sel.Execute", err, "types are not comparable: VARCHAR vs VARCHAR") + require.EqualError(t, err, `types are not comparable: VARCHAR vs VARCHAR`) +} + +func TestRouteSortWeightStrings(t *testing.T) { + sel := NewRoute( + SelectUnsharded, 
+ &vindexes.Keyspace{ + Name: "ks", + Sharded: false, + }, + "dummy_select", + "dummy_select_field", + ) + sel.OrderBy = []OrderbyParams{{ + Col: 1, + WeightStringCol: 0, + }} + + vc := &loggingVCursor{ + shards: []string{"0"}, + results: []*sqltypes.Result{ + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "weightString|normal", + "varbinary|varchar", + ), + "v|x", + "g|d", + "a|a", + "c|t", + "f|p", + ), + }, + } + + var result *sqltypes.Result + var wantResult *sqltypes.Result + var err error + t.Run("Sort using Weight Strings", func(t *testing.T) { + result, err = sel.Execute(vc, map[string]*querypb.BindVariable{}, false) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, + `ExecuteMultiShard ks.0: dummy_select {} false false`, + }) + wantResult = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "weightString|normal", + "varbinary|varchar", + ), + "a|a", + "c|t", + "f|p", + "g|d", + "v|x", + ) + expectResult(t, "sel.Execute", result, wantResult) + }) + + t.Run("Descending ordering using weighted strings", func(t *testing.T) { + sel.OrderBy[0].Desc = true + vc.Rewind() + result, err = sel.Execute(vc, map[string]*querypb.BindVariable{}, false) + require.NoError(t, err) + wantResult = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "weightString|normal", + "varbinary|varchar", + ), + "v|x", + "g|d", + "f|p", + "c|t", + "a|a", + ) + expectResult(t, "sel.Execute", result, wantResult) + }) + + t.Run("Error when no weight string set", func(t *testing.T) { + sel.OrderBy = []OrderbyParams{{ + Col: 1, + WeightStringCol: -1, + }} + + vc = &loggingVCursor{ + shards: []string{"0"}, + results: []*sqltypes.Result{ + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "weightString|normal", + "varbinary|varchar", + ), + "v|x", + "g|d", + "a|a", + "c|t", + "f|p", + ), + }, + } + _, err = sel.Execute(vc, map[string]*querypb.BindVariable{}, false) + require.EqualError(t, err, `types are not 
comparable: VARCHAR vs VARCHAR`) + }) } func TestRouteSortTruncate(t *testing.T) { @@ -871,9 +911,7 @@ func TestRouteSortTruncate(t *testing.T) { }, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.0: dummy_select {} false false`, @@ -917,9 +955,7 @@ func TestRouteStreamTruncate(t *testing.T) { }, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.0: dummy_select {} false false`, @@ -964,9 +1000,7 @@ func TestRouteStreamSortTruncate(t *testing.T) { }, } result, err := wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `StreamExecuteMulti dummy_select ks.0: {} `, @@ -999,11 +1033,11 @@ func TestParamsFail(t *testing.T) { vc := &loggingVCursor{shardErr: errors.New("shard error")} _, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "sel.Execute err", err, "paramsAnyShard: shard error") + require.EqualError(t, err, `shard error`) vc.Rewind() _, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "sel.StreamExecute err", err, "paramsAnyShard: shard error") + require.EqualError(t, err, `shard error`) } func TestExecFail(t *testing.T) { @@ -1018,14 +1052,14 @@ func TestExecFail(t *testing.T) { "dummy_select_field", ) - vc := &loggingVCursor{shards: []string{"0"}, resultErr: mysql.NewSQLError(mysql.ERQueryInterrupted, "", "query timeout")} + vc := &loggingVCursor{shards: []string{"0"}, resultErr: 
vterrors.NewErrorf(vtrpcpb.Code_CANCELED, vterrors.QueryInterrupted, "query timeout")} _, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "sel.Execute err", err, "query timeout (errno 1317) (sqlstate HY000)") + require.EqualError(t, err, `query timeout`) vc.ExpectWarnings(t, nil) vc.Rewind() _, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "sel.StreamExecute err", err, "query timeout (errno 1317) (sqlstate HY000)") + require.EqualError(t, err, `query timeout`) // Scatter fails if one of N fails without ScatterErrorsAsWarnings sel = NewRoute( @@ -1046,7 +1080,7 @@ func TestExecFail(t *testing.T) { }, } _, err = sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "sel.Execute err", err, "result error -20") + require.EqualError(t, err, `result error -20`) vc.ExpectWarnings(t, nil) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, @@ -1054,7 +1088,6 @@ func TestExecFail(t *testing.T) { }) vc.Rewind() - // Scatter succeeds if all shards fail with ScatterErrorsAsWarnings sel = NewRoute( SelectScatter, @@ -1076,9 +1109,7 @@ func TestExecFail(t *testing.T) { }, } _, err = sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Errorf("unexpected ScatterErrorsAsWarnings error %v", err) - } + require.NoError(t, err, "unexpected ScatterErrorsAsWarnings error %v", err) // Ensure that the error code is preserved from SQLErrors and that it // turns into ERUnknownError for all others @@ -1114,9 +1145,7 @@ func TestExecFail(t *testing.T) { }, } result, err := sel.Execute(vc, map[string]*querypb.BindVariable{}, false) - if err != nil { - t.Errorf("unexpected ScatterErrorsAsWarnings error %v", err) - } + require.NoError(t, err, "unexpected ScatterErrorsAsWarnings error %v", err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.-20: dummy_select {} 
ks.20-: dummy_select {} false false`, diff --git a/go/vt/vtgate/engine/rows.go b/go/vt/vtgate/engine/rows.go index 60d4c430734..98e7fd5e57f 100644 --- a/go/vt/vtgate/engine/rows.go +++ b/go/vt/vtgate/engine/rows.go @@ -55,10 +55,9 @@ func (r *Rows) GetTableName() string { //Execute implements the Primitive interface func (r *Rows) Execute(VCursor, map[string]*querypb.BindVariable, bool) (*sqltypes.Result, error) { return &sqltypes.Result{ - Fields: r.fields, - RowsAffected: uint64(len(r.rows)), - InsertID: 0, - Rows: r.rows, + Fields: r.fields, + InsertID: 0, + Rows: r.rows, }, nil } @@ -74,10 +73,9 @@ func (r *Rows) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindV //GetFields implements the Primitive interface func (r *Rows) GetFields(VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { return &sqltypes.Result{ - Fields: r.fields, - RowsAffected: uint64(len(r.rows)), - InsertID: 0, - Rows: nil, + Fields: r.fields, + InsertID: 0, + Rows: nil, }, nil } diff --git a/go/vt/vtgate/engine/send.go b/go/vt/vtgate/engine/send.go index fbdf859176d..ce1ad8106bc 100644 --- a/go/vt/vtgate/engine/send.go +++ b/go/vt/vtgate/engine/send.go @@ -79,7 +79,7 @@ func (s *Send) GetTableName() string { func (s *Send) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { rss, _, err := vcursor.ResolveDestinations(s.Keyspace.Name, nil, []key.Destination{s.TargetDestination}) if err != nil { - return nil, vterrors.Wrap(err, "sendExecute") + return nil, err } if !s.Keyspace.Sharded && len(rss) != 1 { @@ -116,7 +116,7 @@ func (s *Send) Execute(vcursor VCursor, bindVars map[string]*querypb.BindVariabl func (s *Send) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { rss, _, err := vcursor.ResolveDestinations(s.Keyspace.Name, nil, []key.Destination{s.TargetDestination}) if err != nil { - return vterrors.Wrap(err, 
"sendStreamExecute") + return err } if !s.Keyspace.Sharded && len(rss) != 1 { @@ -146,7 +146,7 @@ func (s *Send) StreamExecute(vcursor VCursor, bindVars map[string]*querypb.BindV func (s *Send) GetFields(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { qr, err := s.Execute(vcursor, bindVars, false) if err != nil { - return nil, vterrors.Wrap(err, "sendGetFields") + return nil, err } qr.Rows = nil return qr, nil diff --git a/go/vt/vtgate/engine/send_test.go b/go/vt/vtgate/engine/send_test.go index 38703e26b05..1d0d7a4cbf0 100644 --- a/go/vt/vtgate/engine/send_test.go +++ b/go/vt/vtgate/engine/send_test.go @@ -157,7 +157,7 @@ func TestSendTable(t *testing.T) { // Failure cases vc = &loggingVCursor{shardErr: errors.New("shard_error")} _, err = send.Execute(vc, map[string]*querypb.BindVariable{}, false) - require.EqualError(t, err, "sendExecute: shard_error") + require.EqualError(t, err, "shard_error") if !tc.sharded { vc = &loggingVCursor{} @@ -276,7 +276,7 @@ func TestSendTable_StreamExecute(t *testing.T) { // Failure cases vc = &loggingVCursor{shardErr: errors.New("shard_error")} _, err = wrapStreamExecute(send, vc, map[string]*querypb.BindVariable{}, false) - require.EqualError(t, err, "sendStreamExecute: shard_error") + require.EqualError(t, err, "shard_error") if !tc.sharded { vc = &loggingVCursor{} diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go index dc317e52a23..c07e3920483 100644 --- a/go/vt/vtgate/engine/set.go +++ b/go/vt/vtgate/engine/set.go @@ -238,7 +238,7 @@ func (svci *SysVarCheckAndIgnore) VariableName() string { func (svci *SysVarCheckAndIgnore) Execute(vcursor VCursor, env evalengine.ExpressionEnv) error { rss, _, err := vcursor.ResolveDestinations(svci.Keyspace.Name, nil, []key.Destination{svci.TargetDestination}) if err != nil { - return vterrors.Wrap(err, "SysVarCheckAndIgnore") + return err } if len(rss) != 1 { @@ -253,7 +253,7 @@ func (svci *SysVarCheckAndIgnore) Execute(vcursor 
VCursor, env evalengine.Expres log.Warningf("unable to validate the current settings for '%s': %s", svci.Name, err.Error()) return nil } - if result.RowsAffected == 0 { + if len(result.Rows) == 0 { log.Infof("Ignored inapplicable SET %v = %v", svci.Name, svci.Expr) } return nil @@ -284,7 +284,7 @@ func (svs *SysVarReservedConn) Execute(vcursor VCursor, env evalengine.Expressio if svs.TargetDestination != nil { rss, _, err := vcursor.ResolveDestinations(svs.Keyspace.Name, nil, []key.Destination{svs.TargetDestination}) if err != nil { - return vterrors.Wrap(err, "SysVarSet") + return err } vcursor.Session().NeedsReservedConn() return svs.execSetStatement(vcursor, rss, env) @@ -329,7 +329,7 @@ func (svs *SysVarReservedConn) checkAndUpdateSysVar(vcursor VCursor, res evaleng sysVarExprValidationQuery := fmt.Sprintf("select %s from dual where @@%s != %s", svs.Expr, svs.Name, svs.Expr) rss, _, err := vcursor.ResolveDestinations(svs.Keyspace.Name, nil, []key.Destination{key.DestinationKeyspaceID{0}}) if err != nil { - return false, vterrors.Wrap(err, "SysVarSet") + return false, err } qr, err := execShard(vcursor, sysVarExprValidationQuery, res.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */) if err != nil { @@ -380,7 +380,7 @@ func (svss *SysVarSetAware) Execute(vcursor VCursor, env evalengine.ExpressionEn case sysvars.SQLSelectLimit.Name: intValue, err := svss.evalAsInt64(env) if err != nil { - return vterrors.Wrapf(err, "failed to evaluate value for %s", sysvars.SQLSelectLimit.Name) + return err } vcursor.Session().SetSQLSelectLimit(intValue) case sysvars.TransactionMode.Name: @@ -390,7 +390,7 @@ func (svss *SysVarSetAware) Execute(vcursor VCursor, env evalengine.ExpressionEn } out, ok := vtgatepb.TransactionMode_value[strings.ToUpper(str)] if !ok { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid transaction_mode: %s", str) + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid 
transaction_mode: %s", str) } vcursor.Session().SetTransactionMode(vtgatepb.TransactionMode(out)) case sysvars.Workload.Name: @@ -400,7 +400,7 @@ func (svss *SysVarSetAware) Execute(vcursor VCursor, env evalengine.ExpressionEn } out, ok := querypb.ExecuteOptions_Workload_value[strings.ToUpper(str)] if !ok { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid workload: %s", str) + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid workload: %s", str) } vcursor.Session().SetWorkload(querypb.ExecuteOptions_Workload(out)) case sysvars.DDLStrategy.Name: @@ -409,9 +409,11 @@ func (svss *SysVarSetAware) Execute(vcursor VCursor, env evalengine.ExpressionEn return err } if _, _, err := schema.ParseDDLStrategy(str); err != nil { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid DDL strategy: %s", str) + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid DDL strategy: %s", str) } vcursor.Session().SetDDLStrategy(str) + case sysvars.SessionEnableSystemSettings.Name: + err = svss.setBoolSysVar(env, vcursor.Session().SetSessionEnableSystemSettings) case sysvars.Charset.Name, sysvars.Names.Name: str, err := svss.evalAsString(env) if err != nil { @@ -422,7 +424,7 @@ func (svss *SysVarSetAware) Execute(vcursor VCursor, env evalengine.ExpressionEn // do nothing break default: - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected value for charset/names: %v", str) + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "unexpected value for charset/names: %v", str) } case sysvars.ReadAfterWriteGTID.Name: str, err := svss.evalAsString(env) @@ -447,10 +449,10 @@ func (svss *SysVarSetAware) Execute(vcursor VCursor, env evalengine.ExpressionEn case "own_gtid": vcursor.Session().SetSessionTrackGTIDs(true) default: - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s", str) + return 
vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "Variable 'session_track_gtids' can't be set to the value of '%s'", str) } default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported construct %s", svss.Name) + return vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.UnknownSystemVariable, "Unknown system variable '%s'", svss.Name) } return err @@ -464,7 +466,7 @@ func (svss *SysVarSetAware) evalAsInt64(env evalengine.ExpressionEnv) (int64, er v := value.Value() if !v.IsIntegral() { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expected int, unexpected value type: %T", value.Value().Type().String()) + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "Incorrect argument type to variable '%s': %s", svss.Name, value.Value().Type().String()) } intValue, err := v.ToInt64() if err != nil { @@ -482,7 +484,7 @@ func (svss *SysVarSetAware) evalAsFloat(env evalengine.ExpressionEnv) (float64, v := value.Value() floatValue, err := v.ToFloat64() if err != nil { - return 0, err + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "Incorrect argument type to variable '%s': %s", svss.Name, value.Value().Type().String()) } return floatValue, nil } @@ -494,7 +496,7 @@ func (svss *SysVarSetAware) evalAsString(env evalengine.ExpressionEnv) (string, } v := value.Value() if !v.IsText() && !v.IsBinary() { - return "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected value type for %s: %s", svss.Name, value.Value().Type().String()) + return "", vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "Incorrect argument type to variable '%s': %s", svss.Name, value.Value().Type().String()) } return v.ToString(), nil @@ -507,7 +509,7 @@ func (svss *SysVarSetAware) setBoolSysVar(env evalengine.ExpressionEnv, setter f } boolValue, err := value.ToBooleanStrict() if err != nil { - return vterrors.Wrapf(err, "System setting '%s' can't be set 
to this value", svss.Name) + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "Variable '%s' can't be set to the value: %s", svss.Name, err.Error()) } return setter(boolValue) } diff --git a/go/vt/vtgate/engine/singlerow.go b/go/vt/vtgate/engine/singlerow.go index 260418705be..a2a5b80b308 100644 --- a/go/vt/vtgate/engine/singlerow.go +++ b/go/vt/vtgate/engine/singlerow.go @@ -47,7 +47,6 @@ func (s *SingleRow) GetTableName() string { // Execute performs a non-streaming exec. func (s *SingleRow) Execute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantfields bool) (*sqltypes.Result, error) { result := sqltypes.Result{ - RowsAffected: 1, Rows: [][]sqltypes.Value{ {}, }, @@ -58,7 +57,6 @@ func (s *SingleRow) Execute(vcursor VCursor, bindVars map[string]*query.BindVari // StreamExecute performs a streaming exec. func (s *SingleRow) StreamExecute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantields bool, callback func(*sqltypes.Result) error) error { result := sqltypes.Result{ - RowsAffected: 1, Rows: [][]sqltypes.Value{ {}, }, diff --git a/go/vt/vtgate/engine/subquery_test.go b/go/vt/vtgate/engine/subquery_test.go index 4c24ff9fbe5..216344d4bd4 100644 --- a/go/vt/vtgate/engine/subquery_test.go +++ b/go/vt/vtgate/engine/subquery_test.go @@ -20,6 +20,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -71,7 +73,7 @@ func TestSubqueryExecute(t *testing.T) { sendErr: errors.New("err"), } _, err = sq.Execute(nil, bv, true) - expectError(t, "sq.Execute", err, "err") + require.EqualError(t, err, `err`) } func TestSubqueryStreamExecute(t *testing.T) { @@ -120,7 +122,7 @@ func TestSubqueryStreamExecute(t *testing.T) { sendErr: errors.New("err"), } _, err = wrapStreamExecute(sq, nil, bv, true) - expectError(t, "sq.Execute", err, "err") + require.EqualError(t, err, `err`) } func TestSubqueryGetFields(t 
*testing.T) { @@ -167,5 +169,5 @@ func TestSubqueryGetFields(t *testing.T) { sendErr: errors.New("err"), } _, err = sq.GetFields(nil, bv) - expectError(t, "sq.Execute", err, "err") + require.EqualError(t, err, `err`) } diff --git a/go/vt/vtgate/engine/update.go b/go/vt/vtgate/engine/update.go index a8710e1e324..1bbdbc8d2a8 100644 --- a/go/vt/vtgate/engine/update.go +++ b/go/vt/vtgate/engine/update.go @@ -117,7 +117,7 @@ func (upd *Update) GetFields(vcursor VCursor, bindVars map[string]*querypb.BindV func (upd *Update) execUpdateUnsharded(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { rss, _, err := vcursor.ResolveDestinations(upd.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) if err != nil { - return nil, vterrors.Wrap(err, "execUpdateUnsharded") + return nil, err } if len(rss) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) @@ -132,11 +132,11 @@ func (upd *Update) execUpdateUnsharded(vcursor VCursor, bindVars map[string]*que func (upd *Update) execUpdateEqual(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { key, err := upd.Values[0].ResolveValue(bindVars) if err != nil { - return nil, vterrors.Wrap(err, "execUpdateEqual") + return nil, err } rs, ksid, err := resolveSingleShard(vcursor, upd.Vindex, upd.Keyspace, key) if err != nil { - return nil, vterrors.Wrap(err, "execUpdateEqual") + return nil, err } err = allowOnlyMaster(rs) if err != nil { @@ -147,7 +147,7 @@ func (upd *Update) execUpdateEqual(vcursor VCursor, bindVars map[string]*querypb } if len(upd.ChangedVindexValues) != 0 { if err := upd.updateVindexEntries(vcursor, bindVars, []*srvtopo.ResolvedShard{rs}); err != nil { - return nil, vterrors.Wrap(err, "execUpdateEqual") + return nil, err } } return execShard(vcursor, upd.Query, bindVars, rs, true /* rollbackOnError */, true /* canAutocommit */) @@ -164,7 +164,7 @@ func (upd *Update) 
execUpdateIn(vcursor VCursor, bindVars map[string]*querypb.Bi } if len(upd.ChangedVindexValues) != 0 { if err := upd.updateVindexEntries(vcursor, bindVars, rss); err != nil { - return nil, vterrors.Wrap(err, "execUpdateIn") + return nil, err } } return execMultiShard(vcursor, rss, queries, upd.MultiShardAutocommit) @@ -173,7 +173,7 @@ func (upd *Update) execUpdateIn(vcursor VCursor, bindVars map[string]*querypb.Bi func (upd *Update) execUpdateByDestination(vcursor VCursor, bindVars map[string]*querypb.BindVariable, dest key.Destination) (*sqltypes.Result, error) { rss, _, err := vcursor.ResolveDestinations(upd.Keyspace.Name, nil, []key.Destination{dest}) if err != nil { - return nil, vterrors.Wrap(err, "execUpdateByDestination") + return nil, err } err = allowOnlyMaster(rss...) if err != nil { @@ -191,7 +191,7 @@ func (upd *Update) execUpdateByDestination(vcursor VCursor, bindVars map[string] // update any owned vindexes if len(upd.ChangedVindexValues) != 0 { if err := upd.updateVindexEntries(vcursor, bindVars, rss); err != nil { - return nil, vterrors.Wrap(err, "execUpdateByDestination") + return nil, err } } return execMultiShard(vcursor, rss, queries, upd.MultiShardAutocommit) @@ -211,7 +211,7 @@ func (upd *Update) updateVindexEntries(vcursor VCursor, bindVars map[string]*que subQueryResult, errors := vcursor.ExecuteMultiShard(rss, queries, false, false) for _, err := range errors { if err != nil { - return vterrors.Wrap(err, "updateVindexEntries") + return err } } diff --git a/go/vt/vtgate/engine/update_target.go b/go/vt/vtgate/engine/update_target.go index a4451f808da..ad5d04d4e5e 100644 --- a/go/vt/vtgate/engine/update_target.go +++ b/go/vt/vtgate/engine/update_target.go @@ -78,5 +78,5 @@ func (updTarget *UpdateTarget) StreamExecute(vcursor VCursor, bindVars map[strin // GetFields implements the Primitive interface func (updTarget *UpdateTarget) GetFields(vcursor VCursor, bindVars map[string]*query.BindVariable) (*sqltypes.Result, error) { - return nil, 
vterrors.Errorf(vtrpcpb.Code_INTERNAL, "use cannot be used for get fields") + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields not reachable for use statement") } diff --git a/go/vt/vtgate/engine/update_target_test.go b/go/vt/vtgate/engine/update_target_test.go index 2b9479a20aa..48496a59598 100644 --- a/go/vt/vtgate/engine/update_target_test.go +++ b/go/vt/vtgate/engine/update_target_test.go @@ -67,5 +67,5 @@ func TestUpdateTargetGetFields(t *testing.T) { updateTarget := &UpdateTarget{} vc := &noopVCursor{} _, err := updateTarget.GetFields(vc, map[string]*querypb.BindVariable{}) - require.EqualError(t, err, "use cannot be used for get fields") + require.EqualError(t, err, "[BUG] GetFields not reachable for use statement") } diff --git a/go/vt/vtgate/engine/update_test.go b/go/vt/vtgate/engine/update_test.go index 5e64062bdac..f369443c702 100644 --- a/go/vt/vtgate/engine/update_test.go +++ b/go/vt/vtgate/engine/update_test.go @@ -54,11 +54,11 @@ func TestUpdateUnsharded(t *testing.T) { // Failure cases vc = &loggingVCursor{shardErr: errors.New("shard_error")} _, err = upd.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execUpdateUnsharded: shard_error") + require.EqualError(t, err, `shard_error`) vc = &loggingVCursor{} _, err = upd.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "Keyspace does not have exactly one shard: []") + require.EqualError(t, err, `Keyspace does not have exactly one shard: []`) } func TestUpdateEqual(t *testing.T) { @@ -87,7 +87,7 @@ func TestUpdateEqual(t *testing.T) { // Failure case upd.Values = []sqltypes.PlanValue{{Key: "aa"}} _, err = upd.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execUpdateEqual: missing bind var aa") + require.EqualError(t, err, `missing bind var aa`) } func TestUpdateScatter(t *testing.T) { @@ -189,7 +189,7 @@ func TestUpdateEqualNoScatter(t *testing.T) { vc := 
newDMLTestVCursor("0") _, err := upd.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execUpdateEqual: cannot map vindex to unique keyspace id: DestinationKeyRange(-)") + require.EqualError(t, err, `cannot map vindex to unique keyspace id: DestinationKeyRange(-)`) } func TestUpdateEqualChangedVindex(t *testing.T) { @@ -598,7 +598,7 @@ func TestUpdateInChangedVindex(t *testing.T) { func TestUpdateNoStream(t *testing.T) { upd := &Update{} err := upd.StreamExecute(nil, nil, false, nil) - expectError(t, "StreamExecute", err, `query "" cannot be used for streaming`) + require.EqualError(t, err, `query "" cannot be used for streaming`) } func buildTestVSchema() *vindexes.VSchema { diff --git a/go/vt/vtgate/engine/vindex_func.go b/go/vt/vtgate/engine/vindex_func.go index eb1c3b50b8d..0bdefcc98d7 100644 --- a/go/vt/vtgate/engine/vindex_func.go +++ b/go/vt/vtgate/engine/vindex_func.go @@ -128,7 +128,6 @@ func (vf *VindexFunc) mapVindex(vcursor VCursor, bindVars map[string]*querypb.Bi case key.DestinationKeyRange: if d.KeyRange != nil { result.Rows = append(result.Rows, vf.buildRow(vkey, nil, d.KeyRange)) - result.RowsAffected = 1 } case key.DestinationKeyspaceID: if len(d) > 0 { @@ -144,19 +143,16 @@ func (vf *VindexFunc) mapVindex(vcursor VCursor, bindVars map[string]*querypb.Bi result.Rows = [][]sqltypes.Value{ vf.buildRow(vkey, d, kr[0]), } - result.RowsAffected = 1 } else { result.Rows = [][]sqltypes.Value{ vf.buildRow(vkey, d, nil), } - result.RowsAffected = 1 } } case key.DestinationKeyspaceIDs: for _, ksid := range d { result.Rows = append(result.Rows, vf.buildRow(vkey, ksid, nil)) } - result.RowsAffected = uint64(len(d)) case key.DestinationNone: // Nothing to do. 
default: diff --git a/go/vt/vtgate/engine/vindex_func_test.go b/go/vt/vtgate/engine/vindex_func_test.go index eb8e3265ae8..067d7d51886 100644 --- a/go/vt/vtgate/engine/vindex_func_test.go +++ b/go/vt/vtgate/engine/vindex_func_test.go @@ -137,7 +137,7 @@ func TestVindexFuncMap(t *testing.T) { sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x60}), sqltypes.NULL, }}, - RowsAffected: 1, + RowsAffected: 0, } if !reflect.DeepEqual(got, want) { t.Errorf("Execute(Map, uvindex(none)):\n%v, want\n%v", got, want) @@ -191,7 +191,7 @@ func TestVindexFuncMap(t *testing.T) { sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x60}), sqltypes.NULL, }}, - RowsAffected: 1, + RowsAffected: 0, } if !reflect.DeepEqual(got, want) { t.Errorf("Execute(Map, uvindex(none)):\n%v, want\n%v", got, want) diff --git a/go/vt/vtgate/engine/vschema_ddl.go b/go/vt/vtgate/engine/vschema_ddl.go index 5eca45e566a..2cf2591a399 100644 --- a/go/vt/vtgate/engine/vschema_ddl.go +++ b/go/vt/vtgate/engine/vschema_ddl.go @@ -74,10 +74,10 @@ func (v *AlterVSchema) Execute(vcursor VCursor, bindVars map[string]*query.BindV //StreamExecute implements the Primitive interface func (v *AlterVSchema) StreamExecute(vcursor VCursor, bindVars map[string]*query.BindVariable, wantields bool, callback func(*sqltypes.Result) error) error { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "not reachable") // TODO: systay - this should work + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Alter vschema not supported in streaming") } //GetFields implements the Primitive interface func (v *AlterVSchema) GetFields(vcursor VCursor, bindVars map[string]*query.BindVariable) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "not reachable") // TODO: systay - this should work + return nil, vterrors.NewErrorf(vtrpcpb.Code_UNIMPLEMENTED, vterrors.UnsupportedPS, "This command is not supported in the prepared statement protocol yet") } diff --git a/go/vt/vtgate/evalengine/arithmetic.go 
b/go/vt/vtgate/evalengine/arithmetic.go index 98ee792d089..3a887d77763 100644 --- a/go/vt/vtgate/evalengine/arithmetic.go +++ b/go/vt/vtgate/evalengine/arithmetic.go @@ -34,6 +34,17 @@ import ( // a Value, used for arithmetic operations. var zeroBytes = []byte("0") +// UnsupportedComparisonError represents the error where the comparison between the two types is unsupported on vitess +type UnsupportedComparisonError struct { + Type1 querypb.Type + Type2 querypb.Type +} + +// Error function implements the error interface +func (err UnsupportedComparisonError) Error() string { + return fmt.Sprintf("types are not comparable: %v vs %v", err.Type1, err.Type2) +} + // Add adds two values together // if v1 or v2 is null, then it returns null func Add(v1, v2 sqltypes.Value) (sqltypes.Value, error) { @@ -201,7 +212,10 @@ func NullsafeCompare(v1, v2 sqltypes.Value) (int, error) { if isByteComparable(v1) && isByteComparable(v2) { return bytes.Compare(v1.ToBytes(), v2.ToBytes()), nil } - return 0, fmt.Errorf("types are not comparable: %v vs %v", v1.Type(), v2.Type()) + return 0, UnsupportedComparisonError{ + Type1: v1.Type(), + Type2: v2.Type(), + } } // NullsafeHashcode returns an int64 hashcode that is guaranteed to be the same @@ -418,7 +432,7 @@ overflow: func intPlusIntWithError(v1, v2 int64) (EvalResult, error) { result := v1 + v2 if (result > v1) != (v2 > 0) { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v + %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v + %v", "BIGINT", v1, v2) } return EvalResult{typ: sqltypes.Int64, ival: result}, nil } @@ -427,7 +441,7 @@ func intMinusIntWithError(v1, v2 int64) (EvalResult, error) { result := v1 - v2 if (result < v1) != (v2 > 0) { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v - %v", v1, v2) + return EvalResult{}, 
vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v - %v", "BIGINT", v1, v2) } return EvalResult{typ: sqltypes.Int64, ival: result}, nil } @@ -435,7 +449,7 @@ func intMinusIntWithError(v1, v2 int64) (EvalResult, error) { func intTimesIntWithError(v1, v2 int64) (EvalResult, error) { result := v1 * v2 if v1 != 0 && result/v1 != v2 { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v * %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v * %v", "BIGINT", v1, v2) } return EvalResult{typ: sqltypes.Int64, ival: result}, nil @@ -443,7 +457,7 @@ func intTimesIntWithError(v1, v2 int64) (EvalResult, error) { func intMinusUintWithError(v1 int64, v2 uint64) (EvalResult, error) { if v1 < 0 || v1 < int64(v2) { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v - %v", "BIGINT UNSIGNED", v1, v2) } return uintMinusUintWithError(uint64(v1), v2) } @@ -454,7 +468,7 @@ func uintPlusInt(v1 uint64, v2 int64) EvalResult { func uintPlusIntWithError(v1 uint64, v2 int64) (EvalResult, error) { if v2 < 0 && v1 < uint64(v2) { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v + %v", "BIGINT UNSIGNED", v1, v2) } // convert to int -> uint is because for numeric operators (such as + or -) // where one of the operands is an unsigned integer, the result is unsigned by default. 
@@ -463,7 +477,7 @@ func uintPlusIntWithError(v1 uint64, v2 int64) (EvalResult, error) { func uintMinusIntWithError(v1 uint64, v2 int64) (EvalResult, error) { if int64(v1) < v2 && v2 > 0 { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v - %v", "BIGINT UNSIGNED", v1, v2) } // uint - (- int) = uint + int if v2 < 0 { @@ -474,7 +488,7 @@ func uintMinusIntWithError(v1 uint64, v2 int64) (EvalResult, error) { func uintTimesIntWithError(v1 uint64, v2 int64) (EvalResult, error) { if v2 < 0 || int64(v1) < 0 { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v * %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v * %v", "BIGINT UNSIGNED", v1, v2) } return uintTimesUintWithError(v1, uint64(v2)) } @@ -490,7 +504,7 @@ func uintPlusUint(v1, v2 uint64) EvalResult { func uintPlusUintWithError(v1, v2 uint64) (EvalResult, error) { result := v1 + v2 if result < v2 { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v + %v", "BIGINT UNSIGNED", v1, v2) } return EvalResult{typ: sqltypes.Uint64, uval: result}, nil } @@ -498,7 +512,7 @@ func uintPlusUintWithError(v1, v2 uint64) (EvalResult, error) { func uintMinusUintWithError(v1, v2 uint64) (EvalResult, error) { result := v1 - v2 if v2 > v1 { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s 
value is out of range in %v - %v", "BIGINT UNSIGNED", v1, v2) } return EvalResult{typ: sqltypes.Uint64, uval: result}, nil @@ -507,7 +521,7 @@ func uintMinusUintWithError(v1, v2 uint64) (EvalResult, error) { func uintTimesUintWithError(v1, v2 uint64) (EvalResult, error) { result := v1 * v2 if result < v2 || result < v1 { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v * %v", v1, v2) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v * %v", "BIGINT UNSIGNED", v1, v2) } return EvalResult{typ: sqltypes.Uint64, uval: result}, nil } @@ -554,7 +568,7 @@ func floatDivideAnyWithError(v1 float64, v2 EvalResult) (EvalResult, error) { resultMismatch := v2.fval*result != v1 if divisorLessThanOne && resultMismatch { - return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT is out of range in %v / %v", v1, v2.fval) + return EvalResult{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in %v / %v", "BIGINT", v1, v2.fval) } return EvalResult{typ: sqltypes.Float64, fval: v1 / v2.fval}, nil diff --git a/go/vt/vtgate/evalengine/arithmetic_test.go b/go/vt/vtgate/evalengine/arithmetic_test.go index a1e466e533e..bb1750e7b92 100644 --- a/go/vt/vtgate/evalengine/arithmetic_test.go +++ b/go/vt/vtgate/evalengine/arithmetic_test.go @@ -342,7 +342,7 @@ func TestArithmetics(t *testing.T) { // testing for overflow of float64 v1: NewFloat64(math.MaxFloat64), v2: NewFloat64(0.5), - err: "BIGINT is out of range in 1.7976931348623157e+308 / 0.5", + err: "BIGINT value is out of range in 1.7976931348623157e+308 / 0.5", }}, }, { operator: "*", diff --git a/go/vt/vtgate/evalengine/cached_size.go b/go/vt/vtgate/evalengine/cached_size.go new file mode 100644 index 00000000000..53fc71e4ffb --- /dev/null +++ b/go/vt/vtgate/evalengine/cached_size.go @@ -0,0 +1,91 @@ +/* +Copyright 2021 The 
Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package evalengine + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *BinaryOp) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Expr vitess.io/vitess/go/vt/vtgate/evalengine.BinaryExpr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Left vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Right vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *BindVariable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Key string + size += int64(len(cached.Key)) + return size +} +func (cached *Column) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + return size +} +func (cached *EvalResult) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field bytes []byte + size += int64(cap(cached.bytes)) + return size +} +func (cached *Literal) CachedSize(alloc bool) int64 { + if 
cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(56) + } + // field Val vitess.io/vitess/go/vt/vtgate/evalengine.EvalResult + size += cached.Val.CachedSize(false) + return size +} diff --git a/go/vt/vtgate/evalengine/evalengine.go b/go/vt/vtgate/evalengine/evalengine.go index 7e2b3a7226c..f5e123b5aba 100644 --- a/go/vt/vtgate/evalengine/evalengine.go +++ b/go/vt/vtgate/evalengine/evalengine.go @@ -200,34 +200,34 @@ func (v EvalResult) toSQLValue(resultType querypb.Type) sqltypes.Value { switch { case sqltypes.IsSigned(resultType): switch v.typ { - case sqltypes.Int64: - return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)) - case sqltypes.Uint64: + case sqltypes.Int64, sqltypes.Int32: + return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.ival), 10)) + case sqltypes.Uint64, sqltypes.Uint32: return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.uval), 10)) - case sqltypes.Float64: + case sqltypes.Float64, sqltypes.Float32: return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.fval), 10)) } case sqltypes.IsUnsigned(resultType): switch v.typ { - case sqltypes.Uint64: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)) - case sqltypes.Int64: + case sqltypes.Uint64, sqltypes.Uint32: + return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.uval), 10)) + case sqltypes.Int64, sqltypes.Int32: return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.ival), 10)) - case sqltypes.Float64: + case sqltypes.Float64, sqltypes.Float32: return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.fval), 10)) } case sqltypes.IsFloat(resultType) || resultType == sqltypes.Decimal: switch v.typ { - case sqltypes.Int64: - return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)) - case sqltypes.Uint64: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)) - case 
sqltypes.Float64: + case sqltypes.Int64, sqltypes.Int32: + return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.ival), 10)) + case sqltypes.Uint64, sqltypes.Uint32: + return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.uval), 10)) + case sqltypes.Float64, sqltypes.Float32: format := byte('g') if resultType == sqltypes.Decimal { format = 'f' } - return sqltypes.MakeTrusted(resultType, strconv.AppendFloat(nil, v.fval, format, -1, 64)) + return sqltypes.MakeTrusted(resultType, strconv.AppendFloat(nil, float64(v.fval), format, -1, 64)) } default: return sqltypes.MakeTrusted(resultType, v.bytes) diff --git a/go/vt/vtgate/evalengine/expressions.go b/go/vt/vtgate/evalengine/expressions.go index a32badee480..baafbe8a0c2 100644 --- a/go/vt/vtgate/evalengine/expressions.go +++ b/go/vt/vtgate/evalengine/expressions.go @@ -327,3 +327,15 @@ func evaluateByType(val *querypb.BindVariable) (EvalResult, error) { func (e *EvalResult) debugString() string { return fmt.Sprintf("(%s) %d %d %f %s", querypb.Type_name[int32(e.typ)], e.ival, e.uval, e.fval, string(e.bytes)) } + +// AreExprEqual checks if the provided Expr are the same or not +func AreExprEqual(expr1 Expr, expr2 Expr) bool { + // Check the types of the two expressions, if they don't match then the two are not equal + if fmt.Sprintf("%T", expr1) != fmt.Sprintf("%T", expr2) { + return false + } + if expr1.String() == expr2.String() { + return true + } + return false +} diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 6a23241d200..73fb0bf6346 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -29,6 +29,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sysvars" "context" @@ -63,7 +65,7 @@ import ( var vtgateHealthCheck discovery.HealthCheck var ( - errNoKeyspace = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no keyspace in database name specified. 
Supported database name format (items in <> are optional): keyspace<:shard><@type> or keyspace<[range]><@type>") + errNoKeyspace = vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)") defaultTabletType topodatapb.TabletType // TODO: @rafael - These two counters should be deprecated in favor of the ByTable ones. They are kept for now for backwards compatibility. @@ -97,32 +99,34 @@ type Executor struct { mu sync.Mutex vschema *vindexes.VSchema - normalize bool streamSize int - plans *cache.LRUCache + plans cache.Cache vschemaStats *VSchemaStats + normalize bool + warnShardedOnly bool + vm *VSchemaManager } var executorOnce sync.Once const pathQueryPlans = "/debug/query_plans" - const pathScatterStats = "/debug/scatter_stats" const pathVSchema = "/debug/vschema" // NewExecutor creates a new Executor. -func NewExecutor(ctx context.Context, serv srvtopo.Server, cell string, resolver *Resolver, normalize bool, streamSize int, queryPlanCacheSize int64) *Executor { +func NewExecutor(ctx context.Context, serv srvtopo.Server, cell string, resolver *Resolver, normalize, warnOnShardedOnly bool, streamSize int, cacheCfg *cache.Config) *Executor { e := &Executor{ - serv: serv, - cell: cell, - resolver: resolver, - scatterConn: resolver.scatterConn, - txConn: resolver.scatterConn.txConn, - plans: cache.NewLRUCache(queryPlanCacheSize), - normalize: normalize, - streamSize: streamSize, + serv: serv, + cell: cell, + resolver: resolver, + scatterConn: resolver.scatterConn, + txConn: resolver.scatterConn.txConn, + plans: cache.NewDefaultCacheImpl(cacheCfg), + normalize: normalize, + warnShardedOnly: warnOnShardedOnly, + streamSize: streamSize, } vschemaacl.Init() @@ -130,13 +134,12 @@ func NewExecutor(ctx context.Context, serv srvtopo.Server, cell string, resolver e.vm.watchSrvVSchema(ctx, cell) executorOnce.Do(func() { - stats.NewGaugeFunc("QueryPlanCacheLength", "Query 
plan cache length", e.plans.Length) - stats.NewGaugeFunc("QueryPlanCacheSize", "Query plan cache size", e.plans.Size) - stats.NewGaugeFunc("QueryPlanCacheCapacity", "Query plan cache capacity", e.plans.Capacity) + stats.NewGaugeFunc("QueryPlanCacheLength", "Query plan cache length", func() int64 { + return int64(e.plans.Len()) + }) + stats.NewGaugeFunc("QueryPlanCacheSize", "Query plan cache size", e.plans.UsedCapacity) + stats.NewGaugeFunc("QueryPlanCacheCapacity", "Query plan cache capacity", e.plans.MaxCapacity) stats.NewCounterFunc("QueryPlanCacheEvictions", "Query plan cache evictions", e.plans.Evictions) - stats.Publish("QueryPlanCacheOldest", stats.StringFunc(func() string { - return fmt.Sprintf("%v", e.plans.Oldest()) - })) http.Handle(pathQueryPlans, e) http.Handle(pathScatterStats, e) http.Handle(pathVSchema, e) @@ -174,7 +177,7 @@ func saveSessionStats(safeSession *SafeSession, stmtType sqlparser.StatementType return } if !safeSession.foundRowsHandled { - safeSession.FoundRows = result.RowsAffected + safeSession.FoundRows = uint64(len(result.Rows)) } if result.InsertID > 0 { safeSession.LastInsertId = result.InsertID @@ -182,7 +185,7 @@ func saveSessionStats(safeSession *SafeSession, stmtType sqlparser.StatementType switch stmtType { case sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete: safeSession.RowCount = int64(result.RowsAffected) - case sqlparser.StmtDDL, sqlparser.StmtSet, sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtRollback: + case sqlparser.StmtDDL, sqlparser.StmtSet, sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtRollback, sqlparser.StmtFlush: safeSession.RowCount = 0 } } @@ -212,7 +215,7 @@ func (e *Executor) legacyExecute(ctx context.Context, safeSession *SafeSession, logStats.TabletType = destTabletType.String() // Legacy gateway allows transactions only on MASTER if UsingLegacyGateway() && safeSession.InTransaction() && destTabletType != topodatapb.TabletType_MASTER { - return 
0, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Executor.execute: transactions are supported only for master tablet types, current type: %v", destTabletType) + return 0, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "transaction is supported only for master tablet type, current type: %v", destTabletType) } if bindVars == nil { bindVars = make(map[string]*querypb.BindVariable) @@ -234,8 +237,8 @@ func (e *Executor) legacyExecute(ctx context.Context, safeSession *SafeSession, switch stmtType { case sqlparser.StmtSelect, sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, - sqlparser.StmtDelete, sqlparser.StmtDDL, sqlparser.StmtUse, sqlparser.StmtExplain, sqlparser.StmtOther: - return 0, nil, vterrors.New(vtrpcpb.Code_INTERNAL, "BUG: not reachable as handled with plan execute") + sqlparser.StmtDelete, sqlparser.StmtDDL, sqlparser.StmtUse, sqlparser.StmtExplain, sqlparser.StmtOther, sqlparser.StmtFlush: + return 0, nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] not reachable, should be handled with plan execute") case sqlparser.StmtSet: qr, err := e.handleSet(ctx, sql, logStats) return sqlparser.StmtSet, qr, err @@ -247,7 +250,7 @@ func (e *Executor) legacyExecute(ctx context.Context, safeSession *SafeSession, // There are some statements which are not planned for special comments. 
return sqlparser.StmtComment, &sqltypes.Result{}, nil } - return 0, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unrecognized statement: %s", sql) + return 0, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] statement not handled: %s", sql) } // addNeededBindVars adds bind vars that are needed by the plan @@ -300,6 +303,8 @@ func (e *Executor) addNeededBindVars(bindVarNeeds *sqlparser.BindVarNeeds, bindV bindVars[key] = sqltypes.StringBindVariable(session.DDLStrategy) case sysvars.SessionUUID.Name: bindVars[key] = sqltypes.StringBindVariable(session.SessionUUID) + case sysvars.SessionEnableSystemSettings.Name: + bindVars[key] = sqltypes.BoolBindVariable(session.EnableSystemSettings) case sysvars.ReadAfterWriteGTID.Name: var v string ifReadAfterWriteExist(session, func(raw *vtgatepb.ReadAfterWrite) { @@ -320,6 +325,12 @@ func (e *Executor) addNeededBindVars(bindVarNeeds *sqlparser.BindVarNeeds, bindV } }) bindVars[key] = sqltypes.StringBindVariable(v) + case sysvars.Version.Name: + bindVars[key] = sqltypes.StringBindVariable(servenv.AppVersion.MySQLVersion()) + case sysvars.VersionComment.Name: + bindVars[key] = sqltypes.StringBindVariable(servenv.AppVersion.String()) + case sysvars.Socket.Name: + bindVars[key] = sqltypes.StringBindVariable(mysqlSocketPath()) } } @@ -370,7 +381,7 @@ func (e *Executor) handleBegin(ctx context.Context, safeSession *SafeSession, lo func (e *Executor) handleCommit(ctx context.Context, safeSession *SafeSession, logStats *LogStats) (*sqltypes.Result, error) { execStart := time.Now() logStats.PlanTime = execStart.Sub(logStats.StartTime) - logStats.ShardQueries = uint32(len(safeSession.ShardSessions)) + logStats.ShardQueries = uint64(len(safeSession.ShardSessions)) e.updateQueryCounts("Commit", "", "", int64(logStats.ShardQueries)) err := e.txConn.Commit(ctx, safeSession) @@ -386,7 +397,7 @@ func (e *Executor) Commit(ctx context.Context, safeSession *SafeSession) error { func (e *Executor) handleRollback(ctx context.Context, 
safeSession *SafeSession, logStats *LogStats) (*sqltypes.Result, error) { execStart := time.Now() logStats.PlanTime = execStart.Sub(logStats.StartTime) - logStats.ShardQueries = uint32(len(safeSession.ShardSessions)) + logStats.ShardQueries = uint64(len(safeSession.ShardSessions)) e.updateQueryCounts("Rollback", "", "", int64(logStats.ShardQueries)) err := e.txConn.Rollback(ctx, safeSession) logStats.CommitTime = time.Since(execStart) @@ -396,7 +407,7 @@ func (e *Executor) handleRollback(ctx context.Context, safeSession *SafeSession, func (e *Executor) handleSavepoint(ctx context.Context, safeSession *SafeSession, sql string, planType string, logStats *LogStats, nonTxResponse func(query string) (*sqltypes.Result, error), ignoreMaxMemoryRows bool) (*sqltypes.Result, error) { execStart := time.Now() logStats.PlanTime = execStart.Sub(logStats.StartTime) - logStats.ShardQueries = uint32(len(safeSession.ShardSessions)) + logStats.ShardQueries = uint64(len(safeSession.ShardSessions)) e.updateQueryCounts(planType, "", "", int64(logStats.ShardQueries)) defer func() { logStats.ExecuteTime = time.Since(execStart) @@ -437,11 +448,11 @@ func (e *Executor) CloseSession(ctx context.Context, safeSession *SafeSession) e } func (e *Executor) handleSet(ctx context.Context, sql string, logStats *LogStats) (*sqltypes.Result, error) { - stmt, err := sqlparser.Parse(sql) + stmt, reservedVars, err := sqlparser.Parse2(sql) if err != nil { return nil, err } - rewrittenAST, err := sqlparser.PrepareAST(stmt, nil, "vtg", false, "") + rewrittenAST, err := sqlparser.PrepareAST(stmt, reservedVars, nil, "vtg", false, "") if err != nil { return nil, err } @@ -478,11 +489,11 @@ func (e *Executor) handleSet(ctx context.Context, sql string, logStats *LogStats } val, ok := value.(string) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected value type for charset: %v", value) + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, 
"unexpected value type for '%s': %v", name, value) } _, err = e.handleSetVitessMetadata(ctx, name, val) default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "should have been handled by planning: %s", sql) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable statement: %s", sql) } if err != nil { @@ -533,10 +544,10 @@ func getValueFor(expr *sqlparser.SetExpr) (interface{}, error) { func (e *Executor) handleSetVitessMetadata(ctx context.Context, name, value string) (*sqltypes.Result, error) { //TODO(kalfonso): move to its own acl check and consolidate into an acl component that can handle multiple operations (vschema, metadata) - allowed := vschemaacl.Authorized(callerid.ImmediateCallerIDFromContext(ctx)) + user := callerid.ImmediateCallerIDFromContext(ctx) + allowed := vschemaacl.Authorized(user) if !allowed { - return nil, vterrors.Errorf(vtrpcpb.Code_PERMISSION_DENIED, "not authorized to perform vitess metadata operations") - + return nil, vterrors.NewErrorf(vtrpcpb.Code_PERMISSION_DENIED, vterrors.AccessDeniedError, "User '%s' not authorized to perform vitess metadata operations", user.GetUsername()) } ts, err := e.serv.GetTopoServer() @@ -554,7 +565,7 @@ func (e *Executor) handleSetVitessMetadata(ctx context.Context, name, value stri return nil, err } - return &sqltypes.Result{RowsAffected: 1}, nil + return &sqltypes.Result{}, nil } func (e *Executor) handleShowVitessMetadata(ctx context.Context, opt *sqlparser.ShowTablesOpt) (*sqltypes.Result, error) { @@ -583,9 +594,8 @@ func (e *Executor) handleShowVitessMetadata(ctx context.Context, opt *sqlparser. } return &sqltypes.Result{ - Fields: buildVarCharFields("Key", "Value"), - Rows: rows, - RowsAffected: uint64(len(rows)), + Fields: buildVarCharFields("Key", "Value"), + Rows: rows, }, nil } @@ -597,11 +607,11 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql showOuter, ok := stmt.(*sqlparser.Show) if !ok { // This code is unreachable. 
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unrecognized SHOW statement: %v", sql) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unrecognized SHOW statement: %v", sql) } show, ok := showOuter.Internal.(*sqlparser.ShowLegacy) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: This should only be SHOW Legacy statement type: %v", sql) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] This should only be SHOW Legacy statement type: %v", sql) } ignoreMaxMemoryRows := sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt) execStart := time.Now() @@ -623,9 +633,8 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql "YES") rows = append(rows, row) return &sqltypes.Result{ - Fields: buildVarCharFields("Engine", "Support", "Comment", "Transactions", "XA", "Savepoints"), - Rows: rows, - RowsAffected: 1, + Fields: buildVarCharFields("Engine", "Support", "Comment", "Transactions", "XA", "Savepoints"), + Rows: rows, }, nil // for PLUGINS, return InnoDb + mysql_native_password case sqlparser.KeywordString(sqlparser.PLUGINS): @@ -638,59 +647,9 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql "GPL") rows = append(rows, row) return &sqltypes.Result{ - Fields: buildVarCharFields("Name", "Status", "Type", "Library", "License"), - Rows: rows, - RowsAffected: 1, + Fields: buildVarCharFields("Name", "Status", "Type", "Library", "License"), + Rows: rows, }, nil - case "create table": - if !show.Table.Qualifier.IsEmpty() { - // Explicit keyspace was passed. Use that for targeting but remove from the query itself. - destKeyspace = show.Table.Qualifier.String() - show.Table.Qualifier = sqlparser.NewTableIdent("") - } else { - // No keyspace was indicated. Try to find one using the vschema. 
- tbl, err := e.VSchema().FindTable(destKeyspace, show.Table.Name.String()) - if err != nil { - return nil, err - } - destKeyspace = tbl.Keyspace.Name - } - sql = sqlparser.String(show) - case sqlparser.KeywordString(sqlparser.COLUMNS): - if !show.OnTable.Qualifier.IsEmpty() { - destKeyspace = show.OnTable.Qualifier.String() - show.OnTable.Qualifier = sqlparser.NewTableIdent("") - } else if show.ShowTablesOpt != nil { - if show.ShowTablesOpt.DbName != "" { - destKeyspace = show.ShowTablesOpt.DbName - show.ShowTablesOpt.DbName = "" - } - } else { - break - } - sql = sqlparser.String(show) - case sqlparser.KeywordString(sqlparser.INDEX), sqlparser.KeywordString(sqlparser.KEYS), sqlparser.KeywordString(sqlparser.INDEXES): - if !show.OnTable.Qualifier.IsEmpty() { - destKeyspace = show.OnTable.Qualifier.String() - show.OnTable.Qualifier = sqlparser.NewTableIdent("") - } else if show.ShowTablesOpt != nil { - if show.ShowTablesOpt.DbName != "" { - destKeyspace = show.ShowTablesOpt.DbName - show.ShowTablesOpt.DbName = "" - } - } else { - break - } - sql = sqlparser.String(show) - case sqlparser.KeywordString(sqlparser.TABLES): - if show.ShowTablesOpt != nil && show.ShowTablesOpt.DbName != "" { - if destKeyspace == "" { - // Change "show tables from " to "show tables" directed to that keyspace. 
- destKeyspace = show.ShowTablesOpt.DbName - } - show.ShowTablesOpt.DbName = "" - } - sql = sqlparser.String(show) case sqlparser.KeywordString(sqlparser.VITESS_SHARDS): showVitessShardsFilters := func(show *sqlparser.ShowLegacy) ([]func(string) bool, []func(string, *topodatapb.ShardReference) bool) { keyspaceFilters := []func(string) bool{} @@ -766,9 +725,8 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql } return &sqltypes.Result{ - Fields: buildVarCharFields("Shards"), - Rows: rows, - RowsAffected: uint64(len(rows)), + Fields: buildVarCharFields("Shards"), + Rows: rows, }, nil case sqlparser.KeywordString(sqlparser.VITESS_TABLETS): return e.showTablets(show) @@ -776,9 +734,8 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql var rows [][]sqltypes.Value rows = append(rows, buildVarCharRow(safeSession.TargetString)) return &sqltypes.Result{ - Fields: buildVarCharFields("Target"), - Rows: rows, - RowsAffected: uint64(len(rows)), + Fields: buildVarCharFields("Target"), + Rows: rows, }, nil case "vschema tables": if destKeyspace == "" { @@ -786,7 +743,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql } ks, ok := e.VSchema().Keyspaces[destKeyspace] if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyspace %s not found in vschema", destKeyspace) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Unknown database '%s' in vschema", destKeyspace) } var tables []string @@ -801,9 +758,8 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql } return &sqltypes.Result{ - Fields: buildVarCharFields("Tables"), - Rows: rows, - RowsAffected: uint64(len(rows)), + Fields: buildVarCharFields("Tables"), + Rows: rows, }, nil case "vschema vindexes": vschema := e.vm.GetCurrentSrvVschema() @@ -831,7 +787,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql tableName := 
show.OnTable.Name.String() table, ok := ks.Tables[tableName] if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table `%s` does not exist in keyspace `%s`", tableName, ksName) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.NoSuchTable, "table '%s' does not exist in keyspace '%s'", tableName, ksName) } for _, colVindex := range table.ColumnVindexes { @@ -853,9 +809,8 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql } return &sqltypes.Result{ - Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), - Rows: rows, - RowsAffected: uint64(len(rows)), + Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), + Rows: rows, }, nil } @@ -886,9 +841,8 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql } } return &sqltypes.Result{ - Fields: buildVarCharFields("Keyspace", "Name", "Type", "Params", "Owner"), - Rows: rows, - RowsAffected: uint64(len(rows)), + Fields: buildVarCharFields("Keyspace", "Name", "Type", "Params", "Owner"), + Rows: rows, }, nil case sqlparser.KeywordString(sqlparser.WARNINGS): fields := []*querypb.Field{ @@ -1031,9 +985,8 @@ func (e *Executor) showTablets(show *sqlparser.ShowLegacy) (*sqltypes.Result, er } } return &sqltypes.Result{ - Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "MasterTermStartTime"), - Rows: rows, - RowsAffected: uint64(len(rows)), + Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "MasterTermStartTime"), + Rows: rows, }, nil } @@ -1089,7 +1042,7 @@ func (e *Executor) StreamExecute(ctx context.Context, method string, safeSession bindVars = make(map[string]*querypb.BindVariable) } query, comments := sqlparser.SplitMarginComments(sql) - vcursor, _ := newVCursorImpl(ctx, safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv) + vcursor, _ := newVCursorImpl(ctx, safeSession, 
comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly) vcursor.SetIgnoreMaxMemoryRows(true) switch stmtType { case sqlparser.StmtStream: @@ -1097,19 +1050,15 @@ func (e *Executor) StreamExecute(ctx context.Context, method string, safeSession // TODO: support keyRange syntax return e.handleMessageStream(ctx, sql, target, callback, vcursor, logStats) case sqlparser.StmtSelect, sqlparser.StmtDDL, sqlparser.StmtSet, sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete, - sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment: + sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment, sqlparser.StmtFlush: // These may or may not all work, but getPlan() should either return a plan with instructions // or an error, so it's safe to try. break - case sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtRollback: - // These statements don't populate plan.Instructions. We want to make sure we don't try to - // dereference nil Instructions which would panic. 
- fallthrough case sqlparser.StmtVStream: log.Infof("handleVStream called with target %v", target) return e.handleVStream(ctx, sql, target, callback, vcursor, logStats) default: - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported statement type for OLAP: %s", stmtType) + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "OLAP does not supported statement type: %s", stmtType) } plan, err := e.getPlan( @@ -1130,6 +1079,11 @@ func (e *Executor) StreamExecute(ctx context.Context, method string, safeSession return err } + // add any warnings that the planner wants to add + for _, warning := range plan.Warnings { + safeSession.RecordWarning(warning) + } + execStart := time.Now() logStats.PlanTime = execStart.Sub(logStats.StartTime) @@ -1205,7 +1159,7 @@ func (e *Executor) handleMessageStream(ctx context.Context, sql string, target q streamStmt, ok := stmt.(*sqlparser.Stream) if !ok { logStats.Error = err - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unrecognized STREAM statement: %v", sql) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unrecognized STREAM statement: %v", sql) } // TODO: Add support for destination target in streamed queries @@ -1283,7 +1237,7 @@ func (e *Executor) getPlan(vcursor *vcursorImpl, sql string, comments sqlparser. return nil, errors.New("vschema not initialized") } - stmt, err := sqlparser.Parse(sql) + stmt, reservedVars, err := sqlparser.Parse2(sql) if err != nil { return nil, err } @@ -1291,20 +1245,15 @@ func (e *Executor) getPlan(vcursor *vcursorImpl, sql string, comments sqlparser. 
statement := stmt bindVarNeeds := &sqlparser.BindVarNeeds{} if !sqlparser.IgnoreMaxPayloadSizeDirective(statement) && !isValidPayloadSize(query) { - return nil, mysql.NewSQLError(mysql.ERNetPacketTooLarge, "", "query payload size above threshold") + return nil, vterrors.NewErrorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.NetPacketTooLarge, "query payload size above threshold") } ignoreMaxMemoryRows := sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt) vcursor.SetIgnoreMaxMemoryRows(ignoreMaxMemoryRows) - planKey := vcursor.planPrefixKey() + ":" + sql - if plan, ok := e.plans.Get(planKey); ok { - return plan.(*engine.Plan), nil - } - // Normalize if possible and retry. - if (e.normalize && sqlparser.CanNormalize(stmt)) || sqlparser.IsSetStatement(stmt) { + if (e.normalize && sqlparser.CanNormalize(stmt)) || sqlparser.MustRewriteAST(stmt) { parameterize := e.normalize // the public flag is called normalize - result, err := sqlparser.PrepareAST(stmt, bindVars, "vtg", parameterize, vcursor.keyspace) + result, err := sqlparser.PrepareAST(stmt, reservedVars, bindVars, "vtg", parameterize, vcursor.keyspace) if err != nil { return nil, err } @@ -1318,14 +1267,19 @@ func (e *Executor) getPlan(vcursor *vcursorImpl, sql string, comments sqlparser. 
logStats.BindVariables = bindVars } - planKey = vcursor.planPrefixKey() + ":" + query + planKey := vcursor.planPrefixKey() + ":" + query if plan, ok := e.plans.Get(planKey); ok { return plan.(*engine.Plan), nil } - plan, err := planbuilder.BuildFromStmt(query, statement, vcursor, bindVarNeeds) + + plan, err := planbuilder.BuildFromStmt(query, statement, reservedVars, vcursor, bindVarNeeds) if err != nil { return nil, err } + + plan.Warnings = vcursor.warnings + vcursor.warnings = nil + if !skipQueryPlanCache && !sqlparser.SkipQueryPlanCacheDirective(statement) && sqlparser.CachePlan(statement) { e.plans.Set(planKey, plan) } @@ -1337,7 +1291,24 @@ func skipQueryPlanCache(safeSession *SafeSession) bool { if safeSession == nil || safeSession.Options == nil { return false } - return safeSession.Options.SkipQueryPlanCache + return safeSession.Options.SkipQueryPlanCache || safeSession.Options.HasCreatedTempTables +} + +type cacheItem struct { + Key string + Value *engine.Plan +} + +func (e *Executor) debugCacheEntries() (items []cacheItem) { + e.plans.ForEach(func(value interface{}) bool { + plan := value.(*engine.Plan) + items = append(items, cacheItem{ + Key: plan.Original, + Value: plan, + }) + return true + }) + return } // ServeHTTP shows the current plans in the query cache. 
@@ -1349,7 +1320,7 @@ func (e *Executor) ServeHTTP(response http.ResponseWriter, request *http.Request switch request.URL.Path { case pathQueryPlans: - returnAsJSON(response, e.plans.Items()) + returnAsJSON(response, e.debugCacheEntries()) case pathVSchema: returnAsJSON(response, e.VSchema()) case pathScatterStats: @@ -1372,7 +1343,7 @@ func returnAsJSON(response http.ResponseWriter, stuff interface{}) { } // Plans returns the LRU plan cache -func (e *Executor) Plans() *cache.LRUCache { +func (e *Executor) Plans() cache.Cache { return e.plans } @@ -1435,19 +1406,19 @@ func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([ } else { cmpExp, ok := showFilter.Filter.(*sqlparser.ComparisonExpr) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expect a 'LIKE' or '=' expression") + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "expect a 'LIKE' or '=' expression") } left, ok := cmpExp.Left.(*sqlparser.ColName) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expect left side to be 'charset'") + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "expect left side to be 'column'") } leftOk := left.Name.EqualString(charset) if leftOk { literal, ok := cmpExp.Right.(*sqlparser.Literal) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "we expect the right side to be a string") + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "expect right side to be string") } rightString := string(literal.Val) @@ -1554,7 +1525,7 @@ func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql st } if UsingLegacyGateway() && safeSession.InTransaction() && destTabletType != topodatapb.TabletType_MASTER { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Executor.prepare: transactions are supported only for master tablet types, current type: %v", destTabletType) + return 
nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "transaction is supported only for master tablet type, current type: %v", destTabletType) } if bindVars == nil { bindVars = make(map[string]*querypb.BindVariable) @@ -1578,7 +1549,7 @@ func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql st case sqlparser.StmtSelect: return e.handlePrepare(ctx, safeSession, sql, bindVars, logStats) case sqlparser.StmtDDL, sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtRollback, sqlparser.StmtSet, sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete, - sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment, sqlparser.StmtExplain: + sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment, sqlparser.StmtExplain, sqlparser.StmtFlush: return nil, nil case sqlparser.StmtShow: res, err := e.handleShow(ctx, safeSession, sql, bindVars, dest, destKeyspace, destTabletType, logStats) @@ -1587,13 +1558,13 @@ func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql st } return res.Fields, nil } - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unrecognized statement: %s", sql) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unrecognized prepare statement: %s", sql) } func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *LogStats) ([]*querypb.Field, error) { // V3 mode. 
query, comments := sqlparser.SplitMarginComments(sql) - vcursor, _ := newVCursorImpl(ctx, safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv) + vcursor, _ := newVCursorImpl(ctx, safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly) plan, err := e.getPlan( vcursor, query, @@ -1610,6 +1581,12 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, return nil, err } + err = e.addNeededBindVars(plan.BindVarNeeds, bindVars, safeSession) + if err != nil { + logStats.Error = err + return nil, err + } + qr, err := plan.Instructions.GetFields(vcursor, bindVars) logStats.ExecuteTime = time.Since(execStart) var errCount uint64 @@ -1620,7 +1597,7 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, } logStats.RowsAffected = qr.RowsAffected - plan.AddStats(1, time.Since(logStats.StartTime), uint64(logStats.ShardQueries), logStats.RowsAffected, errCount) + plan.AddStats(1, time.Since(logStats.StartTime), logStats.ShardQueries, qr.RowsAffected, uint64(len(qr.Rows)), errCount) return qr.Fields, err } diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index 1219fd4c96f..bce657e7993 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -50,7 +50,7 @@ func TestUpdateEqual(t *testing.T) { _, err := executorExec(executor, "update user set a=2 where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "update user set a = 2 where id = 1", + Sql: "update `user` set a = 2 where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbc1.Queries, wantQueries) { @@ -65,7 +65,7 @@ func TestUpdateEqual(t *testing.T) { _, err = executorExec(executor, "update user set a=2 where id = 3", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "update user set a = 2 where id = 3", + Sql: "update `user` set a = 2 
where id = 3", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbc2.Queries, wantQueries) { @@ -225,10 +225,10 @@ func TestUpdateMultiOwned(t *testing.T) { t.Fatal(err) } wantQueries := []*querypb.BoundQuery{{ - Sql: "select id, a, b, c, d, e, f, a = 1 and b = 2, e = 3 and f = 4 from user where id = 1 for update", + Sql: "select id, a, b, c, d, e, f, a = 1 and b = 2, e = 3 and f = 4 from `user` where id = 1 for update", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "update user set a = 1, b = 2, f = 4, e = 3 where id = 1", + Sql: "update `user` set a = 1, b = 2, f = 4, e = 3 where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbc1.Queries, wantQueries) { @@ -277,7 +277,7 @@ func TestUpdateComments(t *testing.T) { _, err := executorExec(executor, "update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "update user set a = 2 where id = 1 /* trailing */", + Sql: "update `user` set a = 2 where id = 1 /* trailing */", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbc1.Queries, wantQueries) { @@ -295,7 +295,7 @@ func TestUpdateNormalize(t *testing.T) { _, err := executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "/* leading */ update user set a = :vtg1 where id = :vtg2 /* trailing */", + Sql: "/* leading */ update `user` set a = :vtg1 where id = :vtg2 /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "vtg1": sqltypes.TestBindVariable(int64(2)), "vtg2": sqltypes.TestBindVariable(int64(1)), @@ -314,7 +314,7 @@ func TestUpdateNormalize(t *testing.T) { _, err = executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "/* leading */ update user set a = :vtg1 where id 
= :vtg2 /* trailing */", + Sql: "/* leading */ update `user` set a = :vtg1 where id = :vtg2 /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "vtg1": sqltypes.TestBindVariable(int64(2)), "vtg2": sqltypes.TestBindVariable(int64(1)), @@ -344,10 +344,10 @@ func TestDeleteEqual(t *testing.T) { _, err := executorExec(executor, "delete from user where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select Id, `name` from user where id = 1 for update", + Sql: "select Id, `name` from `user` where id = 1 for update", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "delete from user where id = 1", + Sql: "delete from `user` where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbc.Queries, wantQueries) { @@ -371,10 +371,10 @@ func TestDeleteEqual(t *testing.T) { _, err = executorExec(executor, "delete from user where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select Id, `name` from user where id = 1 for update", + Sql: "select Id, `name` from `user` where id = 1 for update", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "delete from user where id = 1", + Sql: "delete from `user` where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbc.Queries, wantQueries) { @@ -529,10 +529,10 @@ func TestDeleteComments(t *testing.T) { _, err := executorExec(executor, "delete from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select Id, `name` from user where id = 1 for update /* trailing */", + Sql: "select Id, `name` from `user` where id = 1 for update /* trailing */", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "delete from user where id = 1 /* trailing */", + Sql: "delete from `user` where id = 1 /* trailing */", BindVariables: map[string]*querypb.BindVariable{}, }} if !reflect.DeepEqual(sbc.Queries, 
wantQueries) { @@ -560,7 +560,7 @@ func TestInsertSharded(t *testing.T) { _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_0, 2, :_name_0)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), @@ -592,7 +592,7 @@ func TestInsertSharded(t *testing.T) { _, err = executorExec(executor, "insert into user(id, v, name) values (3, 2, 'myname2')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_0, 2, :_name_0)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(3), "__seq0": sqltypes.Int64BindVariable(3), @@ -637,10 +637,7 @@ func TestInsertShardedKeyrange(t *testing.T) { // If a unique vindex returns a keyrange, we fail the insert _, err := executorExec(executor, "insert into keyrange_table(krcol_unique, krcol) values(1, 1)", nil) - want := "execInsertSharded: getInsertShardedRoute: could not map [INT64(1)] to a unique keyspace id: DestinationKeyRange(-10)" - if err == nil || err.Error() != want { - t.Errorf("executorExec error: %v, want %s", err, want) - } + require.EqualError(t, err, "could not map [INT64(1)] to a unique keyspace id: DestinationKeyRange(-10)") } func TestInsertShardedAutocommitLookup(t *testing.T) { @@ -694,7 +691,7 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_0, 2, :_name_0)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 
2, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), @@ -955,7 +952,7 @@ func TestInsertComments(t *testing.T) { _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 'myname') /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_0, 2, :_name_0) /* trailing */", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0) /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), @@ -993,7 +990,7 @@ func TestInsertGeneratorSharded(t *testing.T) { result, err := executorExec(executor, "insert into user(v, `name`) values (2, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "insert into user(v, `name`, id) values (2, :_name_0, :_Id_0)", + Sql: "insert into `user`(v, `name`, id) values (2, :_name_0, :_Id_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "__seq0": sqltypes.Int64BindVariable(1), @@ -1014,11 +1011,11 @@ func TestInsertGeneratorSharded(t *testing.T) { }, }} utils.MustMatch(t, wantQueries, sbclookup.Queries, "sbclookup.Queries") - wantResult := *sandboxconn.SingleRowResult - wantResult.InsertID = 1 - if !result.Equal(&wantResult) { - t.Errorf("result: %+v, want %+v", result, &wantResult) + wantResult := &sqltypes.Result{ + InsertID: 1, + RowsAffected: 1, } + utils.MustMatch(t, wantResult, result) } func TestInsertAutoincSharded(t *testing.T) { @@ -1066,11 +1063,11 @@ func TestInsertGeneratorUnsharded(t *testing.T) { if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { t.Errorf("sbclookup.Queries: \n%#v, want \n%#v\n", sbclookup.Queries, wantQueries) } - wantResult := *sandboxconn.SingleRowResult - wantResult.InsertID = 1 - if 
!result.Equal(&wantResult) { - t.Errorf("result: %+v, want %+v", result, &wantResult) + wantResult := &sqltypes.Result{ + InsertID: 1, + RowsAffected: 1, } + utils.MustMatch(t, wantResult, result) } func TestInsertAutoincUnsharded(t *testing.T) { @@ -1163,11 +1160,11 @@ func TestInsertLookupOwnedGenerator(t *testing.T) { if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { t.Errorf("sbclookup.Queries:\n%+v, want\n%+v\n", sbclookup.Queries, wantQueries) } - wantResult := *sandboxconn.SingleRowResult - wantResult.InsertID = 4 - if !result.Equal(&wantResult) { - t.Errorf("result:\n%+v, want\n%+v", result, &wantResult) + wantResult := &sqltypes.Result{ + InsertID: 4, + RowsAffected: 1, } + utils.MustMatch(t, wantResult, result) } func TestInsertLookupUnowned(t *testing.T) { @@ -1243,10 +1240,7 @@ func TestInsertPartialFail1(t *testing.T) { "insert into user(id, v, name) values (1, 2, 'myname')", nil, ) - want := "execInsertSharded:" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("insert first DML fail: %v, must start with %s", err, want) - } + require.Error(t, err) } // If a statement gets broken up into two, and the second one fails @@ -1277,7 +1271,7 @@ func TestMultiInsertSharded(t *testing.T) { _, err := executorExec(executor, "insert into user(id, v, name) values (1, 1, 'myname1'),(3, 3, 'myname3')", nil) require.NoError(t, err) wantQueries1 := []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_0, 1, :_name_0)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 1, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname1")), @@ -1289,7 +1283,7 @@ func TestMultiInsertSharded(t *testing.T) { }} wantQueries2 := []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_1, 3, :_name_1)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_1, 3, :_name_1)", BindVariables: 
map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname1")), @@ -1326,7 +1320,7 @@ func TestMultiInsertSharded(t *testing.T) { _, err = executorExec(executor, "insert into user(id, v, name) values (1, 1, 'myname1'),(2, 2, 'myname2')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "insert into user(id, v, `name`) values (:_Id_0, 1, :_name_0),(:_Id_1, 2, :_name_1)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 1, :_name_0),(:_Id_1, 2, :_name_1)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "__seq0": sqltypes.Int64BindVariable(1), @@ -1432,11 +1426,11 @@ func TestMultiInsertGenerator(t *testing.T) { }, }} utils.MustMatch(t, wantQueries, sbclookup.Queries, "sbclookup.Queries") - wantResult := *sandboxconn.SingleRowResult - wantResult.InsertID = 1 - if !result.Equal(&wantResult) { - t.Errorf("result: %+v, want %+v", result, &wantResult) + wantResult := &sqltypes.Result{ + InsertID: 1, + RowsAffected: 1, } + utils.MustMatch(t, wantResult, result) } func TestMultiInsertGeneratorSparse(t *testing.T) { @@ -1484,11 +1478,11 @@ func TestMultiInsertGeneratorSparse(t *testing.T) { }, }} utils.MustMatch(t, wantQueries, sbclookup.Queries, "sbclookup.Queries") - wantResult := *sandboxconn.SingleRowResult - wantResult.InsertID = 1 - if !result.Equal(&wantResult) { - t.Errorf("result: %+v, want %+v", result, &wantResult) + wantResult := &sqltypes.Result{ + InsertID: 1, + RowsAffected: 1, } + utils.MustMatch(t, wantResult, result) } func TestInsertBadAutoInc(t *testing.T) { @@ -1611,7 +1605,7 @@ func TestKeyDestRangeQuery(t *testing.T) { masterSession.TargetString = "TestExecutor[-]" _, err := executorExec(executor, insertInput, nil) - require.EqualError(t, err, "range queries not supported for inserts: TestExecutor[-]") + require.EqualError(t, err, "range queries are not allowed for insert statement: TestExecutor[-]") 
masterSession.TargetString = "" } @@ -1704,7 +1698,7 @@ func TestUpdateLastInsertID(t *testing.T) { _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "update user set a = :__lastInsertId where id = :vtg1", + Sql: "update `user` set a = :__lastInsertId where id = :vtg1", BindVariables: map[string]*querypb.BindVariable{ "__lastInsertId": sqltypes.Uint64BindVariable(43), "vtg1": sqltypes.Int64BindVariable(1)}, @@ -1747,8 +1741,7 @@ func TestReservedConnDML(t *testing.T) { defer QueryLogger.Unsubscribe(logChan) ctx := context.Background() - *sysVarSetEnabled = true - session := NewAutocommitSession(&vtgatepb.Session{}) + session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true}) _, err := executor.Execute(ctx, "TestReservedConnDML", session, "use "+KsTestUnsharded, nil) require.NoError(t, err) @@ -1779,7 +1772,7 @@ func TestReservedConnDML(t *testing.T) { _, err = executor.Execute(ctx, "TestReservedConnDML", session, "begin", nil) require.NoError(t, err) - sbc.EphemeralShardErr = mysql.NewSQLError(mysql.CRServerGone, mysql.SSUnknownSQLState, "connection gone") + sbc.EphemeralShardErr = mysql.NewSQLError(mysql.CRServerGone, mysql.SSNetError, "connection gone") // as the first time the query fails due to connection loss i.e. reserved conn lost. It will be recreated to set statement will be executed again. 
wantQueries = append(wantQueries, &querypb.BoundQuery{Sql: "set @@default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}}, diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index fcc24f67b08..8f9b865987b 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -30,6 +30,7 @@ import ( "context" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/discovery" @@ -304,7 +305,6 @@ var unshardedVSchema = ` const ( testBufferSize = 10 - testCacheSize = int64(10) ) type DestinationAnyShardPickerFirstShard struct{} @@ -398,7 +398,7 @@ func createLegacyExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandb bad.VSchema = badVSchema getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} return executor, sbc1, sbc2, sbclookup @@ -433,7 +433,7 @@ func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn bad.VSchema = badVSchema getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} return executor, sbc1, sbc2, sbclookup @@ -453,7 +453,7 @@ func createCustomExecutor(vschema string) (executor *Executor, sbc1, sbc2, sbclo sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil) getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = 
NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) return executor, sbc1, sbc2, sbclookup } diff --git a/go/vt/vtgate/executor_scatter_stats.go b/go/vt/vtgate/executor_scatter_stats.go index 9ba7ae3ea3a..74641f11de6 100644 --- a/go/vt/vtgate/executor_scatter_stats.go +++ b/go/vt/vtgate/executor_scatter_stats.go @@ -20,6 +20,7 @@ import ( "fmt" "html/template" "net/http" + "sync/atomic" "time" "vitess.io/vitess/go/vt/logz" @@ -56,12 +57,12 @@ func (e *Executor) gatherScatterStats() (statsResults, error) { totalExecTime := time.Duration(0) totalCount := uint64(0) + var err error plans := make([]*engine.Plan, 0) routes := make([]*engine.Route, 0) // First we go over all plans and collect statistics and all query plans for scatter queries - for _, item := range e.plans.Items() { - plan := item.Value.(*engine.Plan) - + e.plans.ForEach(func(value interface{}) bool { + plan := value.(*engine.Plan) scatter := engine.Find(findScatter, plan.Instructions) readOnly := !engine.Exists(isUpdating, plan.Instructions) isScatter := scatter != nil @@ -69,39 +70,47 @@ func (e *Executor) gatherScatterStats() (statsResults, error) { if isScatter { route, isRoute := scatter.(*engine.Route) if !isRoute { - return statsResults{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "expected a route, but found a %v", scatter) + err = vterrors.Errorf(vtrpc.Code_INTERNAL, "expected a route, but found a %v", scatter) + return false } plans = append(plans, plan) routes = append(routes, route) - scatterExecTime += plan.ExecTime - scatterCount += plan.ExecCount + scatterExecTime += time.Duration(atomic.LoadUint64(&plan.ExecTime)) + scatterCount += atomic.LoadUint64(&plan.ExecCount) } if readOnly { - readOnlyTime += plan.ExecTime - readOnlyCount += plan.ExecCount + readOnlyTime += time.Duration(atomic.LoadUint64(&plan.ExecTime)) + readOnlyCount += 
atomic.LoadUint64(&plan.ExecCount) } - totalExecTime += plan.ExecTime - totalCount += plan.ExecCount + totalExecTime += time.Duration(atomic.LoadUint64(&plan.ExecTime)) + totalCount += atomic.LoadUint64(&plan.ExecCount) + return true + }) + if err != nil { + return statsResults{}, err } // Now we'll go over all scatter queries we've found and produce result items for each resultItems := make([]*statsResultItem, len(plans)) for i, plan := range plans { route := routes[i] + execCount := atomic.LoadUint64(&plan.ExecCount) + execTime := time.Duration(atomic.LoadUint64(&plan.ExecTime)) + var avgTimePerQuery int64 - if plan.ExecCount != 0 { - avgTimePerQuery = plan.ExecTime.Nanoseconds() / int64(plan.ExecCount) + if execCount != 0 { + avgTimePerQuery = execTime.Nanoseconds() / int64(execCount) } resultItems[i] = &statsResultItem{ Query: plan.Original, AvgTimePerQuery: time.Duration(avgTimePerQuery), - PercentTimeOfReads: 100 * float64(plan.ExecTime) / float64(readOnlyTime), - PercentTimeOfScatters: 100 * float64(plan.ExecTime) / float64(scatterExecTime), - PercentCountOfReads: 100 * float64(plan.ExecCount) / float64(readOnlyCount), - PercentCountOfScatters: 100 * float64(plan.ExecCount) / float64(scatterCount), + PercentTimeOfReads: 100 * float64(execTime) / float64(readOnlyTime), + PercentTimeOfScatters: 100 * float64(execTime) / float64(scatterExecTime), + PercentCountOfReads: 100 * float64(execCount) / float64(readOnlyCount), + PercentCountOfScatters: 100 * float64(execCount) / float64(scatterCount), From: route.Keyspace.Name + "." 
+ route.TableName, - Count: plan.ExecCount, + Count: execCount, } } result := statsResults{ diff --git a/go/vt/vtgate/executor_scatter_stats_test.go b/go/vt/vtgate/executor_scatter_stats_test.go index f1b18a6c080..12a591299cf 100644 --- a/go/vt/vtgate/executor_scatter_stats_test.go +++ b/go/vt/vtgate/executor_scatter_stats_test.go @@ -68,6 +68,8 @@ func TestScatterStatsHttpWriting(t *testing.T) { _, err = executor.Execute(context.Background(), "TestExecutorResultsExceeded", session, query4, nil) require.NoError(t, err) + executor.plans.Wait() + recorder := httptest.NewRecorder() executor.WriteScatterStats(recorder) diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index 99cb73273f4..6f25dc460bc 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/test/utils" "github.com/stretchr/testify/assert" @@ -69,8 +70,8 @@ func TestSelectDBA(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{Sql: query, BindVariables: map[string]*querypb.BindVariable{}}} utils.MustMatch(t, wantQueries, sbc1.Queries) - sbc1.Queries = nil + query = "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'" _, err = executor.Execute(context.Background(), "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), @@ -84,33 +85,6 @@ func TestSelectDBA(t *testing.T) { }}} utils.MustMatch(t, wantQueries, sbc1.Queries) - sbc1.Queries = nil - query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks' and table_name = 'user'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", - NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), - query, map[string]*querypb.BindVariable{}, - ) - require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select 1 
from information_schema.table_constraints where constraint_schema = :__vtschemaname and table_name = :__vttablename", - BindVariables: map[string]*querypb.BindVariable{ - "__vtschemaname": sqltypes.StringBindVariable("vt_ks"), - "__vttablename": sqltypes.StringBindVariable("user"), - }}} - utils.MustMatch(t, wantQueries, sbc1.Queries) - - sbc1.Queries = nil - query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", - NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), - query, map[string]*querypb.BindVariable{}, - ) - require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select 1 from information_schema.table_constraints where constraint_schema = :__vtschemaname", - BindVariables: map[string]*querypb.BindVariable{ - "__vtschemaname": sqltypes.StringBindVariable("vt_ks"), - }}} - utils.MustMatch(t, wantQueries, sbc1.Queries) - } func TestUnsharded(t *testing.T) { @@ -249,13 +223,16 @@ func TestStreamLimitOffset(t *testing.T) { Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32}, {Name: "textcol", Type: sqltypes.VarChar}, + {Name: "weight_string(id)", Type: sqltypes.VarBinary}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewVarChar("1234"), + sqltypes.NULL, }, { sqltypes.NewInt32(4), sqltypes.NewVarChar("4567"), + sqltypes.NULL, }}, }}) @@ -263,10 +240,12 @@ func TestStreamLimitOffset(t *testing.T) { Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32}, {Name: "textcol", Type: sqltypes.VarChar}, + {Name: "weight_string(id)", Type: sqltypes.VarBinary}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(2), sqltypes.NewVarChar("2345"), + sqltypes.NULL, }}, }}) @@ -322,7 +301,6 @@ func TestSelectLastInsertId(t *testing.T) { sql := "select last_insert_id()" result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ - RowsAffected: 1, Fields: []*querypb.Field{ 
{Name: "last_insert_id()", Type: sqltypes.Uint64}, }, @@ -331,7 +309,7 @@ func TestSelectLastInsertId(t *testing.T) { }}, } require.NoError(t, err) - utils.MustMatch(t, result, wantResult, "Mismatch") + utils.MustMatch(t, wantResult, result, "Mismatch") } func TestSelectSystemVariables(t *testing.T) { @@ -345,16 +323,17 @@ func TestSelectSystemVariables(t *testing.T) { logChan := QueryLogger.Subscribe("Test") defer QueryLogger.Unsubscribe(logChan) - sql := "select @@autocommit, @@client_found_rows, @@skip_query_plan_cache, " + + sql := "select @@autocommit, @@client_found_rows, @@skip_query_plan_cache, @@enable_system_settings, " + "@@sql_select_limit, @@transaction_mode, @@workload, @@read_after_write_gtid, " + - "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy" + "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy, @@socket" result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@@autocommit", Type: sqltypes.Int32}, - {Name: "@@client_found_rows", Type: sqltypes.Int32}, - {Name: "@@skip_query_plan_cache", Type: sqltypes.Int32}, + {Name: "@@autocommit", Type: sqltypes.Int64}, + {Name: "@@client_found_rows", Type: sqltypes.Int64}, + {Name: "@@skip_query_plan_cache", Type: sqltypes.Int64}, + {Name: "@@enable_system_settings", Type: sqltypes.Int64}, {Name: "@@sql_select_limit", Type: sqltypes.Int64}, {Name: "@@transaction_mode", Type: sqltypes.VarBinary}, {Name: "@@workload", Type: sqltypes.VarBinary}, @@ -362,13 +341,14 @@ func TestSelectSystemVariables(t *testing.T) { {Name: "@@read_after_write_timeout", Type: sqltypes.Float64}, {Name: "@@session_track_gtids", Type: sqltypes.VarBinary}, {Name: "@@ddl_strategy", Type: sqltypes.VarBinary}, + {Name: "@@socket", Type: sqltypes.VarBinary}, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{{ // the following are the uninitialised session values - sqltypes.NULL, - sqltypes.NULL, - sqltypes.NULL, + 
sqltypes.NewInt64(0), + sqltypes.NewInt64(0), + sqltypes.NewInt64(0), + sqltypes.NewInt64(0), sqltypes.NewInt64(0), sqltypes.NewVarBinary("UNSPECIFIED"), sqltypes.NewVarBinary(""), @@ -377,10 +357,42 @@ func TestSelectSystemVariables(t *testing.T) { sqltypes.NewFloat64(13), sqltypes.NewVarBinary("own_gtid"), sqltypes.NewVarBinary(""), + sqltypes.NewVarBinary(""), }}, } require.NoError(t, err) - utils.MustMatch(t, result, wantResult, "Mismatch") + utils.MustMatch(t, wantResult, result, "Mismatch") +} + +func TestSelectInitializedVitessAwareVariable(t *testing.T) { + executor, _, _, _ := createLegacyExecutorEnv() + executor.normalize = true + logChan := QueryLogger.Subscribe("Test") + defer QueryLogger.Unsubscribe(logChan) + + masterSession.Autocommit = true + masterSession.EnableSystemSettings = true + + defer func() { + masterSession.Autocommit = false + masterSession.EnableSystemSettings = false + }() + + sql := "select @@autocommit, @@enable_system_settings" + + result, err := executorExec(executor, sql, nil) + wantResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + {Name: "@@autocommit", Type: sqltypes.Int64}, + {Name: "@@enable_system_settings", Type: sqltypes.Int64}, + }, + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(1), + sqltypes.NewInt64(1), + }}, + } + require.NoError(t, err) + utils.MustMatch(t, wantResult, result, "Mismatch") } func TestSelectUserDefindVariable(t *testing.T) { @@ -393,7 +405,6 @@ func TestSelectUserDefindVariable(t *testing.T) { result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult := &sqltypes.Result{ - RowsAffected: 1, Fields: []*querypb.Field{ {Name: "@foo", Type: sqltypes.Null}, }, @@ -401,13 +412,12 @@ func TestSelectUserDefindVariable(t *testing.T) { sqltypes.NULL, }}, } - utils.MustMatch(t, result, wantResult, "Mismatch") + utils.MustMatch(t, wantResult, result, "Mismatch") masterSession = &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo"}, 
[]interface{}{"bar"})} result, err = executorExec(executor, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult = &sqltypes.Result{ - RowsAffected: 1, Fields: []*querypb.Field{ {Name: "@foo", Type: sqltypes.VarBinary}, }, @@ -415,7 +425,7 @@ func TestSelectUserDefindVariable(t *testing.T) { sqltypes.NewVarBinary("bar"), }}, } - utils.MustMatch(t, result, wantResult, "Mismatch") + utils.MustMatch(t, wantResult, result, "Mismatch") } func TestFoundRows(t *testing.T) { @@ -431,7 +441,6 @@ func TestFoundRows(t *testing.T) { sql := "select found_rows()" result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ - RowsAffected: 1, Fields: []*querypb.Field{ {Name: "found_rows()", Type: sqltypes.Uint64}, }, @@ -440,7 +449,7 @@ func TestFoundRows(t *testing.T) { }}, } require.NoError(t, err) - utils.MustMatch(t, result, wantResult, "Mismatch") + utils.MustMatch(t, wantResult, result, "Mismatch") } func TestRowCount(t *testing.T) { @@ -459,9 +468,9 @@ func TestRowCount(t *testing.T) { } func testRowCount(t *testing.T, executor *Executor, wantRowCount int64) { + t.Helper() result, err := executorExec(executor, "select row_count()", map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ - RowsAffected: 1, Fields: []*querypb.Field{ {Name: "row_count()", Type: sqltypes.Int64}, }, @@ -470,7 +479,7 @@ func testRowCount(t *testing.T, executor *Executor, wantRowCount int64) { }}, } require.NoError(t, err) - utils.MustMatch(t, result, wantResult, "Mismatch") + utils.MustMatch(t, wantResult, result, "Mismatch") } func TestSelectLastInsertIdInUnion(t *testing.T) { @@ -507,8 +516,7 @@ func TestLastInsertIDInVirtualTable(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(3), @@ -536,7 +544,6 @@ func TestLastInsertIDInSubQueryExpression(t 
*testing.T) { rs, err := executorExec(executor, "select (select last_insert_id()) as x", nil) require.NoError(t, err) wantResult := &sqltypes.Result{ - RowsAffected: 1, Fields: []*querypb.Field{ {Name: "x", Type: sqltypes.Uint64}, }, @@ -565,7 +572,6 @@ func TestSelectDatabase(t *testing.T) { sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ - RowsAffected: 1, Fields: []*querypb.Field{ {Name: "database()", Type: sqltypes.VarBinary}, }, @@ -574,7 +580,7 @@ func TestSelectDatabase(t *testing.T) { }}, } require.NoError(t, err) - utils.MustMatch(t, result, wantResult, "Mismatch") + utils.MustMatch(t, wantResult, result, "Mismatch") } @@ -597,7 +603,7 @@ func TestSelectBindvars(t *testing.T) { }) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where id = :id", + Sql: "select id from `user` where id = :id", BindVariables: map[string]*querypb.BindVariable{"id": sqltypes.Int64BindVariable(1)}, }} utils.MustMatch(t, sbc1.Queries, wantQueries) @@ -613,7 +619,7 @@ func TestSelectBindvars(t *testing.T) { }) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where `name` in ::__vals", + Sql: "select id from `user` where `name` in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "name1": sqltypes.BytesBindVariable([]byte("foo1")), "name2": sqltypes.BytesBindVariable([]byte("foo2")), @@ -634,7 +640,7 @@ func TestSelectBindvars(t *testing.T) { }) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where 1 != 1", + Sql: "select id from `user` where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "name1": sqltypes.BytesBindVariable([]byte("foo1")), "name2": sqltypes.BytesBindVariable([]byte("foo2")), @@ -665,7 +671,7 @@ func TestSelectBindvars(t *testing.T) { // When there are no matching rows in the vindex, vtgate still needs the field info wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where 1 != 1", 
+ Sql: "select id from `user` where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "name": sqltypes.StringBindVariable("nonexistent"), }, @@ -694,7 +700,7 @@ func TestSelectEqual(t *testing.T) { _, err := executorExec(executor, "select id from user where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where id = 1", + Sql: "select id from `user` where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -706,7 +712,7 @@ func TestSelectEqual(t *testing.T) { _, err = executorExec(executor, "select id from user where id = 3", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where id = 3", + Sql: "select id from `user` where id = 3", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc2.Queries) @@ -721,7 +727,7 @@ func TestSelectEqual(t *testing.T) { _, err = executorExec(executor, "select id from user where id = '3'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where id = '3'", + Sql: "select id from `user` where id = '3'", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc2.Queries) @@ -740,7 +746,7 @@ func TestSelectEqual(t *testing.T) { _, err = executorExec(executor, "select id from user where name = 'foo'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where `name` = 'foo'", + Sql: "select id from `user` where `name` = 'foo'", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -777,7 +783,7 @@ func TestSelectComments(t *testing.T) { _, err := executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "/* leading */ select id from user where id = 1 /* trailing */", + 
Sql: "/* leading */ select id from `user` where id = 1 /* trailing */", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -794,7 +800,7 @@ func TestSelectNormalize(t *testing.T) { _, err := executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "/* leading */ select id from user where id = :vtg1 /* trailing */", + Sql: "/* leading */ select id from `user` where id = :vtg1 /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "vtg1": sqltypes.TestBindVariable(int64(1)), }, @@ -810,7 +816,7 @@ func TestSelectNormalize(t *testing.T) { _, err = executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "/* leading */ select id from user where id = :vtg1 /* trailing */", + Sql: "/* leading */ select id from `user` where id = :vtg1 /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "vtg1": sqltypes.TestBindVariable(int64(1)), }, @@ -827,7 +833,7 @@ func TestSelectCaseSensitivity(t *testing.T) { _, err := executorExec(executor, "select Id from user where iD = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select Id from user where iD = 1", + Sql: "select Id from `user` where iD = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -888,7 +894,7 @@ func TestSelectIN(t *testing.T) { _, err := executorExec(executor, "select id from user where id in (1)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": sqltypes.TestBindVariable([]interface{}{int64(1)}), }, @@ -905,14 +911,14 @@ func TestSelectIN(t *testing.T) { _, err = 
executorExec(executor, "select id from user where id in (1, 3)", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": sqltypes.TestBindVariable([]interface{}{int64(1)}), }, }} utils.MustMatch(t, wantQueries, sbc1.Queries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": sqltypes.TestBindVariable([]interface{}{int64(3)}), }, @@ -928,7 +934,7 @@ func TestSelectIN(t *testing.T) { }) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": sqltypes.TestBindVariable([]interface{}{int64(1)}), "vals": sqltypes.TestBindVariable([]interface{}{int64(1), int64(3)}), @@ -936,7 +942,7 @@ func TestSelectIN(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc1.Queries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": sqltypes.TestBindVariable([]interface{}{int64(3)}), "vals": sqltypes.TestBindVariable([]interface{}{int64(1), int64(3)}), @@ -954,7 +960,7 @@ func TestSelectIN(t *testing.T) { _, err = executorExec(executor, "select id from user where name = 'foo'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select id from user where `name` = 'foo'", + Sql: "select id from `user` where `name` = 'foo'", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -1029,20 +1035,21 @@ func TestSelectScatter(t *testing.T) { sbc := hc.AddTestTablet(cell, shard, 1, 
"TestExecutor", shard, topodatapb.TabletType_MASTER, true, 1, nil) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) logChan := QueryLogger.Subscribe("Test") defer QueryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "select id from user", nil) + sql := "select id from user" + _, err := executorExec(executor, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user", + Sql: "select id from `user`", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { utils.MustMatch(t, wantQueries, conn.Queries) } - testQueryLog(t, logChan, "TestExecute", "SELECT", wantQueries[0].Sql, 8) + testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 8) } func TestSelectScatterPartial(t *testing.T) { @@ -1061,14 +1068,14 @@ func TestSelectScatterPartial(t *testing.T) { conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) logChan := QueryLogger.Subscribe("Test") defer QueryLogger.Unsubscribe(logChan) // Fail 1 of N without the directive fails the whole operation conns[2].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 results, err := executorExec(executor, "select id from user", nil) - wantErr := "TestExecutor.40-60.master, used tablet: aa-0 (40-60)" + wantErr := "TestExecutor.40-60.master" if err == nil || !strings.Contains(err.Error(), wantErr) { t.Errorf("want error %v, got %v", wantErr, err) } @@ -1118,7 +1125,7 @@ func TestStreamSelectScatter(t *testing.T) { for _, shard := range shards { _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_MASTER, true, 1, 
nil) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) sql := "select id from user" result, err := executorStream(executor, sql) @@ -1159,27 +1166,28 @@ func TestSelectScatterOrderBy(t *testing.T) { Fields: []*querypb.Field{ {Name: "col1", Type: sqltypes.Int32}, {Name: "col2", Type: sqltypes.Int32}, + {Name: "weight_string(col2)", Type: sqltypes.VarBinary}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), // i%4 ensures that there are duplicates across shards. // This will allow us to test that cross-shard ordering // still works correctly. sqltypes.NewInt32(int32(i % 4)), + sqltypes.NULL, }}, }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) query := "select col1, col2 from user order by col2 desc" gotResult, err := executorExec(executor, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: query, + Sql: "select col1, col2, weight_string(col2) from `user` order by col2 desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1191,8 +1199,7 @@ func TestSelectScatterOrderBy(t *testing.T) { {Name: "col1", Type: sqltypes.Int32}, {Name: "col2", Type: sqltypes.Int32}, }, - RowsAffected: 8, - InsertID: 0, + InsertID: 0, } for i := 0; i < 4; i++ { // There should be a duplicate for each row returned. 
@@ -1226,8 +1233,7 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { {Name: "col1", Type: sqltypes.Int32}, {Name: "textcol", Type: sqltypes.VarChar}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), // i%4 ensures that there are duplicates across shards. @@ -1239,14 +1245,14 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) query := "select col1, textcol from user order by textcol desc" gotResult, err := executorExec(executor, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, textcol, weight_string(textcol) from user order by textcol desc", + Sql: "select col1, textcol, weight_string(textcol) from `user` order by textcol desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1258,8 +1264,7 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { {Name: "col1", Type: sqltypes.Int32}, {Name: "textcol", Type: sqltypes.VarChar}, }, - RowsAffected: 8, - InsertID: 0, + InsertID: 0, } for i := 0; i < 4; i++ { // There should be a duplicate for each row returned. 
@@ -1291,24 +1296,25 @@ func TestStreamSelectScatterOrderBy(t *testing.T) { Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, + {Name: "weight_string(col)", Type: sqltypes.VarBinary}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(int32(i % 4)), + sqltypes.NULL, }}, }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) query := "select id, col from user order by col desc" gotResult, err := executorStream(executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: query, + Sql: "select id, col, weight_string(col) from `user` order by col desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1349,8 +1355,7 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "textcol", Type: sqltypes.VarChar}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewVarChar(fmt.Sprintf("%d", i%4)), @@ -1359,14 +1364,14 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) query := "select id, textcol from user order by textcol desc" gotResult, err := executorStream(executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id, textcol, weight_string(textcol) from user order by textcol desc", + Sql: "select id, textcol, weight_string(textcol) from `user` order by textcol desc", 
BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1407,24 +1412,25 @@ func TestSelectScatterAggregate(t *testing.T) { Fields: []*querypb.Field{ {Name: "col", Type: sqltypes.Int32}, {Name: "sum(foo)", Type: sqltypes.Int32}, + {Name: "weight_string(col)", Type: sqltypes.VarBinary}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(int32(i % 4)), sqltypes.NewInt32(int32(i)), + sqltypes.NULL, }}, }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) query := "select col, sum(foo) from user group by col" gotResult, err := executorExec(executor, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: query + " order by col asc", + Sql: "select col, sum(foo), weight_string(col) from `user` group by col order by col asc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1436,8 +1442,7 @@ func TestSelectScatterAggregate(t *testing.T) { {Name: "col", Type: sqltypes.Int32}, {Name: "sum(foo)", Type: sqltypes.Int32}, }, - RowsAffected: 4, - InsertID: 0, + InsertID: 0, } for i := 0; i < 4; i++ { row := []sqltypes.Value{ @@ -1466,24 +1471,25 @@ func TestStreamSelectScatterAggregate(t *testing.T) { Fields: []*querypb.Field{ {Name: "col", Type: sqltypes.Int32}, {Name: "sum(foo)", Type: sqltypes.Int32}, + {Name: "weight_string(col)", Type: sqltypes.VarBinary}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(int32(i % 4)), sqltypes.NewInt32(int32(i)), + sqltypes.NULL, }}, }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, 
false, testBufferSize, cache.DefaultConfig) query := "select col, sum(foo) from user group by col" gotResult, err := executorStream(executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: query + " order by col asc", + Sql: "select col, sum(foo), weight_string(col) from `user` group by col order by col asc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1525,24 +1531,25 @@ func TestSelectScatterLimit(t *testing.T) { Fields: []*querypb.Field{ {Name: "col1", Type: sqltypes.Int32}, {Name: "col2", Type: sqltypes.Int32}, + {Name: "weight_string(col2)", Type: sqltypes.VarBinary}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(int32(i % 4)), + sqltypes.NULL, }}, }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) query := "select col1, col2 from user order by col2 desc limit 3" gotResult, err := executorExec(executor, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, col2 from user order by col2 desc limit :__upper_limit", + Sql: "select col1, col2, weight_string(col2) from `user` order by col2 desc limit :__upper_limit", BindVariables: map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)}, }} for _, conn := range conns { @@ -1554,8 +1561,7 @@ func TestSelectScatterLimit(t *testing.T) { {Name: "col1", Type: sqltypes.Int32}, {Name: "col2", Type: sqltypes.Int32}, }, - RowsAffected: 3, - InsertID: 0, + InsertID: 0, } wantResult.Rows = append(wantResult.Rows, []sqltypes.Value{ @@ -1593,24 +1599,25 @@ func TestStreamSelectScatterLimit(t *testing.T) { Fields: []*querypb.Field{ {Name: "col1", Type: sqltypes.Int32}, {Name: "col2", Type: sqltypes.Int32}, + 
{Name: "weight_string(col2)", Type: sqltypes.VarBinary}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(int32(i % 4)), + sqltypes.NULL, }}, }}) conns = append(conns, sbc) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) query := "select col1, col2 from user order by col2 desc limit 3" gotResult, err := executorStream(executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, col2 from user order by col2 desc limit :__upper_limit", + Sql: "select col1, col2, weight_string(col2) from `user` order by col2 desc limit :__upper_limit", BindVariables: map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)}, }} for _, conn := range conns { @@ -1651,12 +1658,12 @@ func TestSimpleJoin(t *testing.T) { result, err := executorExec(executor, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id from user as u1 where u1.id = 1", + Sql: "select u1.id from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select u2.id from user as u2 where u2.id = 3", + Sql: "select u2.id from `user` as u2 where u2.id = 3", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc2.Queries) @@ -1671,7 +1678,6 @@ func TestSimpleJoin(t *testing.T) { sandboxconn.SingleRowResult.Rows[0][0], }, }, - RowsAffected: 1, } if !result.Equal(wantResult) { t.Errorf("result: %+v, want %+v", result, wantResult) @@ -1689,12 +1695,12 @@ func TestJoinComments(t *testing.T) { _, err := executorExec(executor, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id from 
user as u1 where u1.id = 1 /* trailing */", + Sql: "select u1.id from `user` as u1 where u1.id = 1 /* trailing */", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select u2.id from user as u2 where u2.id = 3 /* trailing */", + Sql: "select u2.id from `user` as u2 where u2.id = 3 /* trailing */", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc2.Queries) @@ -1711,12 +1717,12 @@ func TestSimpleJoinStream(t *testing.T) { result, err := executorStream(executor, sql) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id from user as u1 where u1.id = 1", + Sql: "select u1.id from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select u2.id from user as u2 where u2.id = 3", + Sql: "select u2.id from `user` as u2 where u2.id = 3", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc2.Queries) @@ -1750,8 +1756,7 @@ func TestVarJoin(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(3), @@ -1762,13 +1767,13 @@ func TestVarJoin(t *testing.T) { _, err := executorExec(executor, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id, u1.col from user as u1 where u1.id = 1", + Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) // We have to use string representation because bindvars type is too complex. 
got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from user as u2 where u2.id = :u1_col" bind_variables: > ]` + want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables: > ]` if got != want { t.Errorf("sbc2.Queries: %s, want %s\n", got, want) } @@ -1786,8 +1791,7 @@ func TestVarJoinStream(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(3), @@ -1798,13 +1802,13 @@ func TestVarJoinStream(t *testing.T) { _, err := executorStream(executor, sql) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id, u1.col from user as u1 where u1.id = 1", + Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) // We have to use string representation because bindvars type is too complex. 
got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from user as u2 where u2.id = :u1_col" bind_variables: > ]` + want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables: > ]` if got != want { t.Errorf("sbc2.Queries: %s, want %s\n", got, want) } @@ -1821,8 +1825,7 @@ func TestLeftJoin(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(3), @@ -1849,14 +1852,11 @@ func TestLeftJoin(t *testing.T) { {}, }, }, - RowsAffected: 1, } if !result.Equal(wantResult) { t.Errorf("result: %+v, want %+v", result, wantResult) } - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 2) - } func TestLeftJoinStream(t *testing.T) { @@ -1866,8 +1866,7 @@ func TestLeftJoinStream(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(3), @@ -1916,10 +1915,10 @@ func TestEmptyJoin(t *testing.T) { result, err := executorExec(executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id, u1.col from user as u1 where u1.id = 1", + Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u2.id from user as u2 where 1 != 1", + Sql: "select u2.id from `user` as u2 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "u1_col": sqltypes.NullBindVariable, }, @@ -1952,10 +1951,10 @@ func TestEmptyJoinStream(t *testing.T) { result, err := executorStream(executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id, u1.col 
from user as u1 where u1.id = 1", + Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u2.id from user as u2 where 1 != 1", + Sql: "select u2.id from `user` as u2 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "u1_col": sqltypes.NullBindVariable, }, @@ -1992,13 +1991,13 @@ func TestEmptyJoinRecursive(t *testing.T) { result, err := executorExec(executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id from user as u1 where u1.id = 1", + Sql: "select u1.id from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u2.id, u2.col from user as u2 where 1 != 1", + Sql: "select u2.id, u2.col from `user` as u2 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u3.id from user as u3 where 1 != 1", + Sql: "select u3.id from `user` as u3 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "u2_col": sqltypes.NullBindVariable, }, @@ -2036,13 +2035,13 @@ func TestEmptyJoinRecursiveStream(t *testing.T) { result, err := executorStream(executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id from user as u1 where u1.id = 1", + Sql: "select u1.id from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u2.id, u2.col from user as u2 where 1 != 1", + Sql: "select u2.id, u2.col from `user` as u2 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u3.id from user as u3 where 1 != 1", + Sql: "select u3.id from `user` as u3 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "u2_col": sqltypes.NullBindVariable, }, @@ 
-2067,8 +2066,7 @@ func TestCrossShardSubquery(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(3), @@ -2078,13 +2076,13 @@ func TestCrossShardSubquery(t *testing.T) { result, err := executorExec(executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from user as u1 where u1.id = 1", + Sql: "select u1.id as id1, u1.col from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) // We have to use string representation because bindvars type is too complex. got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from user as u2 where u2.id = :u1_col" bind_variables: > ]` + want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables: > ]` if got != want { t.Errorf("sbc2.Queries: %s, want %s\n", got, want) } @@ -2093,7 +2091,6 @@ func TestCrossShardSubquery(t *testing.T) { Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32}, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), }}, @@ -2110,8 +2107,7 @@ func TestCrossShardSubqueryStream(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewInt32(3), @@ -2121,13 +2117,13 @@ func TestCrossShardSubqueryStream(t *testing.T) { result, err := executorStream(executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from user as u1 where u1.id = 1", + Sql: 
"select u1.id as id1, u1.col from `user` as u1 where u1.id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) // We have to use string representation because bindvars type is too complex. got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from user as u2 where u2.id = :u1_col" bind_variables: > ]` + want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables: > ]` if got != want { t.Errorf("sbc2.Queries:\n%s, want\n%s\n", got, want) } @@ -2162,10 +2158,10 @@ func TestCrossShardSubqueryGetFields(t *testing.T) { result, err := executorExec(executor, "select main1.col, t.id1 from main1 join (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from user as u1 where 1 != 1", + Sql: "select u1.id as id1, u1.col from `user` as u1 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u2.id from user as u2 where 1 != 1", + Sql: "select u2.id from `user` as u2 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "u1_col": sqltypes.NullBindVariable, }, @@ -2195,7 +2191,7 @@ func TestSelectBindvarswithPrepare(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where 1 != 1", + Sql: "select id from `user` where 1 != 1", BindVariables: map[string]*querypb.BindVariable{"id": sqltypes.Int64BindVariable(1)}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -2204,6 +2200,17 @@ func TestSelectBindvarswithPrepare(t *testing.T) { } } +func TestSelectDatabasePrepare(t *testing.T) { + executor, _, _, _ := createExecutorEnv() + executor.normalize = true + logChan := QueryLogger.Subscribe("Test") + defer QueryLogger.Unsubscribe(logChan) + + sql := "select database()" + _, err := executorPrepare(executor, sql, 
map[string]*querypb.BindVariable{}) + require.NoError(t, err) +} + func TestSelectWithUnionAll(t *testing.T) { executor, sbc1, sbc2, _ := createLegacyExecutorEnv() executor.normalize = true @@ -2212,14 +2219,14 @@ func TestSelectWithUnionAll(t *testing.T) { bv1, _ := sqltypes.BuildBindVariable([]int64{1, 2}) bv2, _ := sqltypes.BuildBindVariable([]int64{3}) sbc1WantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": bv1, "vtg1": bv, "vtg2": bv, }, }, { - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": bv1, "vtg1": bv, @@ -2227,14 +2234,14 @@ func TestSelectWithUnionAll(t *testing.T) { }, }} sbc2WantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": bv2, "vtg1": bv, "vtg2": bv, }, }, { - Sql: "select id from user where id in ::__vals", + Sql: "select id from `user` where id in ::__vals", BindVariables: map[string]*querypb.BindVariable{ "__vals": bv2, "vtg1": bv, @@ -2286,7 +2293,6 @@ func TestSelectLock(t *testing.T) { TabletAlias: sbc1.Tablet().Alias, }}, LockSession: &vtgatepb.Session_ShardSession{ - Target: &querypb.Target{Keyspace: "TestExecutor", Shard: "-20", TabletType: topodatapb.TabletType_MASTER}, TabletAlias: sbc1.Tablet().Alias, ReservedId: 1, @@ -2317,7 +2323,7 @@ func TestSelectFromInformationSchema(t *testing.T) { // check failure when trying to query two keyspaces _, err := exec(executor, session, "SELECT B.TABLE_NAME FROM INFORMATION_SCHEMA.TABLES AS A, INFORMATION_SCHEMA.COLUMNS AS B WHERE A.TABLE_SCHEMA = 'TestExecutor' AND A.TABLE_SCHEMA = 'TestXBadSharding'") require.Error(t, err) - require.Contains(t, err.Error(), "specifying two different 
database in the query is not supported") + require.Contains(t, err.Error(), "two predicates for specifying the database are not supported") // we pick a keyspace and query for table_schema = database(). should be routed to the picked keyspace session.TargetString = "TestExecutor" @@ -2347,12 +2353,12 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_MASTER, true, 1, nil) sbc.SetResults([]*sqltypes.Result{ - sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col", "int32|int32"), fmt.Sprintf("%d|%d", count, count)), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col", "int32|int32"), fmt.Sprintf("%d|%d", count+10, count)), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col|weight_string(id)", "int32|int32|varchar"), fmt.Sprintf("%d|%d|NULL", count, count)), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col|weight_string(id)", "int32|int32|varchar"), fmt.Sprintf("%d|%d|NULL", count+10, count)), }) count++ } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, true, false, testBufferSize, cache.DefaultConfig) before := runtime.NumGoroutine() query := "select id, col from user order by id limit 2" @@ -2360,7 +2366,6 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { require.NoError(t, err) wantResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col", "int32|int32"), "1|1", "2|2") - wantResult.RowsAffected = 0 utils.MustMatch(t, wantResult, gotResult) // some sleep to close all goroutines. 
time.Sleep(100 * time.Millisecond) diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index 19e8d980600..aa177d77cde 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -89,10 +89,10 @@ func TestExecutorSet(t *testing.T) { out: &vtgatepb.Session{}, }, { in: "set AUTOCOMMIT = 'aa'", - err: "System setting 'autocommit' can't be set to this value: 'aa' is not a boolean", + err: "Variable 'autocommit' can't be set to the value: 'aa' is not a boolean", }, { in: "set autocommit = 2", - err: "System setting 'autocommit' can't be set to this value: 2 is not a boolean", + err: "Variable 'autocommit' can't be set to the value: 2 is not a boolean", }, { in: "set client_found_rows = 1", out: &vtgatepb.Session{Autocommit: true, Options: &querypb.ExecuteOptions{ClientFoundRows: true}}, @@ -110,10 +110,10 @@ func TestExecutorSet(t *testing.T) { err: "cannot use scope and @@", }, { in: "set client_found_rows = 'aa'", - err: "System setting 'client_found_rows' can't be set to this value: 'aa' is not a boolean", + err: "Variable 'client_found_rows' can't be set to the value: 'aa' is not a boolean", }, { in: "set client_found_rows = 2", - err: "System setting 'client_found_rows' can't be set to this value: 2 is not a boolean", + err: "Variable 'client_found_rows' can't be set to the value: 2 is not a boolean", }, { in: "set transaction_mode = 'unspecified'", out: &vtgatepb.Session{Autocommit: true, TransactionMode: vtgatepb.TransactionMode_UNSPECIFIED}, @@ -134,7 +134,7 @@ func TestExecutorSet(t *testing.T) { err: "invalid transaction_mode: aa", }, { in: "set transaction_mode = 1", - err: "unexpected value type for transaction_mode: INT64", + err: "Incorrect argument type to variable 'transaction_mode': INT64", }, { in: "set workload = 'unspecified'", out: &vtgatepb.Session{Autocommit: true, Options: &querypb.ExecuteOptions{Workload: querypb.ExecuteOptions_UNSPECIFIED}}, @@ -152,7 +152,7 @@ func TestExecutorSet(t 
*testing.T) { err: "invalid workload: aa", }, { in: "set workload = 1", - err: "unexpected value type for workload: INT64", + err: "Incorrect argument type to variable 'workload': INT64", }, { in: "set transaction_mode = 'twopc', autocommit=1", out: &vtgatepb.Session{Autocommit: true, TransactionMode: vtgatepb.TransactionMode_TWOPC}, @@ -164,10 +164,10 @@ func TestExecutorSet(t *testing.T) { out: &vtgatepb.Session{Autocommit: true, Options: &querypb.ExecuteOptions{SqlSelectLimit: 0}}, }, { in: "set sql_select_limit = 'asdfasfd'", - err: "failed to evaluate value for sql_select_limit: expected int, unexpected value type: string", + err: "Incorrect argument type to variable 'sql_select_limit': VARBINARY", }, { in: "set autocommit = 1+1", - err: "System setting 'autocommit' can't be set to this value: 2 is not a boolean", + err: "Variable 'autocommit' can't be set to the value: 2 is not a boolean", }, { in: "set autocommit = 1+0", out: &vtgatepb.Session{Autocommit: true}, @@ -176,7 +176,7 @@ func TestExecutorSet(t *testing.T) { out: &vtgatepb.Session{Autocommit: true}, }, { in: "set foo = 1", - err: "unsupported construct in set: session foo = 1", + err: "Unknown system variable 'session foo = 1'", }, { in: "set names utf8", out: &vtgatepb.Session{Autocommit: true}, @@ -200,10 +200,10 @@ func TestExecutorSet(t *testing.T) { out: &vtgatepb.Session{Autocommit: true, Options: &querypb.ExecuteOptions{}}, }, { in: "set tx_read_only = 2", - err: "System setting 'tx_read_only' can't be set to this value: 2 is not a boolean", + err: "Variable 'tx_read_only' can't be set to the value: 2 is not a boolean", }, { in: "set transaction_read_only = 2", - err: "System setting 'transaction_read_only' can't be set to this value: 2 is not a boolean", + err: "Variable 'transaction_read_only' can't be set to the value: 2 is not a boolean", }, { in: "set session transaction isolation level repeatable read", out: &vtgatepb.Session{Autocommit: true}, @@ -228,6 +228,27 @@ func 
TestExecutorSet(t *testing.T) { }, { in: "set session transaction read write", out: &vtgatepb.Session{Autocommit: true}, + }, { + in: "set @@enable_system_settings = on", + out: &vtgatepb.Session{Autocommit: true, EnableSystemSettings: true}, + }, { + in: "set @@enable_system_settings = off", + out: &vtgatepb.Session{Autocommit: true, EnableSystemSettings: false}, + }, { + in: "set @@enable_system_settings = 1", + out: &vtgatepb.Session{Autocommit: true, EnableSystemSettings: true}, + }, { + in: "set @@enable_system_settings = 0", + out: &vtgatepb.Session{Autocommit: true, EnableSystemSettings: false}, + }, { + in: "set @@enable_system_settings = true", + out: &vtgatepb.Session{Autocommit: true, EnableSystemSettings: true}, + }, { + in: "set @@enable_system_settings = false", + out: &vtgatepb.Session{Autocommit: true, EnableSystemSettings: false}, + }, { + in: "set @@socket = '/tmp/change.sock'", + err: "Variable 'socket' is a read only variable", }} for i, tcase := range testcases { t.Run(fmt.Sprintf("%d-%s", i, tcase.in), func(t *testing.T) { @@ -247,76 +268,91 @@ func TestExecutorSetOp(t *testing.T) { executor, _, _, sbclookup := createLegacyExecutorEnv() *sysVarSetEnabled = true - sbclookup.SetResults([]*sqltypes.Result{ - sqltypes.MakeTestResult(sqltypes.MakeTestFields("sql_mode", "varchar"), "STRICT_ALL_TABLES,NO_AUTO_UPDATES"), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("sql_safe_updates", "int64"), "1"), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("tx_isolation", "varchar"), "read-committed"), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("sql_quote_show_create", "int64"), "0"), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("foreign_key_checks", "int64")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("unique_checks", "int64"), "0"), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("net_write_timeout", "int64"), "600"), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("net_read_timeout", "int64"), "300"), - 
sqltypes.MakeTestResult(sqltypes.MakeTestFields("character_set_client", "varchar"), "utf8"), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("character_set_results", "varchar")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("character_set_results", "varchar")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("character_set_results", "varchar")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("character_set_results", "varchar")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("character_set_results", "varchar")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("character_set_results", "varchar")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("client_found_rows", "int64")), - sqltypes.MakeTestResult(sqltypes.MakeTestFields("client_found_rows", "int64")), - }) + returnResult := func(columnName, typ, value string) *sqltypes.Result { + return sqltypes.MakeTestResult(sqltypes.MakeTestFields(columnName, typ), value) + } + returnNoResult := func(columnName, typ string) *sqltypes.Result { + return sqltypes.MakeTestResult(sqltypes.MakeTestFields(columnName, typ)) + } testcases := []struct { - in string - warning []*querypb.QueryWarning - sysVars map[string]string + in string + warning []*querypb.QueryWarning + sysVars map[string]string + disallowResConn bool + result *sqltypes.Result }{{ in: "set big_tables = 1", //ignore }, { in: "set sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_UPDATES'", sysVars: map[string]string{"sql_mode": "'STRICT_ALL_TABLES,NO_AUTO_UPDATES'"}, + result: returnResult("sql_mode", "varchar", "STRICT_ALL_TABLES,NO_AUTO_UPDATES"), + }, { + // even though the tablet is saying that the value has changed, + // useReservedConn is false, so we won't allow this change + in: "set sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_UPDATES'", + result: returnResult("sql_mode", "varchar", "STRICT_ALL_TABLES,NO_AUTO_UPDATES"), + sysVars: nil, + disallowResConn: true, }, { in: "set sql_safe_updates = 1", sysVars: map[string]string{"sql_safe_updates": "1"}, + 
result: returnResult("sql_safe_updates", "int64", "1"), }, { in: "set tx_isolation = 'read-committed'", sysVars: map[string]string{"tx_isolation": "'read-committed'"}, + result: returnResult("tx_isolation", "varchar", "read-committed"), }, { in: "set sql_quote_show_create = 0", sysVars: map[string]string{"sql_quote_show_create": "0"}, + result: returnResult("sql_quote_show_create", "int64", "0"), }, { - in: "set foreign_key_checks = 1", + in: "set foreign_key_checks = 1", + result: returnNoResult("foreign_key_checks", "int64"), }, { in: "set unique_checks = 0", sysVars: map[string]string{"unique_checks": "0"}, + result: returnResult("unique_checks", "int64", "0"), }, { - in: "set net_write_timeout = 600", + in: "set net_write_timeout = 600", + result: returnResult("net_write_timeout", "int64", "600"), }, { - in: "set net_read_timeout = 600", + in: "set net_read_timeout = 600", + result: returnResult("net_read_timeout", "int64", "300"), }, { - in: "set character_set_client = utf8", + in: "set character_set_client = utf8", + result: returnResult("character_set_client", "varchar", "utf8"), }, { - in: "set character_set_results=null", + in: "set character_set_results=null", + result: returnNoResult("character_set_results", "varchar"), }, { - in: "set character_set_results='binary'", + in: "set character_set_results='binary'", + result: returnNoResult("character_set_results", "varchar"), }, { - in: "set character_set_results='utf8'", + in: "set character_set_results='utf8'", + result: returnNoResult("character_set_results", "varchar"), }, { - in: "set character_set_results=utf8mb4", + in: "set character_set_results=utf8mb4", + result: returnNoResult("character_set_results", "varchar"), }, { - in: "set character_set_results='latin1'", + in: "set character_set_results='latin1'", + result: returnNoResult("character_set_results", "varchar"), }, { - in: "set character_set_results='abcd'", + in: "set character_set_results='abcd'", + result: 
returnNoResult("character_set_results", "varchar"), }, { - in: "set @@global.client_found_rows = 1", + in: "set @@global.client_found_rows = 1", + result: returnNoResult("client_found_rows", "int64"), }, { - in: "set global client_found_rows = 1", + in: "set global client_found_rows = 1", + result: returnNoResult("client_found_rows", "int64"), }} for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { session := NewAutocommitSession(masterSession) session.TargetString = KsTestUnsharded + session.EnableSystemSettings = !tcase.disallowResConn + sbclookup.SetResults([]*sqltypes.Result{tcase.result}) _, err := executor.Execute( context.Background(), "TestExecute", diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go index c4351b79581..a53af0c5243 100644 --- a/go/vt/vtgate/executor_stream_test.go +++ b/go/vt/vtgate/executor_stream_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" @@ -59,7 +60,7 @@ func TestStreamSQLSharded(t *testing.T) { for _, shard := range shards { _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_MASTER, true, 1, nil) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, testBufferSize, testCacheSize) + executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig) sql := "stream * from sharded_user_msgs" result, err := executorStreamMessages(executor, sql) @@ -82,6 +83,26 @@ func TestStreamSQLSharded(t *testing.T) { } } +func TestStreamError(t *testing.T) { + executor, _, _, _ := createLegacyExecutorEnv() + logChan := QueryLogger.Subscribe("TestStreamError") + defer QueryLogger.Unsubscribe(logChan) + + queries := []string{ + "start transaction", + "begin", + "rollback", + "commit", + } + for _, query := range queries 
{ + t.Run(query, func(t *testing.T) { + _, err := executorStreamMessages(executor, query) + require.Error(t, err) + require.Contains(t, err.Error(), "OLAP does not supported statement") + }) + } +} + func executorStreamMessages(executor *Executor, sql string) (qr *sqltypes.Result, err error) { results := make(chan *sqltypes.Result, 100) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 869a4823baf..2f9fd2f80ac 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -29,7 +29,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/topo" @@ -93,7 +95,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { err string }{ {"select /*vt+ IGNORE_MAX_MEMORY_ROWS=1 */ * from main1", ""}, - {"select * from main1", "in-memory row count exceeded allowed limit of 3 (errno 1153) (sqlstate HY000)"}, + {"select * from main1", "in-memory row count exceeded allowed limit of 3"}, } for _, test := range testCases { @@ -181,7 +183,7 @@ func TestLegacyExecutorTransactionsNoAutoCommit(t *testing.T) { session = NewSafeSession(&vtgatepb.Session{TargetString: "@replica", InTransaction: true}) _, err = executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) require.Error(t, err) - want := "transactions are supported only for master tablet types, current type: REPLICA" + want := "transaction is supported only for master tablet type, current type: REPLICA" require.Contains(t, err.Error(), want) // Prevent begin on non-master. @@ -193,10 +195,7 @@ func TestLegacyExecutorTransactionsNoAutoCommit(t *testing.T) { // Prevent use of non-master if in_transaction is on. 
session = NewSafeSession(&vtgatepb.Session{TargetString: "@master", InTransaction: true}) _, err = executor.Execute(ctx, "TestExecute", session, "use @replica", nil) - want = "cannot change to a non-master type in the middle of a transaction: REPLICA" - if err == nil || err.Error() != want { - t.Errorf("Execute(@replica, in_transaction) err: %v, want %s", err, want) - } + require.EqualError(t, err, `Can't execute the given command because you have an active transaction`) } func TestDirectTargetRewrites(t *testing.T) { @@ -315,8 +314,8 @@ func TestExecutorAutocommit(t *testing.T) { if logStats.CommitTime != 0 { t.Errorf("logstats: expected zero CommitTime") } - if logStats.RowsAffected == 0 { - t.Errorf("logstats: expected non-zero RowsAffected") + if logStats.RowsReturned == 0 { + t.Errorf("logstats: expected non-zero RowsReturned") } // autocommit = 1 @@ -331,7 +330,7 @@ func TestExecutorAutocommit(t *testing.T) { _, err = executor.Execute(ctx, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) - wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@master", FoundRows: 1, RowCount: 1} + wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@master", FoundRows: 0, RowCount: 1} utils.MustMatch(t, wantSession, session.Session, "session does not match for autocommit=1") logStats = testQueryLog(t, logChan, "TestExecute", "UPDATE", "update main1 set id=1", 1) @@ -347,7 +346,7 @@ func TestExecutorAutocommit(t *testing.T) { _, err = executor.Execute(ctx, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) - wantSession = &vtgatepb.Session{InTransaction: true, Autocommit: true, TargetString: "@master", FoundRows: 1, RowCount: 1} + wantSession = &vtgatepb.Session{InTransaction: true, Autocommit: true, TargetString: "@master", FoundRows: 0, RowCount: 1} testSession = *session.Session testSession.ShardSessions = nil utils.MustMatch(t, wantSession, &testSession, "session does not match for 
autocommit=1") @@ -411,7 +410,7 @@ func TestExecutorShowColumns(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "show columns from user", + Sql: "show columns from `user`", BindVariables: map[string]*querypb.BindVariable{}, }} @@ -437,11 +436,19 @@ func TestExecutorShow(t *testing.T) { executor, _, _, sbclookup := createLegacyExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) - for _, query := range []string{"show databases", "show vitess_keyspaces", "show keyspaces", "show DATABASES", "show schemas", "show SCHEMAS"} { + for _, query := range []string{"show vitess_keyspaces", "show keyspaces"} { + qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + require.NoError(t, err) + require.EqualValues(t, 5, len(qr.Rows), fmt.Sprintf("unexpected results running query: %s", query)) + } + + for _, query := range []string{"show databases", "show DATABASES", "show schemas", "show SCHEMAS"} { qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) require.NoError(t, err) - require.EqualValues(t, 5, qr.RowsAffected, fmt.Sprintf("unexpected results running query: %s", query)) + // Showing default tables (5+4[default]) + require.EqualValues(t, 9, len(qr.Rows), fmt.Sprintf("unexpected results running query: %s", query)) } + _, err := executor.Execute(ctx, "TestExecute", session, "show variables", nil) require.NoError(t, err) _, err = executor.Execute(ctx, "TestExecute", session, "show collation", nil) @@ -501,58 +508,50 @@ func TestExecutorShow(t *testing.T) { _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show keys from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show keys from unknown" + wantQuery = "show indexes from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. 
Want: %v", lastQuery, wantQuery) _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show keys from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show keys from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW INDEX with two different syntax _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show index from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show index from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show index from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show index from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW INDEXES with two different syntax _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show indexes from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show indexes from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show indexes from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show indexes from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. 
Want: %v", lastQuery, wantQuery) // SHOW EXTENDED {INDEX | INDEXES | KEYS} _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show extended index from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show extended index from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show extended indexes from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show extended indexes from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show extended keys from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show extended keys from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. 
Want: %v", lastQuery, wantQuery) // Set desitation keyspace in session @@ -584,7 +583,6 @@ func TestExecutorShow(t *testing.T) { "utf8mb4_general_ci"), sqltypes.NewInt32(4)), }, - RowsAffected: 2, } utils.MustMatch(t, wantqr, qr, query) @@ -613,7 +611,6 @@ func TestExecutorShow(t *testing.T) { "UTF-8 Unicode", "utf8_general_ci"), sqltypes.NewInt32(3)), }, - RowsAffected: 1, } utils.MustMatch(t, wantqr, qr, query) @@ -631,9 +628,7 @@ func TestExecutorShow(t *testing.T) { "utf8mb4_general_ci"), sqltypes.NewInt32(4)), }, - RowsAffected: 1, } - utils.MustMatch(t, wantqr, qr, query) } @@ -651,7 +646,6 @@ func TestExecutorShow(t *testing.T) { "YES", "YES"), }, - RowsAffected: 1, } utils.MustMatch(t, wantqr, qr, query) @@ -668,7 +662,6 @@ func TestExecutorShow(t *testing.T) { "NULL", "GPL"), }, - RowsAffected: 1, } utils.MustMatch(t, wantqr, qr, query) @@ -676,9 +669,8 @@ func TestExecutorShow(t *testing.T) { qr, err = executor.Execute(ctx, "TestExecute", session, "show session status", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ - Fields: buildVarCharFields("Variable_name", "Value"), - Rows: make([][]sqltypes.Value, 0, 2), - RowsAffected: 0, + Fields: buildVarCharFields("Variable_name", "Value"), + Rows: make([][]sqltypes.Value, 0, 2), } utils.MustMatch(t, wantqr, qr, query) @@ -700,7 +692,6 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("TestExecutor/-20"), buildVarCharRow("TestXBadVSchema/e0-"), }, - RowsAffected: 33, } utils.MustMatch(t, wantqr, qr, query) @@ -715,7 +706,6 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("FakeCell", "TestExecutor", "-20", "MASTER", "SERVING", "aa-0000000000", "-20", "1970-01-01T00:00:01Z"), buildVarCharRow("FakeCell", "TestUnsharded", "0", "MASTER", "SERVING", "aa-0000000000", "0", "1970-01-01T00:00:01Z"), }, - RowsAffected: 9, } utils.MustMatch(t, wantqr, qr, query) @@ -723,9 +713,8 @@ func TestExecutorShow(t *testing.T) { qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) 
require.NoError(t, err) wantqr = &sqltypes.Result{ - Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "MasterTermStartTime"), - Rows: [][]sqltypes.Value{}, - RowsAffected: 0, + Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "MasterTermStartTime"), + Rows: [][]sqltypes.Value{}, } utils.MustMatch(t, wantqr, qr, fmt.Sprintf("%q should be empty", query)) @@ -737,7 +726,6 @@ func TestExecutorShow(t *testing.T) { Rows: [][]sqltypes.Value{ buildVarCharRow("FakeCell", "TestExecutor", "-20", "MASTER", "SERVING", "aa-0000000000", "-20", "1970-01-01T00:00:01Z"), }, - RowsAffected: 1, } utils.MustMatch(t, wantqr, qr, query) @@ -759,7 +747,6 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("TestExecutor", "name_user_map", "lookup_hash", "from=name; table=name_user_map; to=user_id", "user"), buildVarCharRow("TestExecutor", "t1_lkp_vdx", "consistent_lookup_unique", "from=unq_col; table=t1_lkp_idx; to=keyspace_id", "t1"), }, - RowsAffected: 11, } utils.MustMatch(t, wantqr, qr, query) @@ -772,7 +759,6 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("Id", "hash_index", "hash", "", ""), buildVarCharRow("name", "name_user_map", "lookup_hash", "from=name; table=name_user_map; to=user_id", "user"), }, - RowsAffected: 2, } utils.MustMatch(t, wantqr, qr, query) @@ -783,7 +769,7 @@ func TestExecutorShow(t *testing.T) { query = "show vschema vindexes on TestExecutor.garbage" _, err = executor.Execute(ctx, "TestExecute", session, query, nil) - wantErr = "table `garbage` does not exist in keyspace `TestExecutor`" + wantErr = "table 'garbage' does not exist in keyspace 'TestExecutor'" assert.EqualError(t, err, wantErr, query) query = "show vschema vindexes on user" @@ -796,7 +782,6 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("Id", "hash_index", "hash", "", ""), buildVarCharRow("name", "name_user_map", "lookup_hash", "from=name; table=name_user_map; to=user_id", 
"user"), }, - RowsAffected: 2, } utils.MustMatch(t, wantqr, qr, query) @@ -810,13 +795,12 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("id", "hash_index", "hash", "", ""), buildVarCharRow("name, lastname", "name_lastname_keyspace_id_map", "lookup", "from=name,lastname; table=name_lastname_keyspace_id_map; to=keyspace_id", "user2"), }, - RowsAffected: 2, } utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes on garbage" _, err = executor.Execute(ctx, "TestExecute", session, query, nil) - wantErr = "table `garbage` does not exist in keyspace `TestExecutor`" + wantErr = "table 'garbage' does not exist in keyspace 'TestExecutor'" assert.EqualError(t, err, wantErr, query) query = "show warnings" @@ -828,8 +812,7 @@ func TestExecutorShow(t *testing.T) { {Name: "Code", Type: sqltypes.Uint16}, {Name: "Message", Type: sqltypes.VarChar}, }, - Rows: [][]sqltypes.Value{}, - RowsAffected: 0, + Rows: [][]sqltypes.Value{}, } utils.MustMatch(t, wantqr, qr, query) @@ -843,8 +826,7 @@ func TestExecutorShow(t *testing.T) { {Name: "Code", Type: sqltypes.Uint16}, {Name: "Message", Type: sqltypes.VarChar}, }, - Rows: [][]sqltypes.Value{}, - RowsAffected: 0, + Rows: [][]sqltypes.Value{}, } utils.MustMatch(t, wantqr, qr, query) @@ -866,7 +848,6 @@ func TestExecutorShow(t *testing.T) { {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(mysql.ERBadTable), sqltypes.NewVarChar("bad table")}, {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(mysql.EROutOfResources), sqltypes.NewVarChar("ks/-40: query timed out")}, }, - RowsAffected: 0, } utils.MustMatch(t, wantqr, qr, query) @@ -883,7 +864,6 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("TestSharded/-20"), buildVarCharRow("TestXBadVSchema/e0-"), }, - RowsAffected: 25, } utils.MustMatch(t, wantqr, qr, fmt.Sprintf("%s, with a bad keyspace", query)) @@ -904,7 +884,6 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("user_msgs"), buildVarCharRow("user_seq"), }, - RowsAffected: 9, } 
utils.MustMatch(t, wantqr, qr, query) @@ -922,7 +901,17 @@ func TestExecutorShow(t *testing.T) { query = "show vschema tables" session = NewSafeSession(&vtgatepb.Session{TargetString: "no_such_keyspace"}) _, err = executor.Execute(ctx, "TestExecute", session, query, nil) - want = "keyspace no_such_keyspace not found in vschema" + want = "Unknown database 'no_such_keyspace' in vschema" + assert.EqualError(t, err, want, query) + + query = "show vitess_migrations" + _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + want = "Unknown database 'no_such_keyspace' in vschema" + assert.EqualError(t, err, want, query) + + query = "show vitess_migrations from ks like '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90'" + _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + want = "Unknown database 'ks' in vschema" assert.EqualError(t, err, want, query) } @@ -954,7 +943,7 @@ func TestExecutorUse(t *testing.T) { } _, err = executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil) - wantErr = "Unknown database 'UnexistentKeyspace' (errno 1049) (sqlstate 42000)" + wantErr = "Unknown database 'UnexistentKeyspace'" if err == nil || err.Error() != wantErr { t.Errorf("got: %v, want %v", err, wantErr) } @@ -1049,27 +1038,26 @@ func TestExecutorOther(t *testing.T) { for _, stmt := range stmts { for _, tc := range tcs { - sbc1.ExecCount.Set(0) - sbc2.ExecCount.Set(0) - sbclookup.ExecCount.Set(0) - - _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) - if tc.hasNoKeyspaceErr { - assert.Error(t, err, errNoKeyspace) - } else if tc.hasDestinationShardErr { - assert.Errorf(t, err, "Destination can only be a single shard for statement: %s, got: DestinationExactKeyRange(-)", stmt) - } else { - assert.NoError(t, err) - } - - diff := cmp.Diff(tc.wantCnts, cnts{ - Sbc1Cnt: sbc1.ExecCount.Get(), - Sbc2Cnt: sbc2.ExecCount.Get(), - SbcLookupCnt: 
sbclookup.ExecCount.Get(), + t.Run(fmt.Sprintf("%s-%s", stmt, tc.targetStr), func(t *testing.T) { + sbc1.ExecCount.Set(0) + sbc2.ExecCount.Set(0) + sbclookup.ExecCount.Set(0) + + _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + if tc.hasNoKeyspaceErr { + assert.Error(t, err, errNoKeyspace) + } else if tc.hasDestinationShardErr { + assert.Errorf(t, err, "Destination can only be a single shard for statement: %s", stmt) + } else { + assert.NoError(t, err) + } + + utils.MustMatch(t, tc.wantCnts, cnts{ + Sbc1Cnt: sbc1.ExecCount.Get(), + Sbc2Cnt: sbc2.ExecCount.Get(), + SbcLookupCnt: sbclookup.ExecCount.Get(), + }) }) - if diff != "" { - t.Errorf("stmt: %s\ntc: %+v\n-want,+got:\n%s", stmt, tc, diff) - } } } } @@ -1129,7 +1117,7 @@ func TestExecutorDDL(t *testing.T) { stmts := []string{ "create table t1(id bigint primary key)", "alter table t2 add primary key id", - "rename table t1 to t2", + "rename table t2 to t3", "truncate table t2", "drop table t2", `create table test_partitioned ( @@ -1151,7 +1139,7 @@ func TestExecutorDDL(t *testing.T) { stmtType := "DDL" _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { - require.EqualError(t, err, "keyspace not specified", "expect query to fail") + require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail") stmtType = "" // For error case, plan is not generated to query log will not contain any stmtType. 
} else { require.NoError(t, err) @@ -1188,7 +1176,7 @@ func TestExecutorDDL(t *testing.T) { sbclookup.ExecCount.Set(0) _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: ""}), stmt.input, nil) if stmt.hasErr { - require.EqualError(t, err, "keyspace not specified", "expect query to fail") + require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail") testQueryLog(t, logChan, "TestExecute", "", stmt.input, 0) } else { require.NoError(t, err) @@ -1197,6 +1185,32 @@ func TestExecutorDDL(t *testing.T) { } } +func TestExecutorDDLFk(t *testing.T) { + executor, _, _, sbc := createExecutorEnv() + + mName := "TestExecutorDDLFk" + stmts := []string{ + "create table t1(id bigint primary key, foreign key (id) references t2(id))", + "alter table t2 add foreign key (id) references t1(id) on delete cascade", + } + + for _, stmt := range stmts { + for _, fkMode := range []string{"allow", "disallow"} { + t.Run(stmt+fkMode, func(t *testing.T) { + sbc.ExecCount.Set(0) + *foreignKeyMode = fkMode + _, err := executor.Execute(ctx, mName, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) + if fkMode == "allow" { + require.NoError(t, err) + require.EqualValues(t, 1, sbc.ExecCount.Get()) + } else { + require.EqualError(t, err, "foreign key constraint is not allowed") + } + }) + } + } +} + func TestExecutorAlterVSchemaKeyspace(t *testing.T) { *vschemaacl.AuthorizedDDLUsers = "%" defer func() { @@ -1356,16 +1370,11 @@ func TestExecutorVindexDDLACL(t *testing.T) { // test that by default no users can perform the operation stmt := "alter vschema create vindex test_hash using hash" - authErr := "not authorized to perform vschema operations" _, err := executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) - if err == nil || err.Error() != authErr { - t.Errorf("expected error '%s' got '%v'", authErr, err) - } + require.EqualError(t, err, `User 'redUser' is not allowed to perform vschema operations`) _, err 
= executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) - if err == nil || err.Error() != authErr { - t.Errorf("expected error '%s' got '%v'", authErr, err) - } + require.EqualError(t, err, `User 'blueUser' is not allowed to perform vschema operations`) // test when all users are enabled *vschemaacl.AuthorizedDDLUsers = "%" @@ -1384,9 +1393,8 @@ func TestExecutorVindexDDLACL(t *testing.T) { *vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" vschemaacl.Init() _, err = executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) - if err == nil || err.Error() != authErr { - t.Errorf("expected error '%s' got '%v'", authErr, err) - } + require.EqualError(t, err, `User 'redUser' is not allowed to perform vschema operations`) + stmt = "alter vschema create vindex test_hash3 using hash" _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) if err != nil { @@ -1428,45 +1436,35 @@ func TestVSchemaStats(t *testing.T) { func TestGetPlanUnnormalized(t *testing.T) { r, _, _, _ := createLegacyExecutorEnv() - emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) - unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) + emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) + unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) - logStats1 := NewLogStats(ctx, "Test", "", nil) query1 := "select * from music_user_map where id = 1" - plan1, err := r.getPlan(emptyvc, query1, makeComments(" /* comment */"), 
map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) + plan1, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) wantSQL := query1 + " /* comment */" if logStats1.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) } - logStats2 := NewLogStats(ctx, "Test", "", nil) - plan2, err := r.getPlan(emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats2) - require.NoError(t, err) + plan2, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan1 != plan2 { t.Errorf("getPlan(query1): plans must be equal: %p %p", plan1, plan2) } want := []string{ "@unknown:" + query1, } - if keys := r.plans.Keys(); !reflect.DeepEqual(keys, want) { - t.Errorf("Plan keys: %s, want %s", keys, want) - } + assertCacheContains(t, r.plans, want) if logStats2.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) } - logStats3 := NewLogStats(ctx, "Test", "", nil) - plan3, err := r.getPlan(unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats3) - require.NoError(t, err) + plan3, logStats3 := getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan1 == plan3 { t.Errorf("getPlan(query1, ks): plans must not be equal: %p %p", plan1, plan3) } if logStats3.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats3.SQL) } - logStats4 := NewLogStats(ctx, "Test", "", nil) - plan4, err := r.getPlan(unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats4) - require.NoError(t, err) + plan4, logStats4 := getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan3 != plan4 
{ t.Errorf("getPlan(query1, ks): plans must be equal: %p %p", plan3, plan4) } @@ -1474,37 +1472,59 @@ func TestGetPlanUnnormalized(t *testing.T) { KsTestUnsharded + "@unknown:" + query1, "@unknown:" + query1, } - if diff := cmp.Diff(want, r.plans.Keys()); diff != "" { - t.Errorf("\n-want,+got:\n%s", diff) - } - //if keys := r.plans.Keys(); !reflect.DeepEqual(keys, want) { - // t.Errorf("Plan keys: %s, want %s", keys, want) - //} + assertCacheContains(t, r.plans, want) if logStats4.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats4.SQL) } } +func assertCacheSize(t *testing.T, c cache.Cache, expected int) { + t.Helper() + var size int + c.ForEach(func(_ interface{}) bool { + size++ + return true + }) + if size != expected { + t.Errorf("getPlan() expected cache to have size %d, but got: %d", expected, size) + } +} + +func assertCacheContains(t *testing.T, c cache.Cache, want []string) { + t.Helper() + for _, wantKey := range want { + if _, ok := c.Get(wantKey); !ok { + t.Errorf("missing key in plan cache: %v", wantKey) + } + } +} + +func getPlanCached(t *testing.T, e *Executor, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *LogStats) { + logStats := NewLogStats(ctx, "Test", "", nil) + plan, err := e.getPlan(vcursor, sql, comments, bindVars, skipQueryPlanCache, logStats) + require.NoError(t, err) + + // Wait for cache to settle + e.plans.Wait() + return plan, logStats +} + func TestGetPlanCacheUnnormalized(t *testing.T) { r, _, _, _ := createLegacyExecutorEnv() - emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) + emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) query1 := "select * from 
music_user_map where id = 1" - logStats1 := NewLogStats(ctx, "Test", "", nil) - _, err := r.getPlan(emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */, logStats1) - require.NoError(t, err) - if r.plans.Size() != 0 { - t.Errorf("getPlan() expected cache to have size 0, but got: %b", r.plans.Size()) - } + + _, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true) + assertCacheSize(t, r.plans, 0) + wantSQL := query1 + " /* comment */" if logStats1.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) } - logStats2 := NewLogStats(ctx, "Test", "", nil) - _, err = r.getPlan(emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */, logStats2) - require.NoError(t, err) - if r.plans.Size() != 1 { - t.Errorf("getPlan() expected cache to have size 1, but got: %b", r.plans.Size()) - } + + _, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) + wantSQL = query1 + " /* comment 2 */" if logStats2.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) @@ -1512,61 +1532,42 @@ func TestGetPlanCacheUnnormalized(t *testing.T) { // Skip cache using directive r, _, _, _ = createLegacyExecutorEnv() - unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) + unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) query1 = "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" - logStats1 = NewLogStats(ctx, "Test", "", nil) - _, err = 
r.getPlan(unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 0 { - t.Errorf("Plan keys should be 0, got: %v", len(r.plans.Keys())) - } + getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 0) query1 = "insert into user(id) values (1), (2)" - _, err = r.getPlan(unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 1 { - t.Errorf("Plan keys should be 1, got: %v", len(r.plans.Keys())) - } + getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) - _, err = r.getPlan(ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 2 { - t.Errorf("Plan keys should be 2, got: %v", len(r.plans.Keys())) - } + ksIDVc1, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) + getPlanCached(t, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(context.Background(), 
NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) - _, err = r.getPlan(ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 2 { - t.Errorf("Plan keys should be 2, got: %v", len(r.plans.Keys())) - } + ksIDVc2, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) + getPlanCached(t, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) } func TestGetPlanCacheNormalized(t *testing.T) { r, _, _, _ := createLegacyExecutorEnv() r.normalize = true - emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) + emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) + query1 := "select * from music_user_map where id = 1" - logStats1 := NewLogStats(ctx, "Test", "", nil) - _, err := r.getPlan(emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */, logStats1) - require.NoError(t, err) - if r.plans.Size() != 0 { - t.Errorf("getPlan() expected cache to have size 0, but got: %b", r.plans.Size()) - } + _, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */) + assertCacheSize(t, r.plans, 0) wantSQL := "select * from music_user_map where id = :vtg1 /* comment */" if logStats1.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) } - logStats2 := 
NewLogStats(ctx, "Test", "", nil) - _, err = r.getPlan(emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */, logStats2) - require.NoError(t, err) - if r.plans.Size() != 1 { - t.Errorf("getPlan() expected cache to have size 1, but got: %b", r.plans.Size()) - } + + _, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */) + assertCacheSize(t, r.plans, 1) if logStats2.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) } @@ -1574,64 +1575,47 @@ func TestGetPlanCacheNormalized(t *testing.T) { // Skip cache using directive r, _, _, _ = createLegacyExecutorEnv() r.normalize = true - unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) + unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) query1 = "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" - logStats1 = NewLogStats(ctx, "Test", "", nil) - _, err = r.getPlan(unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 0 { - t.Errorf("Plan keys should be 0, got: %v", len(r.plans.Keys())) - } + getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 0) query1 = "insert into user(id) values (1), (2)" - _, err = r.getPlan(unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 1 { - t.Errorf("Plan keys should be 1, got: %v", len(r.plans.Keys())) 
- } + getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) - _, err = r.getPlan(ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 2 { - t.Errorf("Plan keys should be 2, got: %v", len(r.plans.Keys())) - } + ksIDVc1, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) + getPlanCached(t, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) - _, err = r.getPlan(ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - if len(r.plans.Keys()) != 2 { - t.Errorf("Plan keys should be 2, got: %v", len(r.plans.Keys())) - } + ksIDVc2, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) + getPlanCached(t, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + 
assertCacheSize(t, r.plans, 2) } func TestGetPlanNormalized(t *testing.T) { r, _, _, _ := createLegacyExecutorEnv() r.normalize = true - emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) - unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil) + emptyvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) + unshardedvc, _ := newVCursorImpl(ctx, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false) query1 := "select * from music_user_map where id = 1" query2 := "select * from music_user_map where id = 2" normalized := "select * from music_user_map where id = :vtg1" - logStats1 := NewLogStats(ctx, "Test", "", nil) - plan1, err := r.getPlan(emptyvc, query1, makeComments(" /* comment 1 */"), map[string]*querypb.BindVariable{}, false, logStats1) - require.NoError(t, err) - logStats2 := NewLogStats(ctx, "Test", "", nil) - plan2, err := r.getPlan(emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false, logStats2) - require.NoError(t, err) + + plan1, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 1 */"), map[string]*querypb.BindVariable{}, false) + plan2, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) + if plan1 != plan2 { t.Errorf("getPlan(query1): plans must be equal: %p %p", plan1, plan2) } want := []string{ "@unknown:" + normalized, } - if keys := r.plans.Keys(); !reflect.DeepEqual(keys, want) { - t.Errorf("Plan keys: %s, want %s", keys, want) - } + 
assertCacheContains(t, r.plans, want) wantSQL := normalized + " /* comment 1 */" if logStats1.SQL != wantSQL { @@ -1642,9 +1626,7 @@ func TestGetPlanNormalized(t *testing.T) { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) } - logStats3 := NewLogStats(ctx, "Test", "", nil) - plan3, err := r.getPlan(emptyvc, query2, makeComments(" /* comment 3 */"), map[string]*querypb.BindVariable{}, false, logStats3) - require.NoError(t, err) + plan3, logStats3 := getPlanCached(t, r, emptyvc, query2, makeComments(" /* comment 3 */"), map[string]*querypb.BindVariable{}, false) if plan1 != plan3 { t.Errorf("getPlan(query2): plans must be equal: %p %p", plan1, plan3) } @@ -1653,9 +1635,7 @@ func TestGetPlanNormalized(t *testing.T) { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats3.SQL) } - logStats4 := NewLogStats(ctx, "Test", "", nil) - plan4, err := r.getPlan(emptyvc, normalized, makeComments(" /* comment 4 */"), map[string]*querypb.BindVariable{}, false, logStats4) - require.NoError(t, err) + plan4, logStats4 := getPlanCached(t, r, emptyvc, normalized, makeComments(" /* comment 4 */"), map[string]*querypb.BindVariable{}, false) if plan1 != plan4 { t.Errorf("getPlan(normalized): plans must be equal: %p %p", plan1, plan4) } @@ -1664,9 +1644,8 @@ func TestGetPlanNormalized(t *testing.T) { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats4.SQL) } - logStats5 := NewLogStats(ctx, "Test", "", nil) - plan3, err = r.getPlan(unshardedvc, query1, makeComments(" /* comment 5 */"), map[string]*querypb.BindVariable{}, false, logStats5) - require.NoError(t, err) + var logStats5 *LogStats + plan3, logStats5 = getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 5 */"), map[string]*querypb.BindVariable{}, false) if plan1 == plan3 { t.Errorf("getPlan(query1, ks): plans must not be equal: %p %p", plan1, plan3) } @@ -1675,9 +1654,7 @@ func TestGetPlanNormalized(t *testing.T) { t.Errorf("logstats sql want \"%s\" got \"%s\"", 
wantSQL, logStats5.SQL) } - logStats6 := NewLogStats(ctx, "Test", "", nil) - plan4, err = r.getPlan(unshardedvc, query1, makeComments(" /* comment 6 */"), map[string]*querypb.BindVariable{}, false, logStats6) - require.NoError(t, err) + plan4, _ = getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 6 */"), map[string]*querypb.BindVariable{}, false) if plan3 != plan4 { t.Errorf("getPlan(query1, ks): plans must be equal: %p %p", plan3, plan4) } @@ -1685,20 +1662,14 @@ func TestGetPlanNormalized(t *testing.T) { KsTestUnsharded + "@unknown:" + normalized, "@unknown:" + normalized, } - if keys := r.plans.Keys(); !reflect.DeepEqual(keys, want) { - t.Errorf("Plan keys: %s, want %s", keys, want) - } + assertCacheContains(t, r.plans, want) - // Errors - logStats7 := NewLogStats(ctx, "Test", "", nil) - _, err = r.getPlan(emptyvc, "syntax", makeComments(""), map[string]*querypb.BindVariable{}, false, logStats7) + _, err := r.getPlan(emptyvc, "syntax", makeComments(""), map[string]*querypb.BindVariable{}, false, nil) wantErr := "syntax error at position 7 near 'syntax'" if err == nil || err.Error() != wantErr { t.Errorf("getPlan(syntax): %v, want %s", err, wantErr) } - if keys := r.plans.Keys(); !reflect.DeepEqual(keys, want) { - t.Errorf("Plan keys: %s, want %s", keys, want) - } + assertCacheContains(t, r.plans, want) } func TestPassthroughDDL(t *testing.T) { @@ -1914,7 +1885,7 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) { for _, query := range testMaxPayloadSizeExceeded { _, err := executor.Execute(context.Background(), "TestExecutorMaxPayloadSizeExceeded", session, query, nil) require.NotNil(t, err) - assert.EqualError(t, err, "query payload size above threshold (errno 1153) (sqlstate HY000)") + assert.EqualError(t, err, "query payload size above threshold") } assert.Equal(t, warningCount, warnings.Counts()["WarnPayloadSizeExceeded"], "warnings count") @@ -2017,24 +1988,26 @@ func TestExecutorOtherRead(t *testing.T) { for _, stmt := range stmts { 
for _, tc := range tcs { - sbc1.ExecCount.Set(0) - sbc2.ExecCount.Set(0) - sbclookup.ExecCount.Set(0) - - _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) - if tc.hasNoKeyspaceErr { - assert.EqualError(t, err, "keyspace not specified") - } else if tc.hasDestinationShardErr { - assert.Errorf(t, err, "Destination can only be a single shard for statement: %s, got: DestinationExactKeyRange(-)", stmt) - } else { - assert.NoError(t, err) - } - - utils.MustMatch(t, tc.wantCnts, cnts{ - Sbc1Cnt: sbc1.ExecCount.Get(), - Sbc2Cnt: sbc2.ExecCount.Get(), - SbcLookupCnt: sbclookup.ExecCount.Get(), - }, "count did not match") + t.Run(stmt+tc.targetStr, func(t *testing.T) { + sbc1.ExecCount.Set(0) + sbc2.ExecCount.Set(0) + sbclookup.ExecCount.Set(0) + + _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + if tc.hasNoKeyspaceErr { + assert.EqualError(t, err, errNoKeyspace.Error()) + } else if tc.hasDestinationShardErr { + assert.Errorf(t, err, "Destination can only be a single shard for statement: %s, got: DestinationExactKeyRange(-)", stmt) + } else { + assert.NoError(t, err) + } + + utils.MustMatch(t, tc.wantCnts, cnts{ + Sbc1Cnt: sbc1.ExecCount.Get(), + Sbc2Cnt: sbc2.ExecCount.Get(), + SbcLookupCnt: sbclookup.ExecCount.Get(), + }, "count did not match") + }) } } } @@ -2050,7 +2023,7 @@ func TestExecutorExplain(t *testing.T) { require.NoError(t, err) require.Equal(t, - `[[VARCHAR("Route") VARCHAR("SelectScatter") VARCHAR("TestExecutor") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("select * from user")]]`, + `[[VARCHAR("Route") VARCHAR("SelectScatter") VARCHAR("TestExecutor") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("select * from `+"`user`"+`")]]`, fmt.Sprintf("%v", result.Rows)) result, err = executorExec(executor, "explain format = vitess select 42", bindVars) @@ -2173,7 +2146,7 @@ func 
TestExecutorSavepointInTx(t *testing.T) { Sql: "release savepoint a", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select id from user where id = 1", + Sql: "select id from `user` where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }, { Sql: "savepoint b", @@ -2205,7 +2178,7 @@ func TestExecutorSavepointInTx(t *testing.T) { Sql: "release savepoint b", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select id from user where id = 3", + Sql: "select id from `user` where id = 3", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") @@ -2244,12 +2217,12 @@ func TestExecutorSavepointWithoutTx(t *testing.T) { _, err = exec(executor, session, "select id from user where id = 3") require.NoError(t, err) sbc1WantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where id = 1", + Sql: "select id from `user` where id = 1", BindVariables: map[string]*querypb.BindVariable{}, }} sbc2WantQueries := []*querypb.BoundQuery{{ - Sql: "select id from user where id = 3", + Sql: "select id from `user` where id = 3", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") @@ -2264,6 +2237,101 @@ func TestExecutorSavepointWithoutTx(t *testing.T) { testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user where id = 3", 1) } +func TestExecutorCallProc(t *testing.T) { + executor, sbc1, sbc2, sbcUnsharded := createExecutorEnv() + + type cnts struct { + Sbc1Cnt int64 + Sbc2Cnt int64 + SbcUnsharded int64 + } + + tcs := []struct { + name, targetStr string + + hasNoKeyspaceErr bool + unshardedOnlyErr bool + wantCnts cnts + }{{ + name: "simple call with no keyspace set", + targetStr: "", + hasNoKeyspaceErr: true, + }, { + name: "keyrange targeted keyspace", + targetStr: "TestExecutor[-]", + wantCnts: cnts{ + Sbc1Cnt: 1, + Sbc2Cnt: 1, + SbcUnsharded: 0, + }, + }, { + name: "unsharded call proc", + targetStr: 
KsTestUnsharded, + wantCnts: cnts{ + Sbc1Cnt: 0, + Sbc2Cnt: 0, + SbcUnsharded: 1, + }, + }, { + name: "should fail with sharded call proc", + targetStr: "TestExecutor", + unshardedOnlyErr: true, + }} + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + sbc1.ExecCount.Set(0) + sbc2.ExecCount.Set(0) + sbcUnsharded.ExecCount.Set(0) + + _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), "CALL proc()", nil) + if tc.hasNoKeyspaceErr { + assert.EqualError(t, err, errNoKeyspace.Error()) + } else if tc.unshardedOnlyErr { + require.EqualError(t, err, "CALL is not supported for sharded database") + } else { + assert.NoError(t, err) + } + + utils.MustMatch(t, tc.wantCnts, cnts{ + Sbc1Cnt: sbc1.ExecCount.Get(), + Sbc2Cnt: sbc2.ExecCount.Get(), + SbcUnsharded: sbcUnsharded.ExecCount.Get(), + }, "count did not match") + }) + } +} + +func TestExecutorTempTable(t *testing.T) { + executor, _, _, sbcUnsharded := createExecutorEnv() + executor.warnShardedOnly = true + creatQuery := "create temporary table temp_t(id bigint primary key)" + session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) + ctx := context.Background() + _, err := executor.Execute(ctx, "TestExecutorTempTable", session, creatQuery, nil) + require.NoError(t, err) + assert.EqualValues(t, 1, sbcUnsharded.ExecCount.Get()) + assert.NotEmpty(t, session.Warnings) + + before := executor.plans.Len() + + _, err = executor.Execute(ctx, "TestExecutorTempTable", session, "select * from temp_t", nil) + require.NoError(t, err) + + assert.Equal(t, before, executor.plans.Len()) +} + +func TestExecutorShowVitessMigrations(t *testing.T) { + executor, sbc1, sbc2, _ := createExecutorEnv() + showQuery := "show vitess_migrations" + session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) + ctx := context.Background() + _, err := executor.Execute(ctx, "", session, showQuery, nil) + require.NoError(t, err) + 
assert.Contains(t, sbc1.StringQueries(), "SELECT * FROM _vt.schema_migrations") + assert.Contains(t, sbc2.StringQueries(), "SELECT * FROM _vt.schema_migrations") +} + func exec(executor *Executor, session *SafeSession, sql string) (*sqltypes.Result, error) { return executor.Execute(context.Background(), "TestExecute", session, sql, nil) } diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go index b6346761cbe..e0f9432f0bf 100644 --- a/go/vt/vtgate/executor_vschema_ddl_test.go +++ b/go/vt/vtgate/executor_vschema_ddl_test.go @@ -58,14 +58,14 @@ func waitForVindex(t *testing.T, ks, name string, watch chan *vschemapb.SrvVSche t.Errorf("vschema was not updated as expected") } - // Wait up to 10ms until the vindex manager gets notified of the update + // Wait up to 100ms until the vindex manager gets notified of the update for i := 0; i < 10; i++ { vschema := executor.vm.GetCurrentSrvVschema() vindex, ok := vschema.Keyspaces[ks].Vindexes[name] if ok { return vschema, vindex } - time.Sleep(time.Millisecond) + time.Sleep(10 * time.Millisecond) } t.Fatalf("updated vschema did not contain %s", name) @@ -75,7 +75,7 @@ func waitForVindex(t *testing.T, ks, name string, watch chan *vschemapb.SrvVSche func waitForVschemaTables(t *testing.T, ks string, tables []string, executor *Executor) *vschemapb.SrvVSchema { t.Helper() - // Wait up to 10ms until the vindex manager gets notified of the update + // Wait up to 100ms until the vindex manager gets notified of the update for i := 0; i < 10; i++ { vschema := executor.vm.GetCurrentSrvVschema() gotTables := []string{} @@ -87,7 +87,7 @@ func waitForVschemaTables(t *testing.T, ks string, tables []string, executor *Ex if reflect.DeepEqual(tables, gotTables) { return vschema } - time.Sleep(time.Millisecond) + time.Sleep(10 * time.Millisecond) } t.Fatalf("updated vschema did not contain tables %v", tables) @@ -725,16 +725,11 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { // test that by 
default no users can perform the operation stmt := "alter vschema create vindex test_hash using hash" - authErr := "not authorized to perform vschema operations" _, err := executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) - if err == nil || err.Error() != authErr { - t.Errorf("expected error '%s' got '%v'", authErr, err) - } + require.EqualError(t, err, `User 'redUser' is not allowed to perform vschema operations`) _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) - if err == nil || err.Error() != authErr { - t.Errorf("expected error '%s' got '%v'", authErr, err) - } + require.EqualError(t, err, `User 'blueUser' is not allowed to perform vschema operations`) // test when all users are enabled *vschemaacl.AuthorizedDDLUsers = "%" @@ -753,9 +748,8 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { *vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" vschemaacl.Init() _, err = executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) - if err == nil || err.Error() != authErr { - t.Errorf("expected error '%s' got '%v'", authErr, err) - } + require.EqualError(t, err, `User 'redUser' is not allowed to perform vschema operations`) + stmt = "alter vschema create vindex test_hash3 using hash" _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) if err != nil { diff --git a/go/vt/vtgate/executor_vstream.go b/go/vt/vtgate/executor_vstream.go index bb8d4f4cc3d..d41012df427 100644 --- a/go/vt/vtgate/executor_vstream.go +++ b/go/vt/vtgate/executor_vstream.go @@ -44,7 +44,7 @@ func (e *Executor) handleVStream(ctx context.Context, sql string, target querypb vstreamStmt, ok := stmt.(*sqlparser.VStream) if !ok { logStats.Error = err - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unrecognized VSTREAM statement: %v", sql) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unrecognized VSTREAM statement: %v", sql) } table, _, _, _, err := vcursor.FindTable(vstreamStmt.Table) @@ -150,9 +150,8 @@ func 
(e *Executor) startVStream(ctx context.Context, keyspace string, shard stri } send := func(evs []*binlogdata.VEvent) error { result := &sqltypes.Result{ - Fields: nil, - RowsAffected: 0, - Rows: [][]sqltypes.Value{}, + Fields: nil, + Rows: [][]sqltypes.Value{}, } for _, ev := range evs { if totalRows+numRows >= limit { @@ -188,7 +187,6 @@ func (e *Executor) startVStream(ctx context.Context, keyspace string, shard stri break } } - result.RowsAffected = uint64(numRows) default: } } diff --git a/go/vt/vtgate/executor_vstream_test.go b/go/vt/vtgate/executor_vstream_test.go index 8759c3e4288..495aa69317e 100644 --- a/go/vt/vtgate/executor_vstream_test.go +++ b/go/vt/vtgate/executor_vstream_test.go @@ -20,6 +20,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/test/utils" + "context" "github.com/stretchr/testify/require" @@ -93,8 +95,7 @@ func TestVStreamFrom(t *testing.T) { {Name: "id", Type: sqltypes.Int64}, {Name: "val", Type: sqltypes.VarChar}, }, - RowsAffected: 3, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("+"), sqltypes.NewInt64(1), @@ -109,9 +110,7 @@ func TestVStreamFrom(t *testing.T) { sqltypes.NewVarChar("xyz"), }}, } - if !result.Equal(want) { - t.Errorf("result: %+v, want %+v", result, want) - } + utils.MustMatch(t, want, result) } func vstreamEvents(executor *Executor, sql string) (qr *sqltypes.Result, err error) { diff --git a/go/vt/vtgate/fakerpcvtgateconn/conn.go b/go/vt/vtgate/fakerpcvtgateconn/conn.go index 9142fa64386..ad655cb645d 100644 --- a/go/vt/vtgate/fakerpcvtgateconn/conn.go +++ b/go/vt/vtgate/fakerpcvtgateconn/conn.go @@ -164,7 +164,9 @@ func (conn *FakeVTGateConn) ResolveTransaction(ctx context.Context, dtid string) } // VStream streams binlog events. 
-func (conn *FakeVTGateConn) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter) (vtgateconn.VStreamReader, error) { +func (conn *FakeVTGateConn) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, + filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags) (vtgateconn.VStreamReader, error) { + return nil, fmt.Errorf("NYI") } diff --git a/go/vt/vtgate/grpcvtgateconn/conn.go b/go/vt/vtgate/grpcvtgateconn/conn.go index 805558131a4..a82e81c3a3a 100644 --- a/go/vt/vtgate/grpcvtgateconn/conn.go +++ b/go/vt/vtgate/grpcvtgateconn/conn.go @@ -184,12 +184,15 @@ func (a *vstreamAdapter) Recv() ([]*binlogdatapb.VEvent, error) { return r.Events, nil } -func (conn *vtgateConn) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter) (vtgateconn.VStreamReader, error) { +func (conn *vtgateConn) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, + filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags) (vtgateconn.VStreamReader, error) { + req := &vtgatepb.VStreamRequest{ CallerId: callerid.EffectiveCallerIDFromContext(ctx), TabletType: tabletType, Vgtid: vgtid, Filter: filter, + Flags: flags, } stream, err := conn.c.VStream(ctx, req) if err != nil { diff --git a/go/vt/vtgate/grpcvtgateconn/fuzz_flaky_test.go b/go/vt/vtgate/grpcvtgateconn/fuzz_flaky_test.go new file mode 100644 index 00000000000..1850f69d98e --- /dev/null +++ b/go/vt/vtgate/grpcvtgateconn/fuzz_flaky_test.go @@ -0,0 +1,119 @@ +// +build gofuzz + +/* +Copyright 2021 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package grpcvtgateconn + +import ( + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "testing" + + "google.golang.org/grpc" + + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtgate/grpcvtgateservice" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" +) + +func init() { + testing.Init() +} + +func IsDivisibleBy(n int, divisibleby int) bool { + return (n % divisibleby) == 0 +} + +func Fuzz(data []byte) int { + t := &testing.T{} + if len(data) < 20 { + return -1 + } + if IsDivisibleBy(len(data), 10) == false { + return -1 + } + + var opts []grpc.ServerOption + // fake service + service := CreateFakeServer(t) + + // listen on a random port + listener, err := net.Listen("tcp", ":0") + if err != nil { + fmt.Println("Cannot listen: %v", err) + return -1 + } + defer listener.Close() + + // add auth interceptors + opts = append(opts, grpc.StreamInterceptor(servenv.FakeAuthStreamInterceptor)) + opts = append(opts, grpc.UnaryInterceptor(servenv.FakeAuthUnaryInterceptor)) + + // Create a gRPC server and listen on the port + server := grpc.NewServer(opts...) 
+ grpcvtgateservice.RegisterForTest(server, service) + go server.Serve(listener) + defer server.GracefulStop() + + authJSON := `{ + "Username": "valid", + "Password": "valid" + }` + + f, err := ioutil.TempFile("", "static_auth_creds.json") + if err != nil { + return -1 + } + defer os.Remove(f.Name()) + if _, err := io.WriteString(f, authJSON); err != nil { + return -1 + } + if err := f.Close(); err != nil { + return -1 + } + + // Create a Go RPC client connecting to the server + ctx := context.Background() + flag.Set("grpc_auth_static_client_creds", f.Name()) + client, err := dial(ctx, listener.Addr().String()) + if err != nil { + fmt.Println("dial failed: %v", err) + return -1 + } + defer client.Close() + + RegisterTestDialProtocol(client) + conn, err := vtgateconn.DialProtocol(context.Background(), "test", "") + if err != nil { + fmt.Println("Got err: %v from vtgateconn.DialProtocol", err) + return -1 + } + session := conn.Session("connection_ks@rdonly", testExecuteOptions) + + // Do the actual fuzzing: + // 10 executions per fuzz run + ctx = newContext() + chunkSize := len(data) / 10 + for i := 0; i < len(data); i = i + chunkSize { + from := i //lower + to := i + chunkSize //upper + _, _ = session.Execute(ctx, string(data[from:to]), nil) + } + return 1 +} diff --git a/go/vt/vtgate/grpcvtgateconn/suite_test.go b/go/vt/vtgate/grpcvtgateconn/suite_test.go index f767657964c..5d126fd7802 100644 --- a/go/vt/vtgate/grpcvtgateconn/suite_test.go +++ b/go/vt/vtgate/grpcvtgateconn/suite_test.go @@ -212,7 +212,7 @@ func (f *fakeVTGateService) ResolveTransaction(ctx context.Context, dtid string) return nil } -func (f *fakeVTGateService) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { +func (f *fakeVTGateService) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags 
*vtgatepb.VStreamFlags, send func([]*binlogdatapb.VEvent) error) error { panic("unimplemented") } diff --git a/go/vt/vtgate/grpcvtgateservice/server.go b/go/vt/vtgate/grpcvtgateservice/server.go index ae108c06ee3..43bbe917bec 100644 --- a/go/vt/vtgate/grpcvtgateservice/server.go +++ b/go/vt/vtgate/grpcvtgateservice/server.go @@ -198,6 +198,7 @@ func (vtg *VTGate) VStream(request *vtgatepb.VStreamRequest, stream vtgateservic request.TabletType, request.Vgtid, request.Filter, + request.Flags, func(events []*binlogdatapb.VEvent) error { return stream.Send(&vtgatepb.VStreamResponse{ Events: events, diff --git a/go/vt/vtgate/legacy_scatter_conn_test.go b/go/vt/vtgate/legacy_scatter_conn_test.go index aaac0c20e31..4c48cd2bf04 100644 --- a/go/vt/vtgate/legacy_scatter_conn_test.go +++ b/go/vt/vtgate/legacy_scatter_conn_test.go @@ -163,12 +163,8 @@ func TestScatterConnStreamExecuteMulti(t *testing.T) { // type, and error code. func verifyScatterConnError(t *testing.T, err error, wantErr string, wantCode vtrpcpb.Code) { t.Helper() - if err == nil || err.Error() != wantErr { - t.Errorf("wanted error: %s, got error: %v", wantErr, err) - } - if code := vterrors.Code(err); code != wantCode { - t.Errorf("wanted error code: %s, got: %v", wantCode, code) - } + assert.EqualError(t, err, wantErr) + assert.Equal(t, wantCode, vterrors.Code(err)) } func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, shards []string) (*sqltypes.Result, error)) { @@ -189,7 +185,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s sbc := hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = f(sc, []string{"0"}) - want := fmt.Sprintf("target: %v.0.replica, used tablet: aa-0 (0): INVALID_ARGUMENT error", name) + want := fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error", name) // Verify server error string. 
if err == nil || err.Error() != want { t.Errorf("want %s, got %v", want, err) @@ -209,7 +205,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = f(sc, []string{"0", "1"}) // Verify server errors are consolidated. - want = fmt.Sprintf("target: %v.0.replica, used tablet: aa-0 (0): INVALID_ARGUMENT error\ntarget: %v.1.replica, used tablet: aa-0 (1): INVALID_ARGUMENT error", name, name) + want = fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error\ntarget: %v.1.replica: INVALID_ARGUMENT error", name, name) verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) // Ensure that we tried only once. if execCount := sbc0.ExecCount.Get(); execCount != 1 { @@ -229,7 +225,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s sbc1.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1 _, err = f(sc, []string{"0", "1"}) // Verify server errors are consolidated. - want = fmt.Sprintf("target: %v.0.replica, used tablet: aa-0 (0): INVALID_ARGUMENT error\ntarget: %v.1.replica, used tablet: aa-0 (1): RESOURCE_EXHAUSTED error", name, name) + want = fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error\ntarget: %v.1.replica: RESOURCE_EXHAUSTED error", name, name) // We should only surface the higher priority error code verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) // Ensure that we tried only once. 
@@ -267,8 +263,8 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s if execCount := sbc1.ExecCount.Get(); execCount != 1 { t.Errorf("want 1, got %v", execCount) } - if qr.RowsAffected != 2 { - t.Errorf("want 2, got %v", qr.RowsAffected) + if qr.RowsAffected != 0 { + t.Errorf("want 0, got %v", qr.RowsAffected) } if len(qr.Rows) != 2 { t.Errorf("want 2, got %v", len(qr.Rows)) @@ -314,7 +310,7 @@ func TestMaxMemoryRows(t *testing.T) { err string }{ {true, ""}, - {false, "in-memory row count exceeded allowed limit of 3 (errno 1153) (sqlstate HY000)"}, + {false, "in-memory row count exceeded allowed limit of 3"}, } for _, test := range testCases { diff --git a/go/vt/vtgate/logstats.go b/go/vt/vtgate/logstats.go index 8ebfe584acb..aeac6fe84c2 100644 --- a/go/vt/vtgate/logstats.go +++ b/go/vt/vtgate/logstats.go @@ -47,8 +47,9 @@ type LogStats struct { BindVariables map[string]*querypb.BindVariable StartTime time.Time EndTime time.Time - ShardQueries uint32 + ShardQueries uint64 RowsAffected uint64 + RowsReturned uint64 PlanTime time.Duration ExecuteTime time.Duration CommitTime time.Duration @@ -125,7 +126,7 @@ func (stats *LogStats) RemoteAddrUsername() (string, string) { // Logf formats the log record to the given writer, either as // tab-separated list of logged fields or as JSON. 
func (stats *LogStats) Logf(w io.Writer, params url.Values) error { - if !streamlog.ShouldEmitLog(stats.SQL) { + if !streamlog.ShouldEmitLog(stats.SQL, stats.RowsAffected, stats.RowsReturned) { return nil } diff --git a/go/vt/vtgate/logstats_test.go b/go/vt/vtgate/logstats_test.go index 7a19e75069d..280cf9214c9 100644 --- a/go/vt/vtgate/logstats_test.go +++ b/go/vt/vtgate/logstats_test.go @@ -158,6 +158,35 @@ func TestLogStatsFilter(t *testing.T) { } } +func TestLogStatsRowThreshold(t *testing.T) { + defer func() { *streamlog.QueryLogRowThreshold = 0 }() + + logStats := NewLogStats(context.Background(), "test", "sql1 /* LOG_THIS_QUERY */", map[string]*querypb.BindVariable{"intVal": sqltypes.Int64BindVariable(1)}) + logStats.StartTime = time.Date(2017, time.January, 1, 1, 2, 3, 0, time.UTC) + logStats.EndTime = time.Date(2017, time.January, 1, 1, 2, 4, 1234, time.UTC) + params := map[string][]string{"full": {}} + + got := testFormat(logStats, url.Values(params)) + want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\" ]\t0\t0\t\"\"\t\"\"\t\"\"\t\"\"\t\n" + if got != want { + t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) + } + + *streamlog.QueryLogRowThreshold = 0 + got = testFormat(logStats, url.Values(params)) + want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\" ]\t0\t0\t\"\"\t\"\"\t\"\"\t\"\"\t\n" + if got != want { + t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) + } + + *streamlog.QueryLogRowThreshold = 1 + got = testFormat(logStats, url.Values(params)) + want = "" + if got != want { + t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) + } +} + func TestLogStatsContextHTML(t *testing.T) { html := "HtmlContext" callInfo := &fakecallinfo.FakeCallInfo{ 
diff --git a/go/vt/vtgate/mysql_protocol_test.go b/go/vt/vtgate/mysql_protocol_test.go index c5443236f1f..33e1a814f5b 100644 --- a/go/vt/vtgate/mysql_protocol_test.go +++ b/go/vt/vtgate/mysql_protocol_test.go @@ -55,6 +55,7 @@ func TestMySQLProtocolExecute(t *testing.T) { options := &querypb.ExecuteOptions{ IncludedFields: querypb.ExecuteOptions_ALL, + Workload: querypb.ExecuteOptions_OLTP, } if !proto.Equal(sbc.Options[0], options) { t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) @@ -132,12 +133,12 @@ func TestMySQLProtocolExecuteUseStatement(t *testing.T) { // No replica tablets, this should also fail _, err = c.ExecuteFetch("select id from t1", 10, true /* wantfields */) require.Error(t, err) - assert.Contains(t, err.Error(), "no valid tablet") + assert.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestUnsharded" shard:"0" tablet_type:REPLICA`) } func TestMysqlProtocolInvalidDB(t *testing.T) { _, err := mysqlConnect(&mysql.ConnParams{DbName: "invalidDB"}) - require.EqualError(t, err, "vtgate: : Unknown database 'invalidDB' (errno 1049) (sqlstate 42000) (errno 1049) (sqlstate 42000)") + require.EqualError(t, err, "Unknown database 'invalidDB' (errno 1049) (sqlstate 42000)") } func TestMySQLProtocolClientFoundRows(t *testing.T) { @@ -160,7 +161,9 @@ func TestMySQLProtocolClientFoundRows(t *testing.T) { options := &querypb.ExecuteOptions{ IncludedFields: querypb.ExecuteOptions_ALL, ClientFoundRows: true, + Workload: querypb.ExecuteOptions_OLTP, } + if !proto.Equal(sbc.Options[0], options) { t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) } diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go index 453e922a1d8..3865ffabad3 100644 --- a/go/vt/vtgate/plan_execute.go +++ b/go/vt/vtgate/plan_execute.go @@ -20,8 +20,6 @@ import ( "context" "time" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb 
"vitess.io/vitess/go/vt/proto/vtrpc" @@ -45,7 +43,7 @@ func (e *Executor) newExecute(ctx context.Context, safeSession *SafeSession, sql } query, comments := sqlparser.SplitMarginComments(sql) - vcursor, err := newVCursorImpl(ctx, safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv) + vcursor, err := newVCursorImpl(ctx, safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly) if err != nil { return 0, nil, err } @@ -73,6 +71,11 @@ func (e *Executor) newExecute(ctx context.Context, safeSession *SafeSession, sql safeSession.ClearWarnings() } + // add any warnings that the planner wants to add + for _, warning := range plan.Warnings { + safeSession.RecordWarning(warning) + } + // We need to explicitly handle errors, and begin/commit/rollback, since these control transactions. Everything else // will fall through and be handled through planning switch plan.Type { @@ -94,13 +97,13 @@ func (e *Executor) newExecute(ctx context.Context, safeSession *SafeSession, sql case sqlparser.StmtSRollback: qr, err := e.handleSavepoint(ctx, safeSession, plan.Original, "Rollback Savepoint", logStats, func(query string) (*sqltypes.Result, error) { // Error as there is no transaction, so there is no savepoint that exists. - return nil, mysql.NewSQLError(mysql.ERSavepointNotExist, mysql.SSSyntaxErrorOrAccessViolation, "SAVEPOINT does not exist: %s", query) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.SPDoesNotExist, "SAVEPOINT does not exist: %s", query) }, vcursor.ignoreMaxMemoryRows) return sqlparser.StmtSRollback, qr, err case sqlparser.StmtRelease: qr, err := e.handleSavepoint(ctx, safeSession, plan.Original, "Release Savepoint", logStats, func(query string) (*sqltypes.Result, error) { // Error as there is no transaction, so there is no savepoint that exists. 
- return nil, mysql.NewSQLError(mysql.ERSavepointNotExist, mysql.SSSyntaxErrorOrAccessViolation, "SAVEPOINT does not exist: %s", query) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.SPDoesNotExist, "SAVEPOINT does not exist: %s", query) }, vcursor.ignoreMaxMemoryRows) return sqlparser.StmtRelease, qr, err } @@ -179,7 +182,7 @@ func (e *Executor) executePlan(ctx context.Context, plan *engine.Plan, vcursor * logStats.Table = plan.Instructions.GetTableName() logStats.TabletType = vcursor.TabletType().String() errCount := e.logExecutionEnd(logStats, execStart, plan, err, qr) - plan.AddStats(1, time.Since(logStats.StartTime), uint64(logStats.ShardQueries), logStats.RowsAffected, errCount) + plan.AddStats(1, time.Since(logStats.StartTime), uint64(logStats.ShardQueries), logStats.RowsAffected, logStats.RowsReturned, errCount) // Check if there was partial DML execution. If so, rollback the transaction. if err != nil && safeSession.InTransaction() && vcursor.rollbackOnPartialExec { @@ -201,6 +204,7 @@ func (e *Executor) logExecutionEnd(logStats *LogStats, execStart time.Time, plan errCount = 1 } else { logStats.RowsAffected = qr.RowsAffected + logStats.RowsReturned = uint64(len(qr.Rows)) } return errCount } diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go index 08e15b876d0..c610c36d506 100644 --- a/go/vt/vtgate/planbuilder/builder.go +++ b/go/vt/vtgate/planbuilder/builder.go @@ -18,9 +18,12 @@ package planbuilder import ( "errors" + "flag" + "sort" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vterrors" @@ -33,6 +36,10 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +var ( + enableOnlineDDL = flag.Bool("enable_online_ddl", true, "Allow users to submit, review and control Online DDL") +) + // ContextVSchema defines the interface for this package to fetch // info about tables. 
type ContextVSchema interface { @@ -48,8 +55,38 @@ type ContextVSchema interface { SysVarSetEnabled() bool KeyspaceExists(keyspace string) bool AllKeyspace() ([]*vindexes.Keyspace, error) + GetSemTable() *semantics.SemTable + Planner() PlannerVersion + + // ErrorIfShardedF will return an error if the keyspace is sharded, + // and produce a warning if the vtgate if configured to do so + ErrorIfShardedF(keyspace *vindexes.Keyspace, warn, errFmt string, params ...interface{}) error + + // WarnUnshardedOnly is used when a feature is only supported in unsharded mode. + // This will let the user know that they are using something + // that could become a problem if they move to a sharded keyspace + WarnUnshardedOnly(format string, params ...interface{}) + + // ForeignKeyMode returns the foreign_key flag value + ForeignKeyMode() string } +// PlannerVersion is an alias here to make the code more readable +type PlannerVersion = querypb.ExecuteOptions_PlannerVersion + +const ( + // V3 is also the default planner + V3 = querypb.ExecuteOptions_V3 + // Gen4 uses the default Gen4 planner, which is the greedy planner + Gen4 = querypb.ExecuteOptions_Gen4 + // Gen4GreedyOnly uses only the faster greedy planner + Gen4GreedyOnly = querypb.ExecuteOptions_Gen4Greedy + // Gen4Left2Right tries to emulate the V3 planner by only joining plans in the order they are listed in the FROM-clause + Gen4Left2Right = querypb.ExecuteOptions_Gen4Left2Right + // Gen4WithFallback first attempts to use the Gen4 planner, and if that fails, uses the V3 planner instead + Gen4WithFallback = querypb.ExecuteOptions_Gen4WithFallback +) + type truncater interface { SetTruncateColumnCount(int) } @@ -57,7 +94,7 @@ type truncater interface { // TestBuilder builds a plan for a query based on the specified vschema. 
// This method is only used from tests func TestBuilder(query string, vschema ContextVSchema) (*engine.Plan, error) { - stmt, err := sqlparser.Parse(query) + stmt, reservedVars, err := sqlparser.Parse2(query) if err != nil { return nil, err } @@ -66,15 +103,15 @@ func TestBuilder(query string, vschema ContextVSchema) (*engine.Plan, error) { return nil, err } - return BuildFromStmt(query, result.AST, vschema, result.BindVarNeeds) + return BuildFromStmt(query, result.AST, reservedVars, vschema, result.BindVarNeeds) } // ErrPlanNotSupported is an error for plan building not supported var ErrPlanNotSupported = errors.New("plan building not supported") // BuildFromStmt builds a plan based on the AST provided. -func BuildFromStmt(query string, stmt sqlparser.Statement, vschema ContextVSchema, bindVarNeeds *sqlparser.BindVarNeeds) (*engine.Plan, error) { - instruction, err := createInstructionFor(query, stmt, vschema) +func BuildFromStmt(query string, stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema, bindVarNeeds *sqlparser.BindVarNeeds) (*engine.Plan, error) { + instruction, err := createInstructionFor(query, stmt, reservedVars, vschema) if err != nil { return nil, err } @@ -87,40 +124,59 @@ func BuildFromStmt(query string, stmt sqlparser.Statement, vschema ContextVSchem return plan, nil } -func buildRoutePlan(stmt sqlparser.Statement, vschema ContextVSchema, f func(statement sqlparser.Statement, schema ContextVSchema) (engine.Primitive, error)) (engine.Primitive, error) { +func getConfiguredPlanner(vschema ContextVSchema) (selectPlanner, error) { + switch vschema.Planner() { + case Gen4, Gen4Left2Right, Gen4GreedyOnly: + return gen4Planner, nil + case Gen4WithFallback: + fp := &fallbackPlanner{ + primary: gen4Planner, + fallback: buildSelectPlan, + } + return fp.plan, nil + default: + // default is v3 plan + return buildSelectPlan, nil + } +} + +func buildRoutePlan(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema 
ContextVSchema, f func(statement sqlparser.Statement, reservedVars sqlparser.BindVars, schema ContextVSchema) (engine.Primitive, error)) (engine.Primitive, error) { if vschema.Destination() != nil { - return buildPlanForBypass(stmt, vschema) + return buildPlanForBypass(stmt, reservedVars, vschema) } - return f(stmt, vschema) + return f(stmt, reservedVars, vschema) } -func createInstructionFor(query string, stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +type selectPlanner func(query string) func(sqlparser.Statement, sqlparser.BindVars, ContextVSchema) (engine.Primitive, error) + +func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { switch stmt := stmt.(type) { case *sqlparser.Select: - return buildRoutePlan(stmt, vschema, buildSelectPlan(query)) + configuredPlanner, err := getConfiguredPlanner(vschema) + if err != nil { + return nil, err + } + return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner(query)) case *sqlparser.Insert: - return buildRoutePlan(stmt, vschema, buildInsertPlan) + return buildRoutePlan(stmt, reservedVars, vschema, buildInsertPlan) case *sqlparser.Update: - return buildRoutePlan(stmt, vschema, buildUpdatePlan) + return buildRoutePlan(stmt, reservedVars, vschema, buildUpdatePlan) case *sqlparser.Delete: - return buildRoutePlan(stmt, vschema, buildDeletePlan) + return buildRoutePlan(stmt, reservedVars, vschema, buildDeletePlan) case *sqlparser.Union: - return buildRoutePlan(stmt, vschema, buildUnionPlan) + return buildRoutePlan(stmt, reservedVars, vschema, buildUnionPlan) case sqlparser.DDLStatement: - return buildGeneralDDLPlan(query, stmt, vschema) + return buildGeneralDDLPlan(query, stmt, reservedVars, vschema) + case *sqlparser.AlterMigration: + return buildAlterMigrationPlan(query, vschema) + case *sqlparser.RevertMigration: + return buildRevertMigrationPlan(query, stmt, vschema) case 
*sqlparser.AlterVschema: return buildVSchemaDDLPlan(stmt, vschema) case *sqlparser.Use: return buildUsePlan(stmt, vschema) - case *sqlparser.Explain: - if stmt.Type == sqlparser.VitessType { - innerInstruction, err := createInstructionFor(query, stmt.Statement, vschema) - if err != nil { - return nil, err - } - return buildExplainPlan(innerInstruction) - } - return buildOtherReadAndAdmin(query, vschema) + case sqlparser.Explain: + return buildExplainPlan(stmt, reservedVars, vschema) case *sqlparser.OtherRead, *sqlparser.OtherAdmin: return buildOtherReadAndAdmin(query, vschema) case *sqlparser.Set: @@ -128,7 +184,7 @@ func createInstructionFor(query string, stmt sqlparser.Statement, vschema Contex case *sqlparser.Load: return buildLoadPlan(query, vschema) case sqlparser.DBDDLStatement: - return buildRoutePlan(stmt, vschema, buildDBDDLPlan) + return buildRoutePlan(stmt, reservedVars, vschema, buildDBDDLPlan) case *sqlparser.SetTransaction: return nil, ErrPlanNotSupported case *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, *sqlparser.Savepoint, *sqlparser.SRollback, *sqlparser.Release: @@ -137,15 +193,19 @@ func createInstructionFor(query string, stmt sqlparser.Statement, vschema Contex case *sqlparser.Show: return buildShowPlan(stmt, vschema) case *sqlparser.LockTables: - return buildRoutePlan(stmt, vschema, buildLockPlan) + return buildRoutePlan(stmt, reservedVars, vschema, buildLockPlan) case *sqlparser.UnlockTables: - return buildRoutePlan(stmt, vschema, buildUnlockPlan) + return buildRoutePlan(stmt, reservedVars, vschema, buildUnlockPlan) + case *sqlparser.Flush: + return buildFlushPlan(stmt, vschema) + case *sqlparser.CallProc: + return buildCallProcPlan(stmt, vschema) } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected statement type: %T", stmt) } -func buildDBDDLPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildDBDDLPlan(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema 
ContextVSchema) (engine.Primitive, error) { dbDDLstmt := stmt.(sqlparser.DBDDLStatement) ksName := dbDDLstmt.GetDatabaseName() if ksName == "" { @@ -163,24 +223,24 @@ func buildDBDDLPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Pr return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0)), nil } if !ksExists { - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot drop database '%s'; database does not exists", ksName) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.DbDropExists, "Can't drop database '%s'; database doesn't exists", ksName) } - return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "drop database not allowed") + return engine.NewDBDDL(ksName, false, queryTimeout(sqlparser.ExtractCommentDirectives(dbDDL.Comments))), nil case *sqlparser.AlterDatabase: if !ksExists { - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot alter database '%s'; database does not exists", ksName) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Can't alter database '%s'; unknown database", ksName) } - return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "alter database not allowed") + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "alter database is not supported") case *sqlparser.CreateDatabase: if dbDDL.IfNotExists && ksExists { return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0)), nil } if !dbDDL.IfNotExists && ksExists { - return nil, vterrors.Errorf(vtrpcpb.Code_ALREADY_EXISTS, "cannot create database '%s'; database exists", ksName) + return nil, vterrors.NewErrorf(vtrpcpb.Code_ALREADY_EXISTS, vterrors.DbCreateExists, "Can't create database '%s'; database exists", ksName) } - return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "create database not allowed") + return engine.NewDBDDL(ksName, true, queryTimeout(sqlparser.ExtractCommentDirectives(dbDDL.Comments))), nil } - return nil, 
vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable code path: %s", sqlparser.String(dbDDLstmt)) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] database ddl not recognized: %s", sqlparser.String(dbDDLstmt)) } func buildLoadPlan(query string, vschema ContextVSchema) (engine.Primitive, error) { @@ -191,8 +251,8 @@ func buildLoadPlan(query string, vschema ContextVSchema) (engine.Primitive, erro destination := vschema.Destination() if destination == nil { - if keyspace.Sharded { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: this construct is not supported on sharded keyspace") + if err := vschema.ErrorIfShardedF(keyspace, "LOAD", "LOAD is not supported on sharded database"); err != nil { + return nil, err } destination = key.DestinationAnyShard{} } @@ -216,3 +276,105 @@ func buildVSchemaDDLPlan(stmt *sqlparser.AlterVschema, vschema ContextVSchema) ( AlterVschemaDDL: stmt, }, nil } + +func buildFlushPlan(stmt *sqlparser.Flush, vschema ContextVSchema) (engine.Primitive, error) { + if len(stmt.TableNames) == 0 { + return buildFlushOptions(stmt, vschema) + } + return buildFlushTables(stmt, vschema) +} + +func buildFlushOptions(stmt *sqlparser.Flush, vschema ContextVSchema) (engine.Primitive, error) { + dest, keyspace, _, err := vschema.TargetDestination("") + if err != nil { + return nil, err + } + if dest == nil { + dest = key.DestinationAllShards{} + } + return &engine.Send{ + Keyspace: keyspace, + TargetDestination: dest, + Query: sqlparser.String(stmt), + IsDML: false, + SingleShardOnly: false, + }, nil +} + +func buildFlushTables(stmt *sqlparser.Flush, vschema ContextVSchema) (engine.Primitive, error) { + type sendDest struct { + ks *vindexes.Keyspace + dest key.Destination + } + + dest := vschema.Destination() + if dest == nil { + dest = key.DestinationAllShards{} + } + + tablesMap := make(map[sendDest]sqlparser.TableNames) + var keys []sendDest + for i, tab := range stmt.TableNames { + var ksTab *vindexes.Keyspace + var 
table *vindexes.Table + var err error + + table, _, _, _, _, err = vschema.FindTableOrVindex(tab) + if err != nil { + return nil, err + } + if table == nil { + return nil, vindexes.NotFoundError{TableName: tab.Name.String()} + } + + ksTab = table.Keyspace + stmt.TableNames[i] = sqlparser.TableName{ + Name: table.Name, + } + + key := sendDest{ksTab, dest} + tables, isAvail := tablesMap[key] + if !isAvail { + keys = append(keys, key) + } + tables = append(tables, stmt.TableNames[i]) // = append(tables.TableNames, stmt.TableNames[i]) + tablesMap[key] = tables + } + + if len(tablesMap) == 1 { + for sendDest, tables := range tablesMap { + return &engine.Send{ + Keyspace: sendDest.ks, + TargetDestination: sendDest.dest, + Query: sqlparser.String(newFlushStmt(stmt, tables)), + }, nil + } + } + + sort.Slice(keys, func(i, j int) bool { + return keys[i].ks.Name < keys[j].ks.Name + }) + + finalPlan := &engine.Concatenate{ + Sources: nil, + } + for _, sendDest := range keys { + plan := &engine.Send{ + Keyspace: sendDest.ks, + TargetDestination: sendDest.dest, + Query: sqlparser.String(newFlushStmt(stmt, tablesMap[sendDest])), + } + finalPlan.Sources = append(finalPlan.Sources, plan) + } + + return finalPlan, nil +} + +func newFlushStmt(stmt *sqlparser.Flush, tables sqlparser.TableNames) *sqlparser.Flush { + return &sqlparser.Flush{ + IsLocal: stmt.IsLocal, + TableNames: tables, + WithLock: stmt.WithLock, + ForExport: stmt.ForExport, + } +} diff --git a/go/vt/vtgate/planbuilder/bypass.go b/go/vt/vtgate/planbuilder/bypass.go index 29be8ef08c3..dda9129585c 100644 --- a/go/vt/vtgate/planbuilder/bypass.go +++ b/go/vt/vtgate/planbuilder/bypass.go @@ -24,11 +24,11 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" ) -func buildPlanForBypass(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildPlanForBypass(stmt sqlparser.Statement, _ sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { switch vschema.Destination().(type) { case 
key.DestinationExactKeyRange: if _, ok := stmt.(*sqlparser.Insert); ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "range queries not supported for inserts: %s", vschema.TargetString()) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "range queries are not allowed for insert statement: %s", vschema.TargetString()) } } diff --git a/go/vt/vtgate/planbuilder/call_proc.go b/go/vt/vtgate/planbuilder/call_proc.go new file mode 100644 index 00000000000..5130c25a11c --- /dev/null +++ b/go/vt/vtgate/planbuilder/call_proc.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" +) + +func buildCallProcPlan(stmt *sqlparser.CallProc, vschema ContextVSchema) (engine.Primitive, error) { + var ks string + if !stmt.Name.Qualifier.IsEmpty() { + ks = stmt.Name.Qualifier.String() + } + + dest, keyspace, _, err := vschema.TargetDestination(ks) + if err != nil { + return nil, err + } + + if dest == nil { + if err := vschema.ErrorIfShardedF(keyspace, "CALL", errNotAllowWhenSharded); err != nil { + return nil, err + } + dest = key.DestinationAnyShard{} + } + + stmt.Name.Qualifier = sqlparser.NewTableIdent("") + + return &engine.Send{ + Keyspace: keyspace, + TargetDestination: dest, + Query: sqlparser.String(stmt), + }, nil +} + +const errNotAllowWhenSharded = "CALL is not supported for sharded database" diff --git a/go/vt/vtgate/planbuilder/concatenate.go b/go/vt/vtgate/planbuilder/concatenate.go index 4ff12ad47d5..b482e795845 100644 --- a/go/vt/vtgate/planbuilder/concatenate.go +++ b/go/vt/vtgate/planbuilder/concatenate.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/semantics" ) type concatenate struct { @@ -53,6 +54,14 @@ func (c *concatenate) Wireup(plan logicalPlan, jt *jointab) error { return c.rhs.Wireup(plan, jt) } +func (c *concatenate) WireupV4(semTable *semantics.SemTable) error { + err := c.lhs.WireupV4(semTable) + if err != nil { + return err + } + return c.rhs.WireupV4(semTable) +} + func (c *concatenate) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { panic("implement me") } @@ -84,6 +93,10 @@ func (c *concatenate) Rewrite(inputs ...logicalPlan) error { return nil } +func (c *concatenate) ContainsTables() semantics.TableSet { + return c.lhs.ContainsTables().Merge(c.rhs.ContainsTables()) +} + // Inputs implements the logicalPlan interface func (c 
*concatenate) Inputs() []logicalPlan { return []logicalPlan{c.lhs, c.rhs} diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index f0627c8723c..04503ae6fbb 100644 --- a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -1,21 +1,49 @@ package planbuilder import ( - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/vindexes" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // Error messages for CreateView queries const ( ViewDifferentKeyspace string = "Select query does not belong to the same keyspace as the view statement" ViewComplex string = "Complex select queries are not supported in create or alter view statements" + DifferentDestinations string = "Tables or Views specified in the query do not belong to the same destination" ) +type fkStrategy int + +const ( + fkAllow fkStrategy = iota + fkDisallow +) + +var fkStrategyMap = map[string]fkStrategy{ + "allow": fkAllow, + "disallow": fkDisallow, +} + +type fkContraint struct { + found bool +} + +func (fk *fkContraint) FkWalk(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.CreateTable, *sqlparser.AlterTable, + *sqlparser.TableSpec, *sqlparser.AddConstraintDefinition, *sqlparser.ConstraintDefinition: + return true, nil + case *sqlparser.ForeignKeyDefinition: + fk.found = true + } + return false, nil +} + // buildGeneralDDLPlan builds a general DDL plan, which can be either normal DDL or online DDL. // The two behave compeltely differently, and have two very different primitives. // We want to be able to dynamically choose between normal/online plans according to Session settings. @@ -23,22 +51,32 @@ const ( // a session context. 
It's only when we Execute() the primitive that we have that context. // This is why we return a compound primitive (DDL) which contains fully populated primitives (Send & OnlineDDL), // and which chooses which of the two to invoke at runtime. -func buildGeneralDDLPlan(sql string, ddlStatement sqlparser.DDLStatement, vschema ContextVSchema) (engine.Primitive, error) { +func buildGeneralDDLPlan(sql string, ddlStatement sqlparser.DDLStatement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { if vschema.Destination() != nil { return buildByPassDDLPlan(sql, vschema) } - - normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(sql, ddlStatement, vschema) + normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(sql, ddlStatement, reservedVars, vschema) if err != nil { return nil, err } + if ddlStatement.IsTemporary() { + err := vschema.ErrorIfShardedF(normalDDLPlan.Keyspace, "temporary table", "Temporary table not supported in sharded database %s", normalDDLPlan.Keyspace.Name) + if err != nil { + return nil, err + } + onlineDDLPlan = nil // emptying this so it does not accidentally gets used somewhere + } + return &engine.DDL{ - Keyspace: normalDDLPlan.Keyspace, - SQL: normalDDLPlan.Query, - DDL: ddlStatement, - NormalDDL: normalDDLPlan, - OnlineDDL: onlineDDLPlan, + Keyspace: normalDDLPlan.Keyspace, + SQL: normalDDLPlan.Query, + DDL: ddlStatement, + NormalDDL: normalDDLPlan, + OnlineDDL: onlineDDLPlan, + OnlineDDLEnabled: *enableOnlineDDL, + + CreateTempTable: ddlStatement.IsTemporary(), }, nil } @@ -54,52 +92,45 @@ func buildByPassDDLPlan(sql string, vschema ContextVSchema) (engine.Primitive, e }, nil } -func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, vschema ContextVSchema) (*engine.Send, *engine.OnlineDDL, error) { +func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (*engine.Send, *engine.OnlineDDL, error) { var destination key.Destination var 
keyspace *vindexes.Keyspace var err error switch ddl := ddlStatement.(type) { - case *sqlparser.CreateIndex, *sqlparser.AlterTable: - // For Create index and Alter Table, the table must already exist - // We should find the target of the query from this tables location - destination, keyspace, err = findTableDestinationAndKeyspace(vschema, ddlStatement) - if err != nil { - return nil, nil, err - } - case *sqlparser.DDL: - // For DDL, it is only required that the keyspace exist - // We should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name - destination, keyspace, _, err = vschema.TargetDestination(ddlStatement.GetTable().Qualifier.String()) + case *sqlparser.AlterTable, *sqlparser.TruncateTable: + err = checkFKError(vschema, ddlStatement) if err != nil { return nil, nil, err } - ddlStatement.SetTable("", ddlStatement.GetTable().Name.String()) + // For Alter Table and other statements, the table must already exist + // We should find the target of the query from this tables location + destination, keyspace, err = findTableDestinationAndKeyspace(vschema, ddlStatement) case *sqlparser.CreateView: - destination, keyspace, err = buildCreateView(vschema, ddl) - if err != nil { - return nil, nil, err - } + destination, keyspace, err = buildCreateView(vschema, ddl, reservedVars) case *sqlparser.AlterView: - destination, keyspace, err = buildAlterView(vschema, ddl) + destination, keyspace, err = buildAlterView(vschema, ddl, reservedVars) + case *sqlparser.CreateTable: + err = checkFKError(vschema, ddlStatement) if err != nil { return nil, nil, err } - case *sqlparser.CreateTable: destination, keyspace, _, err = vschema.TargetDestination(ddlStatement.GetTable().Qualifier.String()) - // Remove the keyspace name as the database name might be different. 
- ddlStatement.SetTable("", ddlStatement.GetTable().Name.String()) if err != nil { return nil, nil, err } + // Remove the keyspace name as the database name might be different. + ddlStatement.SetTable("", ddlStatement.GetTable().Name.String()) case *sqlparser.DropView, *sqlparser.DropTable: destination, keyspace, err = buildDropViewOrTable(vschema, ddlStatement) - if err != nil { - return nil, nil, err - } - + case *sqlparser.RenameTable: + destination, keyspace, err = buildRenameTable(vschema, ddl) default: - return nil, nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "BUG: unexpected statement type: %T", ddlStatement) + return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected ddl statement type: %T", ddlStatement) + } + + if err != nil { + return nil, nil, err } if destination == nil { @@ -125,6 +156,17 @@ func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, vschema Cont }, nil } +func checkFKError(vschema ContextVSchema, ddlStatement sqlparser.DDLStatement) error { + if fkStrategyMap[vschema.ForeignKeyMode()] == fkDisallow { + fk := &fkContraint{} + _ = sqlparser.Walk(fk.FkWalk, ddlStatement) + if fk.found { + return vterrors.Errorf(vtrpcpb.Code_ABORTED, "foreign key constraint is not allowed") + } + } + return nil +} + func findTableDestinationAndKeyspace(vschema ContextVSchema, ddlStatement sqlparser.DDLStatement) (key.Destination, *vindexes.Keyspace, error) { var table *vindexes.Table var destination key.Destination @@ -150,7 +192,7 @@ func findTableDestinationAndKeyspace(vschema ContextVSchema, ddlStatement sqlpar return destination, keyspace, nil } -func buildAlterView(vschema ContextVSchema, ddl *sqlparser.AlterView) (key.Destination, *vindexes.Keyspace, error) { +func buildAlterView(vschema ContextVSchema, ddl *sqlparser.AlterView, reservedVars sqlparser.BindVars) (key.Destination, *vindexes.Keyspace, error) { // For Alter View, we require that the view exist and the select query can be satisfied within the keyspace itself // We 
should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name destination, keyspace, err := findTableDestinationAndKeyspace(vschema, ddl) @@ -159,21 +201,21 @@ func buildAlterView(vschema ContextVSchema, ddl *sqlparser.AlterView) (key.Desti } var selectPlan engine.Primitive - selectPlan, err = createInstructionFor(sqlparser.String(ddl.Select), ddl.Select, vschema) + selectPlan, err = createInstructionFor(sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema) if err != nil { return nil, nil, err } routePlan, isRoute := selectPlan.(*engine.Route) if !isRoute { - return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, ViewComplex) + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex) } if keyspace.Name != routePlan.GetKeyspaceName() { - return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, ViewDifferentKeyspace) + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewDifferentKeyspace) } if routePlan.Opcode != engine.SelectUnsharded && routePlan.Opcode != engine.SelectEqualUnique && routePlan.Opcode != engine.SelectScatter { - return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, ViewComplex) + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex) } - sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { + _ = sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { switch tableName := cursor.Node().(type) { case sqlparser.TableName: cursor.Replace(sqlparser.TableName{ @@ -185,7 +227,7 @@ func buildAlterView(vschema ContextVSchema, ddl *sqlparser.AlterView) (key.Desti return destination, keyspace, nil } -func buildCreateView(vschema ContextVSchema, ddl *sqlparser.CreateView) (key.Destination, *vindexes.Keyspace, error) { +func buildCreateView(vschema ContextVSchema, ddl *sqlparser.CreateView, reservedVars sqlparser.BindVars) (key.Destination, *vindexes.Keyspace, error) { // For Create View, we require that the 
keyspace exist and the select query can be satisfied within the keyspace itself // We should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name destination, keyspace, _, err := vschema.TargetDestination(ddl.ViewName.Qualifier.String()) @@ -195,21 +237,21 @@ func buildCreateView(vschema ContextVSchema, ddl *sqlparser.CreateView) (key.Des ddl.ViewName.Qualifier = sqlparser.NewTableIdent("") var selectPlan engine.Primitive - selectPlan, err = createInstructionFor(sqlparser.String(ddl.Select), ddl.Select, vschema) + selectPlan, err = createInstructionFor(sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema) if err != nil { return nil, nil, err } routePlan, isRoute := selectPlan.(*engine.Route) if !isRoute { - return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, ViewComplex) + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex) } if keyspace.Name != routePlan.GetKeyspaceName() { - return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, ViewDifferentKeyspace) + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewDifferentKeyspace) } if routePlan.Opcode != engine.SelectUnsharded && routePlan.Opcode != engine.SelectEqualUnique && routePlan.Opcode != engine.SelectScatter { - return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, ViewComplex) + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex) } - sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { + _ = sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { switch tableName := cursor.Node().(type) { case sqlparser.TableName: cursor.Replace(sqlparser.TableName{ @@ -257,7 +299,63 @@ func buildDropViewOrTable(vschema ContextVSchema, ddlStatement sqlparser.DDLStat keyspace = keyspaceTab } if destination != destinationTab || keyspace != keyspaceTab { - return nil, nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "Tables or Views specified in the query do not 
belong to the same destination") + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, DifferentDestinations) + } + } + return destination, keyspace, nil +} + +func buildRenameTable(vschema ContextVSchema, renameTable *sqlparser.RenameTable) (key.Destination, *vindexes.Keyspace, error) { + var destination key.Destination + var keyspace *vindexes.Keyspace + + for _, tabPair := range renameTable.TablePairs { + var destinationFrom key.Destination + var keyspaceFrom *vindexes.Keyspace + var table *vindexes.Table + var err error + table, _, _, _, destinationFrom, err = vschema.FindTableOrVindex(tabPair.FromTable) + + if err != nil { + _, isNotFound := err.(vindexes.NotFoundError) + if !isNotFound { + return nil, nil, err + } + } + if table == nil { + destinationFrom, keyspaceFrom, _, err = vschema.TargetDestination(tabPair.FromTable.Qualifier.String()) + if err != nil { + return nil, nil, err + } + tabPair.FromTable = sqlparser.TableName{ + Name: tabPair.FromTable.Name, + } + } else { + keyspaceFrom = table.Keyspace + tabPair.FromTable = sqlparser.TableName{ + Name: table.Name, + } + } + + if tabPair.ToTable.Qualifier.String() != "" { + _, keyspaceTo, _, err := vschema.TargetDestination(tabPair.ToTable.Qualifier.String()) + if err != nil { + return nil, nil, err + } + if keyspaceTo.Name != keyspaceFrom.Name { + return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.ForbidSchemaChange, "Changing schema from '%s' to '%s' is not allowed", keyspaceFrom.Name, keyspaceTo.Name) + } + tabPair.ToTable = sqlparser.TableName{ + Name: tabPair.ToTable.Name, + } + } + + if destination == nil && keyspace == nil { + destination = destinationFrom + keyspace = keyspaceFrom + } + if destination != destinationFrom || keyspace != keyspaceFrom { + return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, DifferentDestinations) } } return destination, keyspace, nil diff --git a/go/vt/vtgate/planbuilder/delete.go b/go/vt/vtgate/planbuilder/delete.go index 
fab2edc0423..9b0ac35e175 100644 --- a/go/vt/vtgate/planbuilder/delete.go +++ b/go/vt/vtgate/planbuilder/delete.go @@ -17,16 +17,16 @@ limitations under the License. package planbuilder import ( - "vitess.io/vitess/go/vt/proto/vtrpc" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" ) // buildDeletePlan builds the instructions for a DELETE statement. -func buildDeletePlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildDeletePlan(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { del := stmt.(*sqlparser.Delete) - dml, ksidVindex, ksidCol, err := buildDMLPlan(vschema, "delete", del, del.TableExprs, del.Where, del.OrderBy, del.Limit, del.Comments, del.Targets) + dml, ksidVindex, ksidCol, err := buildDMLPlan(vschema, "delete", del, reservedVars, del.TableExprs, del.Where, del.OrderBy, del.Limit, del.Comments, del.Targets) if err != nil { return nil, err } @@ -39,11 +39,11 @@ func buildDeletePlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.P } if len(del.Targets) > 1 { - return nil, vterrors.New(vtrpc.Code_UNIMPLEMENTED, "unsupported: multi-table delete statement in sharded keyspace") + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "multi-table delete statement in not supported in sharded database") } if len(del.Targets) == 1 && del.Targets[0].Name != edel.Table.Name { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "Unknown table '%s' in MULTI DELETE", del.Targets[0].Name.String()) + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.UnknownTable, "Unknown table '%s' in MULTI DELETE", del.Targets[0].Name.String()) } if len(edel.Table.Owned) > 0 { diff --git a/go/vt/vtgate/planbuilder/dml.go b/go/vt/vtgate/planbuilder/dml.go index 3103b3b42d9..f7dae888979 100644 --- a/go/vt/vtgate/planbuilder/dml.go +++ 
b/go/vt/vtgate/planbuilder/dml.go @@ -100,10 +100,10 @@ func nameMatch(node sqlparser.Expr, col sqlparser.ColIdent) bool { return ok && colname.Name.Equal(col) } -func buildDMLPlan(vschema ContextVSchema, dmlType string, stmt sqlparser.Statement, tableExprs sqlparser.TableExprs, where *sqlparser.Where, orderBy sqlparser.OrderBy, limit *sqlparser.Limit, comments sqlparser.Comments, nodes ...sqlparser.SQLNode) (*engine.DML, vindexes.SingleColumn, string, error) { +func buildDMLPlan(vschema ContextVSchema, dmlType string, stmt sqlparser.Statement, reservedVars sqlparser.BindVars, tableExprs sqlparser.TableExprs, where *sqlparser.Where, orderBy sqlparser.OrderBy, limit *sqlparser.Limit, comments sqlparser.Comments, nodes ...sqlparser.SQLNode) (*engine.DML, vindexes.SingleColumn, string, error) { edml := &engine.DML{} - pb := newPrimitiveBuilder(vschema, newJointab(sqlparser.GetBindvars(stmt))) - rb, err := pb.processDMLTable(tableExprs, nil) + pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) + rb, err := pb.processDMLTable(tableExprs, reservedVars, nil) if err != nil { return nil, nil, "", err } @@ -113,7 +113,9 @@ func buildDMLPlan(vschema ContextVSchema, dmlType string, stmt sqlparser.Stateme var subqueryArgs []sqlparser.SQLNode subqueryArgs = append(subqueryArgs, nodes...) subqueryArgs = append(subqueryArgs, where, orderBy, limit) - if !pb.finalizeUnshardedDMLSubqueries(subqueryArgs...) { + if pb.finalizeUnshardedDMLSubqueries(reservedVars, subqueryArgs...) 
{ + vschema.WarnUnshardedOnly("subqueries can't be sharded in DML") + } else { return nil, nil, "", vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: sharded subqueries in DML") } edml.Opcode = engine.Unsharded @@ -139,7 +141,7 @@ func buildDMLPlan(vschema ContextVSchema, dmlType string, stmt sqlparser.Stateme edml.QueryTimeout = queryTimeout(directives) if len(pb.st.tables) != 1 { - return nil, nil, "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: multi-table %s statement in sharded keyspace", dmlType) + return nil, nil, "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi-table %s statement is not supported in sharded database", dmlType) } for _, tval := range pb.st.tables { // There is only one table. @@ -153,7 +155,7 @@ func buildDMLPlan(vschema ContextVSchema, dmlType string, stmt sqlparser.Stateme if rb.eroute.TargetDestination != nil { if rb.eroute.TargetTabletType != topodatapb.TabletType_MASTER { - return nil, nil, "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported: %s statement with a replica target", dmlType) + return nil, nil, "", vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.InnodbReadOnly, "unsupported: %s statement with a replica target", dmlType) } edml.Opcode = engine.ByDestination edml.TargetDestination = rb.eroute.TargetDestination @@ -163,7 +165,7 @@ func buildDMLPlan(vschema ContextVSchema, dmlType string, stmt sqlparser.Stateme edml.Opcode = routingType if routingType == engine.Scatter { if limit != nil { - return nil, nil, "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: multi shard %s with limit", dmlType) + return nil, nil, "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi shard %s with limit is not supported", dmlType) } } else { edml.Vindex = vindex diff --git a/go/vt/vtgate/planbuilder/explain.go b/go/vt/vtgate/planbuilder/explain.go index 74d912dddbf..b26bfd87dcb 100644 --- a/go/vt/vtgate/planbuilder/explain.go +++ b/go/vt/vtgate/planbuilder/explain.go @@ -20,26 +20,53 
@@ import ( "strings" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" ) -func extractQuery(m map[string]interface{}) string { - queryObj, ok := m["Query"] - if !ok { - return "" +// Builds an explain-plan for the given Primitive +func buildExplainPlan(stmt sqlparser.Explain, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { + switch explain := stmt.(type) { + case *sqlparser.ExplainTab: + return explainTabPlan(explain, vschema) + case *sqlparser.ExplainStmt: + if explain.Type == sqlparser.VitessType { + return buildVitessTypePlan(explain, reservedVars, vschema) + } + return buildOtherReadAndAdmin(sqlparser.String(explain), vschema) } - query, ok := queryObj.(string) - if !ok { - return "" + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected explain type: %T", stmt) +} + +func explainTabPlan(explain *sqlparser.ExplainTab, vschema ContextVSchema) (engine.Primitive, error) { + table, _, _, _, destination, err := vschema.FindTableOrVindex(explain.Table) + if err != nil { + return nil, err } + explain.Table.Qualifier = sqlparser.NewTableIdent("") - return query + if destination == nil { + destination = key.DestinationAnyShard{} + } + + return &engine.Send{ + Keyspace: table.Keyspace, + TargetDestination: destination, + Query: sqlparser.String(explain), + SingleShardOnly: true, + }, nil } -// Builds an explain-plan for the given Primitive -func buildExplainPlan(input engine.Primitive) (engine.Primitive, error) { - descriptions := treeLines(engine.PrimitiveToPlanDescription(input)) +func buildVitessTypePlan(explain *sqlparser.ExplainStmt, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { + innerInstruction, err := createInstructionFor(sqlparser.String(explain.Statement), 
explain.Statement, reservedVars, vschema) + if err != nil { + return nil, err + } + descriptions := treeLines(engine.PrimitiveToPlanDescription(innerInstruction)) var rows [][]sqltypes.Value for _, line := range descriptions { @@ -74,6 +101,19 @@ func buildExplainPlan(input engine.Primitive) (engine.Primitive, error) { return engine.NewRowsPrimitive(rows, fields), nil } +func extractQuery(m map[string]interface{}) string { + queryObj, ok := m["Query"] + if !ok { + return "" + } + query, ok := queryObj.(string) + if !ok { + return "" + } + + return query +} + type description struct { header string descr engine.PrimitiveDescription diff --git a/go/vt/vtgate/planbuilder/expr.go b/go/vt/vtgate/planbuilder/expr.go index 2a863c449af..d3c440511db 100644 --- a/go/vt/vtgate/planbuilder/expr.go +++ b/go/vt/vtgate/planbuilder/expr.go @@ -69,7 +69,7 @@ type subqueryInfo struct { // // If an expression has no references to the current query, then the left-most // origin is chosen as the default. -func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr) (pullouts []*pulloutSubquery, origin logicalPlan, pushExpr sqlparser.Expr, err error) { +func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr, reservedVars sqlparser.BindVars) (pullouts []*pulloutSubquery, origin logicalPlan, pushExpr sqlparser.Expr, err error) { // highestOrigin tracks the highest origin referenced by the expression. // Default is the First. 
highestOrigin := First(pb.plan) @@ -104,11 +104,11 @@ func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr) (pullouts []*pullout spb := newPrimitiveBuilder(pb.vschema, pb.jt) switch stmt := node.Select.(type) { case *sqlparser.Select: - if err := spb.processSelect(stmt, pb.st, ""); err != nil { + if err := spb.processSelect(stmt, reservedVars, pb.st, ""); err != nil { return false, err } case *sqlparser.Union: - if err := spb.processUnion(stmt, pb.st); err != nil { + if err := spb.processUnion(stmt, reservedVars, pb.st); err != nil { return false, err } default: @@ -156,7 +156,7 @@ func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr) (pullouts []*pullout construct, ok := constructsMap[sqi.ast] if !ok { // (subquery) -> :_sq - expr = sqlparser.ReplaceExpr(expr, sqi.ast, sqlparser.NewArgument([]byte(":"+sqName))) + expr = sqlparser.ReplaceExpr(expr, sqi.ast, sqlparser.NewArgument(":"+sqName)) pullouts = append(pullouts, newPulloutSubquery(engine.PulloutValue, sqName, hasValues, sqi.plan)) continue } @@ -170,9 +170,9 @@ func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr) (pullouts []*pullout Right: sqlparser.ListArg("::" + sqName), } left := &sqlparser.ComparisonExpr{ - Left: sqlparser.NewArgument([]byte(":" + hasValues)), + Left: sqlparser.NewArgument(":" + hasValues), Operator: sqlparser.EqualOp, - Right: sqlparser.NewIntLiteral([]byte("1")), + Right: sqlparser.NewIntLiteral("1"), } newExpr := &sqlparser.AndExpr{ Left: left, @@ -183,9 +183,9 @@ func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr) (pullouts []*pullout } else { // a not in (subquery) -> (:__sq_has_values = 0 or (a not in ::__sq)) left := &sqlparser.ComparisonExpr{ - Left: sqlparser.NewArgument([]byte(":" + hasValues)), + Left: sqlparser.NewArgument(":" + hasValues), Operator: sqlparser.EqualOp, - Right: sqlparser.NewIntLiteral([]byte("0")), + Right: sqlparser.NewIntLiteral("0"), } right := &sqlparser.ComparisonExpr{ Operator: construct.Operator, @@ -201,7 +201,7 @@ func (pb 
*primitiveBuilder) findOrigin(expr sqlparser.Expr) (pullouts []*pullout } case *sqlparser.ExistsExpr: // exists (subquery) -> :__sq_has_values - expr = sqlparser.ReplaceExpr(expr, construct, sqlparser.NewArgument([]byte(":"+hasValues))) + expr = sqlparser.ReplaceExpr(expr, construct, sqlparser.NewArgument(":"+hasValues)) pullouts = append(pullouts, newPulloutSubquery(engine.PulloutExists, sqName, hasValues, sqi.plan)) } } @@ -221,7 +221,7 @@ func hasSubquery(node sqlparser.SQLNode) bool { return has } -func (pb *primitiveBuilder) finalizeUnshardedDMLSubqueries(nodes ...sqlparser.SQLNode) bool { +func (pb *primitiveBuilder) finalizeUnshardedDMLSubqueries(reservedVars sqlparser.BindVars, nodes ...sqlparser.SQLNode) bool { var keyspace string if rb, ok := pb.plan.(*route); ok { keyspace = rb.eroute.Keyspace.Name @@ -243,7 +243,7 @@ func (pb *primitiveBuilder) finalizeUnshardedDMLSubqueries(nodes ...sqlparser.SQ return true, nil } spb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := spb.processSelect(nodeType, pb.st, ""); err != nil { + if err := spb.processSelect(nodeType, reservedVars, pb.st, ""); err != nil { samePlan = false return false, err } @@ -264,7 +264,7 @@ func (pb *primitiveBuilder) finalizeUnshardedDMLSubqueries(nodes ...sqlparser.SQ return true, nil } spb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := spb.processUnion(nodeType, pb.st); err != nil { + if err := spb.processUnion(nodeType, reservedVars, pb.st); err != nil { samePlan = false return false, err } @@ -299,7 +299,7 @@ func valEqual(a, b sqlparser.Expr) bool { if !ok { return false } - return bytes.Equal(a, b) + return a == b case *sqlparser.Literal: b, ok := b.(*sqlparser.Literal) if !ok { @@ -309,7 +309,7 @@ func valEqual(a, b sqlparser.Expr) bool { case sqlparser.StrVal: switch b.Type { case sqlparser.StrVal: - return bytes.Equal(a.Val, b.Val) + return a.Val == b.Val case sqlparser.HexVal: return hexEqual(b, a) } @@ -317,7 +317,7 @@ func valEqual(a, b sqlparser.Expr) bool { return 
hexEqual(a, b) case sqlparser.IntVal: if b.Type == (sqlparser.IntVal) { - return bytes.Equal(a.Val, b.Val) + return a.Val == b.Val } } } @@ -331,7 +331,7 @@ func hexEqual(a, b *sqlparser.Literal) bool { } switch b.Type { case sqlparser.StrVal: - return bytes.Equal(v, b.Val) + return bytes.Equal(v, b.Bytes()) case sqlparser.HexVal: v2, err := b.HexDecode() if err != nil { diff --git a/go/vt/vtgate/planbuilder/expr_test.go b/go/vt/vtgate/planbuilder/expr_test.go index ec5a2908134..057c9d15ef3 100644 --- a/go/vt/vtgate/planbuilder/expr_test.go +++ b/go/vt/vtgate/planbuilder/expr_test.go @@ -40,59 +40,59 @@ func TestValEqual(t *testing.T) { in2: &sqlparser.ColName{Metadata: c2, Name: sqlparser.NewColIdent("c1")}, out: false, }, { - in1: newValArg(":aa"), + in1: sqlparser.NewArgument(":aa"), in2: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewColIdent("c1")}, out: false, }, { - in1: newValArg(":aa"), - in2: newValArg(":aa"), + in1: sqlparser.NewArgument(":aa"), + in2: sqlparser.NewArgument(":aa"), out: true, }, { - in1: newValArg(":aa"), - in2: newValArg(":bb"), + in1: sqlparser.NewArgument(":aa"), + in2: sqlparser.NewArgument(":bb"), }, { - in1: newStrLiteral("aa"), - in2: newStrLiteral("aa"), + in1: sqlparser.NewStrLiteral("aa"), + in2: sqlparser.NewStrLiteral("aa"), out: true, }, { - in1: newStrLiteral("11"), - in2: newHexLiteral("3131"), + in1: sqlparser.NewStrLiteral("11"), + in2: sqlparser.NewHexLiteral("3131"), out: true, }, { - in1: newHexLiteral("3131"), - in2: newStrLiteral("11"), + in1: sqlparser.NewHexLiteral("3131"), + in2: sqlparser.NewStrLiteral("11"), out: true, }, { - in1: newHexLiteral("3131"), - in2: newHexLiteral("3131"), + in1: sqlparser.NewHexLiteral("3131"), + in2: sqlparser.NewHexLiteral("3131"), out: true, }, { - in1: newHexLiteral("3131"), - in2: newHexLiteral("3132"), + in1: sqlparser.NewHexLiteral("3131"), + in2: sqlparser.NewHexLiteral("3132"), out: false, }, { - in1: newHexLiteral("313"), - in2: newHexLiteral("3132"), + in1: 
sqlparser.NewHexLiteral("313"), + in2: sqlparser.NewHexLiteral("3132"), out: false, }, { - in1: newHexLiteral("3132"), - in2: newHexLiteral("313"), + in1: sqlparser.NewHexLiteral("3132"), + in2: sqlparser.NewHexLiteral("313"), out: false, }, { - in1: newIntLiteral("313"), - in2: newHexLiteral("3132"), + in1: sqlparser.NewIntLiteral("313"), + in2: sqlparser.NewHexLiteral("3132"), out: false, }, { - in1: newHexLiteral("3132"), - in2: newIntLiteral("313"), + in1: sqlparser.NewHexLiteral("3132"), + in2: sqlparser.NewIntLiteral("313"), out: false, }, { - in1: newIntLiteral("313"), - in2: newIntLiteral("313"), + in1: sqlparser.NewIntLiteral("313"), + in2: sqlparser.NewIntLiteral("313"), out: true, }, { - in1: newIntLiteral("313"), - in2: newIntLiteral("314"), + in1: sqlparser.NewIntLiteral("313"), + in2: sqlparser.NewIntLiteral("314"), out: false, }} for _, tc := range testcases { @@ -102,19 +102,3 @@ func TestValEqual(t *testing.T) { } } } - -func newStrLiteral(in string) *sqlparser.Literal { - return sqlparser.NewStrLiteral([]byte(in)) -} - -func newIntLiteral(in string) *sqlparser.Literal { - return sqlparser.NewIntLiteral([]byte(in)) -} - -func newHexLiteral(in string) *sqlparser.Literal { - return sqlparser.NewHexLiteral([]byte(in)) -} - -func newValArg(in string) sqlparser.Expr { - return sqlparser.NewArgument([]byte(in)) -} diff --git a/go/vt/vtgate/planbuilder/expression_converter.go b/go/vt/vtgate/planbuilder/expression_converter.go index d90da46be71..769c4b6f52d 100644 --- a/go/vt/vtgate/planbuilder/expression_converter.go +++ b/go/vt/vtgate/planbuilder/expression_converter.go @@ -38,7 +38,7 @@ func booleanValues(astExpr sqlparser.Expr) evalengine.Expr { case *sqlparser.Literal: //set autocommit = 'on' if node.Type == sqlparser.StrVal { - switch strings.ToLower(string(node.Val)) { + switch strings.ToLower(node.Val) { case "on": return ON case "off": diff --git a/go/vt/vtgate/planbuilder/fallback_planner.go b/go/vt/vtgate/planbuilder/fallback_planner.go new file 
mode 100644 index 00000000000..436ce3da041 --- /dev/null +++ b/go/vt/vtgate/planbuilder/fallback_planner.go @@ -0,0 +1,57 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "fmt" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" +) + +type fallbackPlanner struct { + primary, fallback selectPlanner +} + +var _ selectPlanner = (*fallbackPlanner)(nil).plan + +func (fp *fallbackPlanner) safePrimary(query string) func(sqlparser.Statement, sqlparser.BindVars, ContextVSchema) (engine.Primitive, error) { + primaryF := fp.primary(query) + return func(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (res engine.Primitive, err error) { + defer func() { + // if the primary planner panics, we want to catch it here so we can fall back + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) // not using vterror since this will only be used for logging + } + }() + res, err = primaryF(stmt, reservedVars, vschema) + return + } +} + +func (fp *fallbackPlanner) plan(query string) func(sqlparser.Statement, sqlparser.BindVars, ContextVSchema) (engine.Primitive, error) { + primaryF := fp.safePrimary(query) + backupF := fp.fallback(query) + + return func(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { + res, err := primaryF(stmt, reservedVars, vschema) + if err != nil { + return backupF(stmt, 
reservedVars, vschema) + } + return res, nil + } +} diff --git a/go/vt/vtgate/planbuilder/fallback_planner_test.go b/go/vt/vtgate/planbuilder/fallback_planner_test.go new file mode 100644 index 00000000000..cd532784aa8 --- /dev/null +++ b/go/vt/vtgate/planbuilder/fallback_planner_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/vt/vtgate/engine" +) + +type testPlanner struct { + panic interface{} + err error + res engine.Primitive + called bool +} + +var _ selectPlanner = (*testPlanner)(nil).plan + +func (tp *testPlanner) plan(_ string) func(sqlparser.Statement, sqlparser.BindVars, ContextVSchema) (engine.Primitive, error) { + return func(statement sqlparser.Statement, vars sqlparser.BindVars, schema ContextVSchema) (engine.Primitive, error) { + tp.called = true + if tp.panic != nil { + panic(tp.panic) + } + return tp.res, tp.err + } +} + +func TestFallbackPlanner(t *testing.T) { + a := &testPlanner{} + b := &testPlanner{} + fb := &fallbackPlanner{ + primary: a.plan, + fallback: b.plan, + } + + stmt := &sqlparser.Select{} + var vschema ContextVSchema + + // first planner succeeds + _, _ = fb.plan("query")(stmt, nil, vschema) + assert.True(t, a.called) + assert.False(t, b.called) + a.called = false + + // first planner errors + a.err = fmt.Errorf("fail") + 
_, _ = fb.plan("query")(stmt, nil, vschema) + assert.True(t, a.called) + assert.True(t, b.called) + + a.called = false + b.called = false + + // first planner panics + a.panic = "oh noes" + _, _ = fb.plan("query")(stmt, nil, vschema) + assert.True(t, a.called) + assert.True(t, b.called) +} diff --git a/go/vt/vtgate/planbuilder/filtering.go b/go/vt/vtgate/planbuilder/filtering.go index 6b442a2f169..4621cf169a8 100644 --- a/go/vt/vtgate/planbuilder/filtering.go +++ b/go/vt/vtgate/planbuilder/filtering.go @@ -78,7 +78,7 @@ func planFilter(pb *primitiveBuilder, input logicalPlan, filter sqlparser.Expr, return nil, errors.New("unsupported: filtering on results of aggregates") } - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "%T.filtering: unreachable", input) + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "[BUG] unreachable %T.filtering", input) } func filterVindexFunc(node *vindexFunc, filter sqlparser.Expr) (logicalPlan, error) { diff --git a/go/vt/vtgate/planbuilder/from.go b/go/vt/vtgate/planbuilder/from.go index 245186a5c59..94a75e134e2 100644 --- a/go/vt/vtgate/planbuilder/from.go +++ b/go/vt/vtgate/planbuilder/from.go @@ -32,8 +32,8 @@ import ( // This file has functions to analyze the FROM clause. // processDMLTable analyzes the FROM clause for DMLs and returns a route. -func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs, where sqlparser.Expr) (*route, error) { - if err := pb.processTableExprs(tableExprs, where); err != nil { +func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs, reservedVars sqlparser.BindVars, where sqlparser.Expr) (*route, error) { + if err := pb.processTableExprs(tableExprs, reservedVars, where); err != nil { return nil, err } rb, ok := pb.plan.(*route) @@ -48,28 +48,28 @@ func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs, whe // processTableExprs analyzes the FROM clause. It produces a logicalPlan // with all the routes identified. 
-func (pb *primitiveBuilder) processTableExprs(tableExprs sqlparser.TableExprs, where sqlparser.Expr) error { +func (pb *primitiveBuilder) processTableExprs(tableExprs sqlparser.TableExprs, reservedVars sqlparser.BindVars, where sqlparser.Expr) error { if len(tableExprs) == 1 { - return pb.processTableExpr(tableExprs[0], where) + return pb.processTableExpr(tableExprs[0], reservedVars, where) } - if err := pb.processTableExpr(tableExprs[0], where); err != nil { + if err := pb.processTableExpr(tableExprs[0], reservedVars, where); err != nil { return err } rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processTableExprs(tableExprs[1:], where); err != nil { + if err := rpb.processTableExprs(tableExprs[1:], reservedVars, where); err != nil { return err } - return pb.join(rpb, nil, where) + return pb.join(rpb, nil, reservedVars, where) } // processTableExpr produces a logicalPlan subtree for the given TableExpr. -func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, where sqlparser.Expr) error { +func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, reservedVars sqlparser.BindVars, where sqlparser.Expr) error { switch tableExpr := tableExpr.(type) { case *sqlparser.AliasedTableExpr: - return pb.processAliasedTable(tableExpr) + return pb.processAliasedTable(tableExpr, reservedVars) case *sqlparser.ParenTableExpr: - err := pb.processTableExprs(tableExpr.Exprs, where) + err := pb.processTableExprs(tableExpr.Exprs, reservedVars, where) // If it's a route, preserve the parenthesis so things // don't associate differently when more things are pushed // into it. FROM a, (b, c) should not become FROM a, b, c. 
@@ -83,7 +83,7 @@ func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, wher } return err case *sqlparser.JoinTableExpr: - return pb.processJoin(tableExpr, where) + return pb.processJoin(tableExpr, reservedVars, where) } return fmt.Errorf("BUG: unexpected table expression type: %T", tableExpr) } @@ -95,7 +95,7 @@ func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, wher // versatile than a subquery. If a subquery becomes a route, then any result // columns that represent underlying vindex columns are also exposed as // vindex columns. -func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTableExpr) error { +func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTableExpr, reservedVars sqlparser.BindVars) error { switch expr := tableExpr.Expr.(type) { case sqlparser.TableName: return pb.buildTablePrimitive(tableExpr, expr) @@ -103,11 +103,11 @@ func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTabl spb := newPrimitiveBuilder(pb.vschema, pb.jt) switch stmt := expr.Select.(type) { case *sqlparser.Select: - if err := spb.processSelect(stmt, nil, ""); err != nil { + if err := spb.processSelect(stmt, reservedVars, nil, ""); err != nil { return err } case *sqlparser.Union: - if err := spb.processUnion(stmt, nil); err != nil { + if err := spb.processUnion(stmt, reservedVars, nil); err != nil { return err } default: @@ -259,7 +259,7 @@ func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTabl eroute.Vindex, _ = vindex.(vindexes.SingleColumn) eroute.Values = []sqltypes.PlanValue{{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, vschemaTable.Pinned)}} } - eroute.TableName = vschemaTable.Name.String() + eroute.TableName = sqlparser.String(vschemaTable.Name) rb.eroute = eroute return nil @@ -268,7 +268,7 @@ func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTabl // processJoin produces a logicalPlan subtree for the 
given Join. // If the left and right nodes can be part of the same route, // then it's a route. Otherwise, it's a join. -func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, where sqlparser.Expr) error { +func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, reservedVars sqlparser.BindVars, where sqlparser.Expr) error { switch ajoin.Join { case sqlparser.NormalJoinType, sqlparser.StraightJoinType, sqlparser.LeftJoinType: case sqlparser.RightJoinType: @@ -276,14 +276,14 @@ func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, where sq default: return fmt.Errorf("unsupported: %s", ajoin.Join.ToString()) } - if err := pb.processTableExpr(ajoin.LeftExpr, where); err != nil { + if err := pb.processTableExpr(ajoin.LeftExpr, reservedVars, where); err != nil { return err } rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processTableExpr(ajoin.RightExpr, where); err != nil { + if err := rpb.processTableExpr(ajoin.RightExpr, reservedVars, where); err != nil { return err } - return pb.join(rpb, ajoin, where) + return pb.join(rpb, ajoin, reservedVars, where) } // convertToLeftJoin converts a right join into a left join. @@ -300,7 +300,7 @@ func convertToLeftJoin(ajoin *sqlparser.JoinTableExpr) { ajoin.Join = sqlparser.LeftJoinType } -func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, where sqlparser.Expr) error { +func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, reservedVars sqlparser.BindVars, where sqlparser.Expr) error { // Merge the symbol tables. In the case of a left join, we have to // ideally create new symbols that originate from the join primitive. 
// However, this is not worth it for now, because the Push functions @@ -313,12 +313,12 @@ func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTab lRoute, leftIsRoute := pb.plan.(*route) rRoute, rightIsRoute := rpb.plan.(*route) if !leftIsRoute || !rightIsRoute { - return newJoin(pb, rpb, ajoin) + return newJoin(pb, rpb, ajoin, reservedVars) } // Try merging the routes. if !lRoute.JoinCanMerge(pb, rRoute, ajoin, where) { - return newJoin(pb, rpb, ajoin) + return newJoin(pb, rpb, ajoin, reservedVars) } if lRoute.eroute.Opcode == engine.SelectReference { @@ -350,7 +350,7 @@ func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTab if ajoin == nil { return nil } - pullouts, _, expr, err := pb.findOrigin(ajoin.Condition.On) + pullouts, _, expr, err := pb.findOrigin(ajoin.Condition.On, reservedVars) if err != nil { return err } diff --git a/go/vt/vtgate/planbuilder/grouping.go b/go/vt/vtgate/planbuilder/grouping.go index e3d81b3ea86..49324a0e8bb 100644 --- a/go/vt/vtgate/planbuilder/grouping.go +++ b/go/vt/vtgate/planbuilder/grouping.go @@ -56,7 +56,7 @@ func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.Grou case *sqlparser.ColName: c := e.Metadata.(*column) if c.Origin() == node { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "group by expression cannot reference an aggregate function: %v", sqlparser.String(e)) + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongGroupField, "group by expression cannot reference an aggregate function: %v", sqlparser.String(e)) } for i, rc := range node.resultColumns { if rc.column == c { @@ -91,7 +91,7 @@ func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.Grou return node, nil } - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%T.groupBy: unreachable", input) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable %T.groupBy: ", input) } // planDistinct makes the output distinct 
@@ -122,5 +122,5 @@ func planDistinct(input logicalPlan) (logicalPlan, error) { return input, nil } - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%T.distinct: unreachable", input) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable %T.distinct", input) } diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index c27e6c64ed6..c4164d18efa 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -30,11 +30,11 @@ import ( ) // buildInsertPlan builds the route for an INSERT statement. -func buildInsertPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildInsertPlan(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { ins := stmt.(*sqlparser.Insert) - pb := newPrimitiveBuilder(vschema, newJointab(sqlparser.GetBindvars(ins))) + pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) exprs := sqlparser.TableExprs{&sqlparser.AliasedTableExpr{Expr: ins.Table}} - rb, err := pb.processDMLTable(exprs, nil) + rb, err := pb.processDMLTable(exprs, reservedVars, nil) if err != nil { return nil, err } @@ -46,7 +46,7 @@ func buildInsertPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.P if len(pb.st.tables) != 1 { // Unreachable. 
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: multi-table insert statement in sharded keyspace") + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi-table insert statement in not supported in sharded keyspace") } var vschemaTable *vindexes.Table for _, tval := range pb.st.tables { @@ -54,7 +54,9 @@ func buildInsertPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.P vschemaTable = tval.vschemaTable } if !rb.eroute.Keyspace.Sharded { - if !pb.finalizeUnshardedDMLSubqueries(ins) { + if pb.finalizeUnshardedDMLSubqueries(reservedVars, ins) { + vschema.WarnUnshardedOnly("subqueries can't be sharded for INSERT") + } else { return nil, errors.New("unsupported: sharded subquery in insert values") } return buildInsertUnshardedPlan(ins, vschemaTable) @@ -182,7 +184,7 @@ func buildInsertShardedPlan(ins *sqlparser.Insert, table *vindexes.Table) (engin colNum := findOrAddColumn(ins, col) for rowNum, row := range rows { name := ":" + engine.InsertVarName(col, rowNum) - row[colNum] = sqlparser.NewArgument([]byte(name)) + row[colNum] = sqlparser.NewArgument(name) } } } @@ -234,7 +236,7 @@ func modifyForAutoinc(ins *sqlparser.Insert, eins *engine.Insert) error { return fmt.Errorf("could not compute value for vindex or auto-inc column: %v", err) } autoIncValues.Values = append(autoIncValues.Values, pv) - row[colNum] = sqlparser.NewArgument([]byte(":" + engine.SeqVarName + strconv.Itoa(rowNum))) + row[colNum] = sqlparser.NewArgument(":" + engine.SeqVarName + strconv.Itoa(rowNum)) } eins.Generate = &engine.Generate{ diff --git a/go/vt/vtgate/planbuilder/join.go b/go/vt/vtgate/planbuilder/join.go index dc4ffe26732..29b30d4c718 100644 --- a/go/vt/vtgate/planbuilder/join.go +++ b/go/vt/vtgate/planbuilder/join.go @@ -19,6 +19,8 @@ package planbuilder import ( "errors" + "vitess.io/vitess/go/vt/vtgate/semantics" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -72,7 +74,7 @@ type join struct { // newJoin 
makes a new join using the two planBuilder. ajoin can be nil // if the join is on a ',' operator. lpb will contain the resulting join. // rpb will be discarded. -func newJoin(lpb, rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr) error { +func newJoin(lpb, rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, reservedVars sqlparser.BindVars) error { // This function converts ON clauses to WHERE clauses. The WHERE clause // scope can see all tables, whereas the ON clause can only see the // participants of the JOIN. However, since the ON clause doesn't allow @@ -94,7 +96,7 @@ func newJoin(lpb, rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr) error { // At this point, the LHS symtab also contains symbols of the RHS. // But the RHS will hide those, as intended. rpb.st.Outer = lpb.st - if err := rpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr); err != nil { + if err := rpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr, reservedVars); err != nil { return err } case ajoin.Condition.Using != nil: @@ -114,7 +116,7 @@ func newJoin(lpb, rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr) error { if ajoin == nil || opcode == engine.LeftJoin { return nil } - return lpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr) + return lpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr, reservedVars) } // Order implements the logicalPlan interface @@ -151,6 +153,15 @@ func (jb *join) Wireup(plan logicalPlan, jt *jointab) error { return jb.Left.Wireup(plan, jt) } +// Wireup2 implements the logicalPlan interface +func (jb *join) WireupV4(semTable *semantics.SemTable) error { + err := jb.Right.WireupV4(semTable) + if err != nil { + return err + } + return jb.Left.WireupV4(semTable) +} + // SupplyVar implements the logicalPlan interface func (jb *join) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { if !jb.isOnLeft(from) { @@ -235,6 +246,11 @@ func (jb *join) Rewrite(inputs ...logicalPlan) error { return nil } +// Solves implements the 
logicalPlan interface +func (jb *join) ContainsTables() semantics.TableSet { + return jb.Left.ContainsTables().Merge(jb.Right.ContainsTables()) +} + // Inputs implements the logicalPlan interface func (jb *join) Inputs() []logicalPlan { return []logicalPlan{jb.Left, jb.Right} diff --git a/go/vt/vtgate/planbuilder/join2.go b/go/vt/vtgate/planbuilder/join2.go new file mode 100644 index 00000000000..7b273c88865 --- /dev/null +++ b/go/vt/vtgate/planbuilder/join2.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +var _ logicalPlan = (*joinV4)(nil) + +// joinV4 is used to build a Join primitive. +// It's used to build an inner join and only used by the V4 planner +type joinV4 struct { + // Left and Right are the nodes for the join. 
+ Left, Right logicalPlan + Cols []int + Vars map[string]int +} + +// Order implements the logicalPlan interface +func (j *joinV4) Order() int { + panic("implement me") +} + +// ResultColumns implements the logicalPlan interface +func (j *joinV4) ResultColumns() []*resultColumn { + panic("implement me") +} + +// Reorder implements the logicalPlan interface +func (j *joinV4) Reorder(i int) { + panic("implement me") +} + +// Wireup implements the logicalPlan interface +func (j *joinV4) Wireup(lp logicalPlan, jt *jointab) error { + panic("implement me") +} + +// Wireup2 implements the logicalPlan interface +func (j *joinV4) WireupV4(semTable *semantics.SemTable) error { + err := j.Left.WireupV4(semTable) + if err != nil { + return err + } + return j.Right.WireupV4(semTable) +} + +// SupplyVar implements the logicalPlan interface +func (j *joinV4) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { + panic("implement me") +} + +// SupplyCol implements the logicalPlan interface +func (j *joinV4) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { + panic("implement me") +} + +// SupplyWeightString implements the logicalPlan interface +func (j *joinV4) SupplyWeightString(colNumber int) (weightcolNumber int, err error) { + panic("implement me") +} + +// Primitive implements the logicalPlan interface +func (j *joinV4) Primitive() engine.Primitive { + return &engine.Join{ + Left: j.Left.Primitive(), + Right: j.Right.Primitive(), + Cols: j.Cols, + Vars: j.Vars, + } +} + +// Inputs implements the logicalPlan interface +func (j *joinV4) Inputs() []logicalPlan { + panic("implement me") +} + +// Rewrite implements the logicalPlan interface +func (j *joinV4) Rewrite(inputs ...logicalPlan) error { + panic("implement me") +} + +// Solves implements the logicalPlan interface +func (j *joinV4) ContainsTables() semantics.TableSet { + return j.Left.ContainsTables().Merge(j.Right.ContainsTables()) +} diff --git a/go/vt/vtgate/planbuilder/jointab.go 
b/go/vt/vtgate/planbuilder/jointab.go index 472ab4d0016..77d82a8525c 100644 --- a/go/vt/vtgate/planbuilder/jointab.go +++ b/go/vt/vtgate/planbuilder/jointab.go @@ -50,11 +50,7 @@ func (jt *jointab) Procure(plan logicalPlan, col *sqlparser.ColName, to int) str suffix := "" i := 0 for { - if !col.Qualifier.IsEmpty() { - joinVar = col.Qualifier.Name.CompliantName() + "_" + col.Name.CompliantName() + suffix - } else { - joinVar = col.Name.CompliantName() + suffix - } + joinVar = col.CompliantName(suffix) if _, ok := jt.vars[joinVar]; !ok { break } diff --git a/go/vt/vtgate/planbuilder/jointree_transformers.go b/go/vt/vtgate/planbuilder/jointree_transformers.go new file mode 100644 index 00000000000..44e48f47bb7 --- /dev/null +++ b/go/vt/vtgate/planbuilder/jointree_transformers.go @@ -0,0 +1,116 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import ( + "sort" + "strings" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" + + "vitess.io/vitess/go/vt/vterrors" +) + +func transformToLogicalPlan(tree joinTree, semTable *semantics.SemTable) (logicalPlan, error) { + switch n := tree.(type) { + case *routePlan: + return transformRoutePlan(n) + + case *joinPlan: + return transformJoinPlan(n, semTable) + } + + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unknown type encountered: %T", tree) +} + +func transformJoinPlan(n *joinPlan, semTable *semantics.SemTable) (logicalPlan, error) { + lhs, err := transformToLogicalPlan(n.lhs, semTable) + if err != nil { + return nil, err + } + rhs, err := transformToLogicalPlan(n.rhs, semTable) + if err != nil { + return nil, err + } + return &joinV4{ + Left: lhs, + Right: rhs, + Cols: n.columns, + Vars: n.vars, + }, nil +} + +func transformRoutePlan(n *routePlan) (*route, error) { + var tablesForSelect sqlparser.TableExprs + tableNameMap := map[string]interface{}{} + + sort.Sort(n._tables) + for _, t := range n._tables { + alias := sqlparser.AliasedTableExpr{ + Expr: sqlparser.TableName{ + Name: t.vtable.Name, + }, + Partitions: nil, + As: t.qtable.alias.As, + Hints: nil, + } + tablesForSelect = append(tablesForSelect, &alias) + tableNameMap[sqlparser.String(t.qtable.table.Name)] = nil + } + + predicates := n.Predicates() + var where *sqlparser.Where + if predicates != nil { + where = &sqlparser.Where{Expr: predicates, Type: sqlparser.WhereClause} + } + + var singleColumn vindexes.SingleColumn + if n.vindex != nil { + singleColumn = n.vindex.(vindexes.SingleColumn) + } + + var expressions sqlparser.SelectExprs + for _, col := range n.columns { + expressions = append(expressions, &sqlparser.AliasedExpr{Expr: col}) + } + + var tableNames []string + for name := range 
tableNameMap { + tableNames = append(tableNames, name) + } + sort.Strings(tableNames) + + return &route{ + eroute: &engine.Route{ + Opcode: n.routeOpCode, + TableName: strings.Join(tableNames, ", "), + Keyspace: n.keyspace, + Vindex: singleColumn, + Values: n.vindexValues, + }, + Select: &sqlparser.Select{ + SelectExprs: expressions, + From: tablesForSelect, + Where: where, + }, + tables: n.solved, + }, nil +} diff --git a/go/vt/vtgate/planbuilder/locktables.go b/go/vt/vtgate/planbuilder/locktables.go index 062e34f6968..d4d2eac8985 100644 --- a/go/vt/vtgate/planbuilder/locktables.go +++ b/go/vt/vtgate/planbuilder/locktables.go @@ -25,13 +25,13 @@ import ( ) // buildLockPlan plans lock tables statement. -func buildLockPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildLockPlan(stmt sqlparser.Statement, _ sqlparser.BindVars, _ ContextVSchema) (engine.Primitive, error) { log.Warningf("Lock Tables statement is ignored: %v", stmt) return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0)), nil } // buildUnlockPlan plans lock tables statement. 
-func buildUnlockPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildUnlockPlan(stmt sqlparser.Statement, _ sqlparser.BindVars, _ ContextVSchema) (engine.Primitive, error) { log.Warningf("Unlock Tables statement is ignored: %v", stmt) return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0)), nil } diff --git a/go/vt/vtgate/planbuilder/logical_plan.go b/go/vt/vtgate/planbuilder/logical_plan.go index 40608565c8b..81e4fae0aa2 100644 --- a/go/vt/vtgate/planbuilder/logical_plan.go +++ b/go/vt/vtgate/planbuilder/logical_plan.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/semantics" ) // logicalPlan defines the interface that a primitive must @@ -48,6 +49,9 @@ type logicalPlan interface { // the lhs nodes. Wireup(lp logicalPlan, jt *jointab) error + // WireupV4 does the wire up work for the V4 planner + WireupV4(semTable *semantics.SemTable) error + // SupplyVar finds the common root between from and to. If it's // the common root, it supplies the requested var to the rhs tree. // If the primitive already has the column in its list, it should @@ -65,15 +69,22 @@ type logicalPlan interface { SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) // SupplyWeightString must supply a weight_string expression of the - // specified column. + // specified column. It returns an error if we cannot supply a weight column for it. SupplyWeightString(colNumber int) (weightcolNumber int, err error) // Primitive returns the underlying primitive. // This function should only be called after Wireup is finished. 
Primitive() engine.Primitive + // Inputs are the children of this plan Inputs() []logicalPlan + + // Rewrite replaces the inputs of this plan with the ones provided Rewrite(inputs ...logicalPlan) error + + // ContainsTables keeps track which query tables are being solved by this logical plan + // This is only applicable for plans that have been built with the V4 planner + ContainsTables() semantics.TableSet } //------------------------------------------------------------------------- @@ -147,6 +158,10 @@ func (bc *logicalPlanCommon) Wireup(plan logicalPlan, jt *jointab) error { return bc.input.Wireup(plan, jt) } +func (bc *logicalPlanCommon) WireupV4(semTable *semantics.SemTable) error { + return bc.input.WireupV4(semTable) +} + func (bc *logicalPlanCommon) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { bc.input.SupplyVar(from, to, col, varname) } @@ -173,6 +188,11 @@ func (bc *logicalPlanCommon) Inputs() []logicalPlan { return []logicalPlan{bc.input} } +// Solves implements the logicalPlan interface +func (bc *logicalPlanCommon) ContainsTables() semantics.TableSet { + return bc.input.ContainsTables() +} + //------------------------------------------------------------------------- // resultsBuilder is a superset of logicalPlanCommon. 
It also handles diff --git a/go/vt/vtgate/planbuilder/memory_sort.go b/go/vt/vtgate/planbuilder/memory_sort.go index ade1e68817f..1f96ed47d5e 100644 --- a/go/vt/vtgate/planbuilder/memory_sort.go +++ b/go/vt/vtgate/planbuilder/memory_sort.go @@ -20,6 +20,8 @@ import ( "errors" "fmt" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" @@ -81,8 +83,9 @@ func newMemorySort(plan logicalPlan, orderBy sqlparser.OrderBy) (*memorySort, er return nil, fmt.Errorf("unsupported: memory sort: order by must reference a column in the select list: %s", sqlparser.String(order)) } ob := engine.OrderbyParams{ - Col: colNumber, - Desc: order.Direction == sqlparser.DescOrder, + Col: colNumber, + WeightStringCol: -1, + Desc: order.Direction == sqlparser.DescOrder, } ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, ob) } @@ -108,20 +111,29 @@ func (ms *memorySort) SetLimit(limit *sqlparser.Limit) error { func (ms *memorySort) Wireup(plan logicalPlan, jt *jointab) error { for i, orderby := range ms.eMemorySort.OrderBy { rc := ms.resultColumns[orderby.Col] - if sqltypes.IsText(rc.column.typ) { + // Add a weight_string column if we know that the column is a textual column or if its type is unknown + if sqltypes.IsText(rc.column.typ) || rc.column.typ == sqltypes.Null { // If a weight string was previously requested, reuse it. 
if weightcolNumber, ok := ms.weightStrings[rc]; ok { - ms.eMemorySort.OrderBy[i].Col = weightcolNumber + ms.eMemorySort.OrderBy[i].WeightStringCol = weightcolNumber continue } weightcolNumber, err := ms.input.SupplyWeightString(orderby.Col) if err != nil { + _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) + if isUnsupportedErr { + continue + } return err } ms.weightStrings[rc] = weightcolNumber - ms.eMemorySort.OrderBy[i].Col = weightcolNumber + ms.eMemorySort.OrderBy[i].WeightStringCol = weightcolNumber ms.eMemorySort.TruncateColumnCount = len(ms.resultColumns) } } return ms.input.Wireup(plan, jt) } + +func (ms *memorySort) WireupV4(semTable *semantics.SemTable) error { + return ms.input.WireupV4(semTable) +} diff --git a/go/vt/vtgate/planbuilder/merge_sort.go b/go/vt/vtgate/planbuilder/merge_sort.go index f0675489f20..1fea7894cc4 100644 --- a/go/vt/vtgate/planbuilder/merge_sort.go +++ b/go/vt/vtgate/planbuilder/merge_sort.go @@ -19,6 +19,7 @@ package planbuilder import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/semantics" ) var _ logicalPlan = (*mergeSort)(nil) @@ -65,15 +66,20 @@ func (ms *mergeSort) Wireup(plan logicalPlan, jt *jointab) error { rb := ms.input.(*route) for i, orderby := range rb.eroute.OrderBy { rc := ms.resultColumns[orderby.Col] - if sqltypes.IsText(rc.column.typ) { + // Add a weight_string column if we know that the column is a textual column or if its type is unknown + if sqltypes.IsText(rc.column.typ) || rc.column.typ == sqltypes.Null { // If a weight string was previously requested, reuse it. 
if colNumber, ok := ms.weightStrings[rc]; ok { - rb.eroute.OrderBy[i].Col = colNumber + rb.eroute.OrderBy[i].WeightStringCol = colNumber continue } var err error - rb.eroute.OrderBy[i].Col, err = rb.SupplyWeightString(orderby.Col) + rb.eroute.OrderBy[i].WeightStringCol, err = rb.SupplyWeightString(orderby.Col) if err != nil { + _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) + if isUnsupportedErr { + continue + } return err } ms.truncateColumnCount = len(ms.resultColumns) @@ -82,3 +88,7 @@ func (ms *mergeSort) Wireup(plan logicalPlan, jt *jointab) error { rb.eroute.TruncateColumnCount = ms.truncateColumnCount return ms.input.Wireup(plan, jt) } + +func (ms *mergeSort) WireupV4(semTable *semantics.SemTable) error { + return ms.input.WireupV4(semTable) +} diff --git a/go/vt/vtgate/planbuilder/migration.go b/go/vt/vtgate/planbuilder/migration.go new file mode 100644 index 00000000000..6c1898b7138 --- /dev/null +++ b/go/vt/vtgate/planbuilder/migration.go @@ -0,0 +1,77 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/key" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" +) + +func buildAlterMigrationPlan(query string, vschema ContextVSchema) (engine.Primitive, error) { + if !*enableOnlineDDL { + return nil, schema.ErrOnlineDDLDisabled + } + dest, ks, tabletType, err := vschema.TargetDestination("") + if err != nil { + return nil, err + } + if ks == nil { + return nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)") + } + + if tabletType != topodatapb.TabletType_MASTER { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "ALTER VITESS_MIGRATION works only on primary tablet") + } + + if dest == nil { + dest = key.DestinationAllShards{} + } + + return &engine.Send{ + Keyspace: ks, + TargetDestination: dest, + Query: query, + }, nil +} + +func buildRevertMigrationPlan(query string, stmt *sqlparser.RevertMigration, vschema ContextVSchema) (engine.Primitive, error) { + if !*enableOnlineDDL { + return nil, schema.ErrOnlineDDLDisabled + } + _, ks, tabletType, err := vschema.TargetDestination("") + if err != nil { + return nil, err + } + if ks == nil { + return nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)") + } + + if tabletType != topodatapb.TabletType_MASTER { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "REVERT VITESS_MIGRATION works only on primary tablet") + } + + return &engine.RevertMigration{ + Keyspace: ks, + Stmt: stmt, + Query: query, + }, nil +} diff --git a/go/vt/vtgate/planbuilder/ordered_aggregate.go 
b/go/vt/vtgate/planbuilder/ordered_aggregate.go index 60fff9ffd03..82d06035b2b 100644 --- a/go/vt/vtgate/planbuilder/ordered_aggregate.go +++ b/go/vt/vtgate/planbuilder/ordered_aggregate.go @@ -21,6 +21,8 @@ import ( "fmt" "strconv" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" @@ -338,6 +340,10 @@ func (oa *orderedAggregate) Wireup(plan logicalPlan, jt *jointab) error { } weightcolNumber, err := oa.input.SupplyWeightString(colNumber) if err != nil { + _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) + if isUnsupportedErr { + continue + } return err } oa.weightStrings[rc] = weightcolNumber @@ -347,3 +353,7 @@ func (oa *orderedAggregate) Wireup(plan logicalPlan, jt *jointab) error { } return oa.input.Wireup(plan, jt) } + +func (oa *orderedAggregate) WireupV4(semTable *semantics.SemTable) error { + return oa.input.WireupV4(semTable) +} diff --git a/go/vt/vtgate/planbuilder/ordering.go b/go/vt/vtgate/planbuilder/ordering.go index 8d918dfbd7b..c8c52739090 100644 --- a/go/vt/vtgate/planbuilder/ordering.go +++ b/go/vt/vtgate/planbuilder/ordering.go @@ -53,7 +53,7 @@ func planOrdering(pb *primitiveBuilder, input logicalPlan, orderBy sqlparser.Ord if orderBy == nil { return input, nil } - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "%T.ordering: unreachable", input) + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "[BUG] unreachable %T.ordering", input) } func planOAOrdering(pb *primitiveBuilder, orderBy sqlparser.OrderBy, oa *orderedAggregate) (logicalPlan, error) { @@ -290,8 +290,9 @@ func planRouteOrdering(orderBy sqlparser.OrderBy, node *route) (logicalPlan, err return nil, fmt.Errorf("unsupported: in scatter query: order by must reference a column in the select list: %s", sqlparser.String(order)) } ob := engine.OrderbyParams{ - Col: colNumber, - Desc: order.Direction == sqlparser.DescOrder, + Col: colNumber, + WeightStringCol: -1, + Desc: 
order.Direction == sqlparser.DescOrder, } node.eroute.OrderBy = append(node.eroute.OrderBy, ob) diff --git a/go/vt/vtgate/planbuilder/other_read.go b/go/vt/vtgate/planbuilder/other_read.go index ac9654a9498..aaedecc634f 100644 --- a/go/vt/vtgate/planbuilder/other_read.go +++ b/go/vt/vtgate/planbuilder/other_read.go @@ -35,7 +35,6 @@ func buildOtherReadAndAdmin(sql string, vschema ContextVSchema) (engine.Primitiv Keyspace: keyspace, TargetDestination: destination, Query: sql, //This is original sql query to be passed as the parser can provide partial ddl AST. - IsDML: false, SingleShardOnly: true, }, nil } diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index a73e62e2971..8dca9e9334e 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -23,10 +23,14 @@ import ( "fmt" "io" "io/ioutil" + "math/rand" "os" + "runtime/debug" "strings" "testing" + "vitess.io/vitess/go/vt/vtgate/semantics" + "github.com/google/go-cmp/cmp" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -146,6 +150,8 @@ func init() { vindexes.Register("costly", newCostlyIndex) } +const samePlanMarker = "Gen4 plan same as above\n" + func TestPlan(t *testing.T) { vschemaWrapper := &vschemaWrapper{ v: loadSchema(t, "schema_test.json"), @@ -165,24 +171,26 @@ func TestPlan(t *testing.T) { // the column is named as Id. This is to make sure that // column names are case-preserved, but treated as // case-insensitive even if they come from the vschema. 
- testFile(t, "aggr_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "dml_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "from_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "filter_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "postprocess_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "select_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "symtab_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "unsupported_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "vindex_func_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "wireup_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "memory_sort_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "use_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "set_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "union_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "transaction_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "lock_cases.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "ddl_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper) - testFile(t, "show_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper) + testFile(t, "aggr_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "dml_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "from_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "filter_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "postprocess_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "select_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "symtab_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "unsupported_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "vindex_func_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, 
"wireup_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "memory_sort_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "use_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "set_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "union_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "transaction_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "lock_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "large_cases.txt", testOutputTempDir, vschemaWrapper, true) + testFile(t, "ddl_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false) + testFile(t, "flush_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false) + testFile(t, "show_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false) } func TestSysVarSetDisabled(t *testing.T) { @@ -194,7 +202,7 @@ func TestSysVarSetDisabled(t *testing.T) { testOutputTempDir, err := ioutil.TempDir("", "plan_test") require.NoError(t, err) defer os.RemoveAll(testOutputTempDir) - testFile(t, "set_sysvar_disabled_cases.txt", testOutputTempDir, vschemaWrapper) + testFile(t, "set_sysvar_disabled_cases.txt", testOutputTempDir, vschemaWrapper, false) } func TestOne(t *testing.T) { @@ -202,7 +210,7 @@ func TestOne(t *testing.T) { v: loadSchema(t, "schema_test.json"), } - testFile(t, "onecase.txt", "", vschema) + testFile(t, "onecase.txt", "", vschema, true) } func TestBypassPlanningShardTargetFromFile(t *testing.T) { @@ -219,7 +227,7 @@ func TestBypassPlanningShardTargetFromFile(t *testing.T) { tabletType: topodatapb.TabletType_MASTER, dest: key.DestinationShard("-80")} - testFile(t, "bypass_shard_cases.txt", testOutputTempDir, vschema) + testFile(t, "bypass_shard_cases.txt", testOutputTempDir, vschema, true) } func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) { testOutputTempDir, err := ioutil.TempDir("", "plan_test") @@ -238,7 +246,7 @@ func 
TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) { dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]}, } - testFile(t, "bypass_keyrange_cases.txt", testOutputTempDir, vschema) + testFile(t, "bypass_keyrange_cases.txt", testOutputTempDir, vschema, true) } func TestWithDefaultKeyspaceFromFile(t *testing.T) { @@ -255,9 +263,26 @@ func TestWithDefaultKeyspaceFromFile(t *testing.T) { tabletType: topodatapb.TabletType_MASTER, } - testFile(t, "alterVschema_cases.txt", testOutputTempDir, vschema) - testFile(t, "ddl_cases.txt", testOutputTempDir, vschema) - testFile(t, "show_cases.txt", testOutputTempDir, vschema) + testFile(t, "alterVschema_cases.txt", testOutputTempDir, vschema, false) + testFile(t, "ddl_cases.txt", testOutputTempDir, vschema, false) + testFile(t, "migration_cases.txt", testOutputTempDir, vschema, false) + testFile(t, "flush_cases.txt", testOutputTempDir, vschema, false) + testFile(t, "show_cases.txt", testOutputTempDir, vschema, false) + testFile(t, "call_cases.txt", testOutputTempDir, vschema, false) +} + +func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) { + // We are testing this separately so we can set a default keyspace + testOutputTempDir, err := ioutil.TempDir("", "plan_test") + require.NoError(t, err) + defer os.RemoveAll(testOutputTempDir) + vschema := &vschemaWrapper{ + v: loadSchema(t, "schema_test.json"), + keyspace: &vindexes.Keyspace{Name: "mysql"}, + tabletType: topodatapb.TabletType_MASTER, + } + + testFile(t, "sysschema_default.txt", testOutputTempDir, vschema, false) } func TestOtherPlanningFromFile(t *testing.T) { @@ -274,11 +299,11 @@ func TestOtherPlanningFromFile(t *testing.T) { tabletType: topodatapb.TabletType_MASTER, } - testFile(t, "other_read_cases.txt", testOutputTempDir, vschema) - testFile(t, "other_admin_cases.txt", testOutputTempDir, vschema) + testFile(t, "other_read_cases.txt", testOutputTempDir, vschema, false) + testFile(t, "other_admin_cases.txt", testOutputTempDir, vschema, false) } -func 
loadSchema(t *testing.T, filename string) *vindexes.VSchema { +func loadSchema(t testing.TB, filename string) *vindexes.VSchema { formal, err := vindexes.LoadFormal(locateFile(filename)) if err != nil { t.Fatal(err) @@ -303,6 +328,11 @@ type vschemaWrapper struct { tabletType topodatapb.TabletType dest key.Destination sysVarEnabled bool + version PlannerVersion +} + +func (vw *vschemaWrapper) ForeignKeyMode() string { + return "allow" } func (vw *vschemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) { @@ -312,6 +342,13 @@ func (vw *vschemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) { return []*vindexes.Keyspace{vw.keyspace}, nil } +func (vw *vschemaWrapper) Planner() PlannerVersion { + return vw.version +} +func (vw *vschemaWrapper) GetSemTable() *semantics.SemTable { + return nil +} + func (vw *vschemaWrapper) KeyspaceExists(keyspace string) bool { if vw.keyspace != nil { return vw.keyspace.Name == keyspace @@ -336,7 +373,7 @@ func (vw *vschemaWrapper) TargetDestination(qualifier string) (key.Destination, } keyspace := vw.v.Keyspaces[keyspaceName] if keyspace == nil { - return nil, nil, 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no keyspace with name [%s] found", keyspaceName) + return nil, nil, 0, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Unknown database '%s' in vschema", keyspaceName) } return vw.dest, keyspace.Keyspace, vw.tabletType, nil @@ -367,8 +404,8 @@ func (vw *vschemaWrapper) FindTableOrVindex(tab sqlparser.TableName) (*vindexes. if err != nil { return nil, nil, destKeyspace, destTabletType, destTarget, err } - if destKeyspace == "" && vw.keyspace != nil { - destKeyspace = vw.keyspace.Name + if destKeyspace == "" { + destKeyspace = vw.getActualKeyspace() } table, vindex, err := vw.v.FindTableOrVindex(destKeyspace, tab.Name.String(), topodatapb.TabletType_MASTER) if err != nil { @@ -377,6 +414,20 @@ func (vw *vschemaWrapper) FindTableOrVindex(tab sqlparser.TableName) (*vindexes. 
return table, vindex, destKeyspace, destTabletType, destTarget, nil } +func (vw *vschemaWrapper) getActualKeyspace() string { + if vw.keyspace == nil { + return "" + } + if !sqlparser.SystemSchema(vw.keyspace.Name) { + return vw.keyspace.Name + } + ks, err := vw.AnyKeyspace() + if err != nil { + return "" + } + return ks.Name +} + func (vw *vschemaWrapper) DefaultKeyspace() (*vindexes.Keyspace, error) { return vw.v.Keyspaces["main"].Keyspace, nil } @@ -393,36 +444,108 @@ func (vw *vschemaWrapper) TargetString() string { return "targetString" } -func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper) { +func (vw *vschemaWrapper) WarnUnshardedOnly(_ string, _ ...interface{}) { + +} + +func (vw *vschemaWrapper) ErrorIfShardedF(keyspace *vindexes.Keyspace, _, errFmt string, params ...interface{}) error { + if keyspace.Sharded { + return fmt.Errorf(errFmt, params...) + } + return nil +} + +func escapeNewLines(in string) string { + return strings.ReplaceAll(in, "\n", "\\n") +} + +func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, checkV4equalPlan bool) { + var checkAllTests = false t.Run(filename, func(t *testing.T) { expected := &strings.Builder{} - fail := false + fail := checkAllTests + var outFirstPlanner string for tcase := range iterateExecFile(filename) { - t.Run(tcase.comments, func(t *testing.T) { + t.Run(fmt.Sprintf("%d V3: %s", tcase.lineno, tcase.comments), func(t *testing.T) { + vschema.version = V3 plan, err := TestBuilder(tcase.input, vschema) - out := getPlanOrErrorOutput(err, plan) if out != tcase.output { fail = true - t.Errorf("File: %s, Line: %d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output, out), tcase.output, out) + t.Errorf("V3 - File: %s, Line: %d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output, out), tcase.output, out) } - if err != nil { out = `"` + out + `"` } + outFirstPlanner = out - expected.WriteString(fmt.Sprintf("%s\"%s\"\n%s\n\n", 
tcase.comments, tcase.input, out)) + expected.WriteString(fmt.Sprintf("%s\"%s\"\n%s\n", tcase.comments, escapeNewLines(tcase.input), out)) }) + + empty := false + if tcase.output2ndPlanner == "" { + empty = true + } + + vschema.version = Gen4 + out, err := getPlanOutput(tcase, vschema) + + // our expectation for the new planner on this query is one of three + // - it produces the same plan as V3 - this is shown using empty brackets: {\n} + // - it produces a different but accepted plan - this is shown using the accepted plan + // - or it produces a different plan that has not yet been accepted, or it fails to produce a plan + // this is shown by not having any info at all after the result for the V3 planner + // with this last expectation, it is an error if the V4 planner + // produces the same plan as the V3 planner does + testName := fmt.Sprintf("%d V4: %s", tcase.lineno, tcase.comments) + if !empty || checkAllTests { + t.Run(testName, func(t *testing.T) { + if out != tcase.output2ndPlanner { + fail = true + t.Errorf("V4 - %s:%d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output2ndPlanner, out), tcase.output, out) + + } + if err != nil { + out = `"` + out + `"` + } + + if outFirstPlanner == out { + expected.WriteString(samePlanMarker) + } else { + expected.WriteString(fmt.Sprintf("%s\n", out)) + } + }) + } else { + if out == tcase.output && checkV4equalPlan { + t.Run(testName, func(t *testing.T) { + t.Errorf("V4 - %s:%d\nplanner produces same output as V3", filename, tcase.lineno) + }) + } + } + + expected.WriteString("\n") } + if fail && tempDir != "" { gotFile := fmt.Sprintf("%s/%s", tempDir, filename) - ioutil.WriteFile(gotFile, []byte(strings.TrimSpace(expected.String())+"\n"), 0644) + _ = ioutil.WriteFile(gotFile, []byte(strings.TrimSpace(expected.String())+"\n"), 0644) fmt.Println(fmt.Sprintf("Errors found in plantests. 
If the output is correct, run `cp %s/* testdata/` to update test expectations", tempDir)) //nolint } }) } +func getPlanOutput(tcase testCase, vschema *vschemaWrapper) (out string, err error) { + defer func() { + if r := recover(); r != nil { + out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack())) + } + }() + plan, err := TestBuilder(tcase.input, vschema) + out = getPlanOrErrorOutput(err, plan) + return out, err +} + func getPlanOrErrorOutput(err error, plan *engine.Plan) string { if err != nil { return err.Error() @@ -432,11 +555,12 @@ func getPlanOrErrorOutput(err error, plan *engine.Plan) string { } type testCase struct { - file string - lineno int - input string - output string - comments string + file string + lineno int + input string + output string + output2ndPlanner string + comments string } func iterateExecFile(name string) (testCaseIterator chan testCase) { @@ -491,12 +615,42 @@ func iterateExecFile(name string) (testCaseIterator chan testCase) { break } } + + binput, err = r.ReadBytes('\n') + lineno++ + var output2Planner []byte + if err != nil && err != io.EOF { + panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error())) + } + if len(binput) > 0 && string(binput) == samePlanMarker { + output2Planner = output + } else if len(binput) > 0 && (binput[0] == '"' || binput[0] == '{') { + output2Planner = append(output2Planner, binput...) + for { + l, err := r.ReadBytes('\n') + lineno++ + if err != nil { + panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error())) + } + output2Planner = append(output2Planner, l...) 
+ if l[0] == '}' { + output2Planner = output2Planner[:len(output2Planner)-1] + break + } + if l[0] == '"' { + output2Planner = output2Planner[1 : len(output2Planner)-2] + break + } + } + } + testCaseIterator <- testCase{ - file: name, - lineno: lineno, - input: input, - output: string(output), - comments: comments, + file: name, + lineno: lineno, + input: input, + output: string(output), + output2ndPlanner: string(output2Planner), + comments: comments, } comments = "" } @@ -507,3 +661,75 @@ func iterateExecFile(name string) (testCaseIterator chan testCase) { func locateFile(name string) string { return "testdata/" + name } + +func BenchmarkPlanner(b *testing.B) { + filenames := []string{"from_cases.txt", "filter_cases.txt", "large_cases.txt", "aggr_cases.txt", "select_cases.txt", "union_cases.txt"} + vschema := &vschemaWrapper{ + v: loadSchema(b, "schema_test.json"), + sysVarEnabled: true, + } + for _, filename := range filenames { + var testCases []testCase + for tc := range iterateExecFile(filename) { + testCases = append(testCases, tc) + } + b.Run(filename+"-v3", func(b *testing.B) { + benchmarkPlanner(b, V3, testCases, vschema) + }) + b.Run(filename+"-v4", func(b *testing.B) { + benchmarkPlanner(b, Gen4, testCases, vschema) + }) + b.Run(filename+"-v4left2right", func(b *testing.B) { + benchmarkPlanner(b, Gen4Left2Right, testCases, vschema) + }) + } +} + +func BenchmarkSelectVsDML(b *testing.B) { + vschema := &vschemaWrapper{ + v: loadSchema(b, "schema_test.json"), + sysVarEnabled: true, + version: V3, + } + + var dmlCases []testCase + var selectCases []testCase + + for tc := range iterateExecFile("dml_cases.txt") { + dmlCases = append(dmlCases, tc) + } + + for tc := range iterateExecFile("select_cases.txt") { + if tc.output2ndPlanner != "" { + selectCases = append(selectCases, tc) + } + } + + rand.Shuffle(len(dmlCases), func(i, j int) { + dmlCases[i], dmlCases[j] = dmlCases[j], dmlCases[i] + }) + + rand.Shuffle(len(selectCases), func(i, j int) { + 
selectCases[i], selectCases[j] = selectCases[j], selectCases[i] + }) + + b.Run("DML (random sample, N=32)", func(b *testing.B) { + benchmarkPlanner(b, V3, dmlCases[:32], vschema) + }) + + b.Run("Select (random sample, N=32)", func(b *testing.B) { + benchmarkPlanner(b, V3, selectCases[:32], vschema) + }) +} + +func benchmarkPlanner(b *testing.B, version PlannerVersion, testCases []testCase, vschema *vschemaWrapper) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + for _, tcase := range testCases { + if tcase.output2ndPlanner != "" { + vschema.version = version + _, _ = TestBuilder(tcase.input, vschema) + } + } + } +} diff --git a/go/vt/vtgate/planbuilder/postprocess.go b/go/vt/vtgate/planbuilder/postprocess.go index 4784a9d9c06..7b925b411fa 100644 --- a/go/vt/vtgate/planbuilder/postprocess.go +++ b/go/vt/vtgate/planbuilder/postprocess.go @@ -95,7 +95,7 @@ var _ planVisitor = setUpperLimit // that it does not need to return more than the specified number of rows. // A primitive that cannot perform this can ignore the request. 
func setUpperLimit(plan logicalPlan) (bool, logicalPlan, error) { - arg := sqlparser.NewArgument([]byte(":__upper_limit")) + arg := sqlparser.NewArgument(":__upper_limit") switch node := plan.(type) { case *join: return false, node, nil diff --git a/go/vt/vtgate/planbuilder/project.go b/go/vt/vtgate/planbuilder/project.go index 2f96f40f3f2..71c1889ee84 100644 --- a/go/vt/vtgate/planbuilder/project.go +++ b/go/vt/vtgate/planbuilder/project.go @@ -167,5 +167,5 @@ func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.Aliase return node, rc, len(node.resultColumns) - 1, nil } - return nil, nil, 0, vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "%T.projection: unreachable", in) + return nil, nil, 0, vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "[BUG] unreachable %T.projection", in) } diff --git a/go/vt/vtgate/planbuilder/pullout_subquery.go b/go/vt/vtgate/planbuilder/pullout_subquery.go index 7f3372a103d..ec0862bcf0d 100644 --- a/go/vt/vtgate/planbuilder/pullout_subquery.go +++ b/go/vt/vtgate/planbuilder/pullout_subquery.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/semantics" ) var _ logicalPlan = (*pulloutSubquery)(nil) @@ -86,6 +87,14 @@ func (ps *pulloutSubquery) Wireup(plan logicalPlan, jt *jointab) error { return ps.subquery.Wireup(plan, jt) } +// Wireup2 implements the logicalPlan interface +func (ps *pulloutSubquery) WireupV4(semTable *semantics.SemTable) error { + if err := ps.underlying.WireupV4(semTable); err != nil { + return err + } + return ps.subquery.WireupV4(semTable) +} + // SupplyVar implements the logicalPlan interface func (ps *pulloutSubquery) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { if from <= ps.subquery.Order() { @@ -115,6 +124,11 @@ func (ps *pulloutSubquery) Rewrite(inputs ...logicalPlan) error { return nil } +// Solves implements the logicalPlan interface +func (ps *pulloutSubquery) 
ContainsTables() semantics.TableSet { + return ps.underlying.ContainsTables().Merge(ps.subquery.ContainsTables()) +} + // Inputs implements the logicalPlan interface func (ps *pulloutSubquery) Inputs() []logicalPlan { return []logicalPlan{ps.underlying, ps.subquery} diff --git a/go/vt/vtgate/planbuilder/querygraph.go b/go/vt/vtgate/planbuilder/querygraph.go new file mode 100644 index 00000000000..608825b5650 --- /dev/null +++ b/go/vt/vtgate/planbuilder/querygraph.go @@ -0,0 +1,226 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + /* + queryGraph represents the FROM and WHERE parts of a query. + It is an intermediate representation of the query that makes it easier for the planner + to find all possible join combinations. Instead of storing the query information in a form that is close + to the syntax (AST), we extract the interesting parts into a graph form with the nodes being tables in the FROM + clause and the edges between them being predicates. We keep predicates in a hash map keyed by the dependencies of + the predicate. This makes it very fast to look up connections between tables in the query. 
+ */ + queryGraph struct { + // the tables, including predicates that only depend on this particular table + tables []*queryTable + + // crossTable contains the predicates that need multiple tables + crossTable map[semantics.TableSet][]sqlparser.Expr + + // noDeps contains the predicates that can be evaluated anywhere. + noDeps sqlparser.Expr + + // subqueries contains the subqueries that depend on this query graph + subqueries map[*sqlparser.Subquery][]*queryGraph + } + + // queryTable is a single FROM table, including all predicates particular to this table + queryTable struct { + tableID semantics.TableSet + alias *sqlparser.AliasedTableExpr + table sqlparser.TableName + predicates []sqlparser.Expr + } +) + +func (qg *queryGraph) getPredicates(lhs, rhs semantics.TableSet) []sqlparser.Expr { + var allExprs []sqlparser.Expr + for tableSet, exprs := range qg.crossTable { + if tableSet.IsSolvedBy(lhs|rhs) && + tableSet.IsOverlapping(rhs) && + tableSet.IsOverlapping(lhs) { + allExprs = append(allExprs, exprs...) 
+ } + } + return allExprs +} + +func createQGFromSelect(sel *sqlparser.Select, semTable *semantics.SemTable) (*queryGraph, error) { + qg := newQueryGraph() + if err := qg.collectTables(sel.From, semTable); err != nil { + return nil, err + } + + if sel.Where != nil { + err := qg.collectPredicates(sel, semTable) + if err != nil { + return nil, err + } + } + return qg, nil +} + +func createQGFromSelectStatement(selStmt sqlparser.SelectStatement, semTable *semantics.SemTable) ([]*queryGraph, error) { + switch stmt := selStmt.(type) { + case *sqlparser.Select: + qg, err := createQGFromSelect(stmt, semTable) + if err != nil { + return nil, err + } + return []*queryGraph{qg}, err + case *sqlparser.Union: + qg, err := createQGFromSelectStatement(stmt.FirstStatement, semTable) + if err != nil { + return nil, err + } + for _, sel := range stmt.UnionSelects { + qgr, err := createQGFromSelectStatement(sel.Statement, semTable) + if err != nil { + return nil, err + } + qg = append(qg, qgr...) + } + return qg, nil + case *sqlparser.ParenSelect: + return createQGFromSelectStatement(stmt.Select, semTable) + } + + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] not reachable %T", selStmt) +} + +func newQueryGraph() *queryGraph { + return &queryGraph{ + crossTable: map[semantics.TableSet][]sqlparser.Expr{}, + subqueries: map[*sqlparser.Subquery][]*queryGraph{}, + } +} + +func (qg *queryGraph) collectTable(t sqlparser.TableExpr, semTable *semantics.SemTable) error { + switch table := t.(type) { + case *sqlparser.AliasedTableExpr: + tableName := table.Expr.(sqlparser.TableName) + qt := &queryTable{alias: table, table: tableName, tableID: semTable.TableSetFor(table)} + qg.tables = append(qg.tables, qt) + case *sqlparser.JoinTableExpr: + if err := qg.collectTable(table.LeftExpr, semTable); err != nil { + return err + } + if err := qg.collectTable(table.RightExpr, semTable); err != nil { + return err + } + if table.Condition.On != nil { + for _, predicate := range 
splitAndExpression(nil, table.Condition.On) { + err := qg.collectPredicate(predicate, semTable) + if err != nil { + return err + } + } + } + case *sqlparser.ParenTableExpr: + for _, expr := range table.Exprs { + if err := qg.collectTable(expr, semTable); err != nil { + return err + } + } + } + return nil +} + +func (qg *queryGraph) collectTables(t sqlparser.TableExprs, semTable *semantics.SemTable) error { + for _, expr := range t { + if err := qg.collectTable(expr, semTable); err != nil { + return err + } + } + return nil +} + +func (qg *queryGraph) collectPredicates(sel *sqlparser.Select, semTable *semantics.SemTable) error { + predicates := splitAndExpression(nil, sel.Where.Expr) + + for _, predicate := range predicates { + err := qg.collectPredicate(predicate, semTable) + if err != nil { + return err + } + } + return nil +} + +func (qg *queryGraph) collectPredicate(predicate sqlparser.Expr, semTable *semantics.SemTable) error { + deps := semTable.Dependencies(predicate) + switch deps.NumberOfTables() { + case 0: + qg.addNoDepsPredicate(predicate) + case 1: + found := qg.addToSingleTable(deps, predicate) + if !found { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table %v for predicate %v not found", deps, sqlparser.String(predicate)) + } + default: + allPredicates, found := qg.crossTable[deps] + if found { + allPredicates = append(allPredicates, predicate) + } else { + allPredicates = []sqlparser.Expr{predicate} + } + qg.crossTable[deps] = allPredicates + } + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch subQuery := node.(type) { + case *sqlparser.Subquery: + + qgr, err := createQGFromSelectStatement(subQuery.Select, semTable) + if err != nil { + return false, err + } + qg.subqueries[subQuery] = qgr + } + return true, nil + }, predicate) + + return err +} + +func (qg *queryGraph) addToSingleTable(table semantics.TableSet, predicate sqlparser.Expr) bool { + for _, t := range qg.tables { + if table == t.tableID { + 
t.predicates = append(t.predicates, predicate) + return true + } + } + return false +} + +func (qg *queryGraph) addNoDepsPredicate(predicate sqlparser.Expr) { + if qg.noDeps == nil { + qg.noDeps = predicate + } else { + qg.noDeps = &sqlparser.AndExpr{ + Left: qg.noDeps, + Right: predicate, + } + } +} diff --git a/go/vt/vtgate/planbuilder/querygraph_test.go b/go/vt/vtgate/planbuilder/querygraph_test.go new file mode 100644 index 00000000000..43717a4a465 --- /dev/null +++ b/go/vt/vtgate/planbuilder/querygraph_test.go @@ -0,0 +1,234 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import ( + "fmt" + "sort" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/test/utils" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type tcase struct { + input, output string +} + +var tcases = []tcase{{ + input: "select * from t", + output: `{ +Tables: + 1:t +}`, +}, { + input: "select t.c from t,y,z where t.c = y.c and (t.a = z.a or t.a = y.a) and 1 < 2", + output: `{ +Tables: + 1:t + 2:y + 4:z +JoinPredicates: + 1:2 - t.c = y.c + 1:2:4 - t.a = z.a or t.a = y.a +ForAll: 1 < 2 +}`, +}, { + input: "select t.c from t join y on t.id = y.t_id join z on t.id = z.t_id where t.name = 'foo' and y.col = 42 and z.baz = 101", + output: `{ +Tables: + 1:t where t.` + "`name`" + ` = 'foo' + 2:y where y.col = 42 + 4:z where z.baz = 101 +JoinPredicates: + 1:2 - t.id = y.t_id + 1:4 - t.id = z.t_id +}`, +}, { + input: "select t.c from t,y,z where t.name = 'foo' and y.col = 42 and z.baz = 101 and t.id = y.t_id and t.id = z.t_id", + output: `{ +Tables: + 1:t where t.` + "`name`" + ` = 'foo' + 2:y where y.col = 42 + 4:z where z.baz = 101 +JoinPredicates: + 1:2 - t.id = y.t_id + 1:4 - t.id = z.t_id +}`, +}, { + input: "select 1 from t where '1' = 1 and 12 = '12'", + output: `{ +Tables: + 1:t +ForAll: '1' = 1 and 12 = '12' +}`, +}, { + input: "select 1 from t where exists (select 1)", + output: `{ +Tables: + 1:t +ForAll: exists (select 1 from dual) +SubQueries: +(select 1 from dual) - { + Tables: + 2:dual + } +}`, +}} + +func equals(left, right sqlparser.Expr) sqlparser.Expr { + return &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: left, + Right: right, + } +} + +func colName(table, column string) *sqlparser.ColName { + return &sqlparser.ColName{Name: sqlparser.NewColIdent(column), Qualifier: tableName(table)} +} + +func tableName(name string) sqlparser.TableName { + return 
sqlparser.TableName{Name: sqlparser.NewTableIdent(name)} +} + +func TestQueryGraph(t *testing.T) { + for i, tc := range tcases { + sql := tc.input + t.Run(fmt.Sprintf("%d %s", i, sql), func(t *testing.T) { + tree, err := sqlparser.Parse(sql) + require.NoError(t, err) + semTable, err := semantics.Analyse(tree) + require.NoError(t, err) + qgraph, err := createQGFromSelect(tree.(*sqlparser.Select), semTable) + require.NoError(t, err) + assert.Equal(t, tc.output, qgraph.testString()) + utils.MustMatch(t, tc.output, qgraph.testString(), "incorrect query graph") + }) + } +} + +func TestString(t *testing.T) { + tree, err := sqlparser.Parse("select * from a,b join c on b.id = c.id where a.id = b.id and b.col IN (select 42) and func() = 'foo'") + require.NoError(t, err) + semTable, err := semantics.Analyse(tree) + require.NoError(t, err) + qgraph, err := createQGFromSelect(tree.(*sqlparser.Select), semTable) + require.NoError(t, err) + utils.MustMatch(t, `{ +Tables: + 1:a + 2:b where b.col in (select 42 from dual) + 4:c +JoinPredicates: + 1:2 - a.id = b.id + 2:4 - b.id = c.id +ForAll: func() = 'foo' +SubQueries: +(select 42 from dual) - { + Tables: + 8:dual + } +}`, qgraph.testString()) +} + +func (qt *queryTable) testString() string { + var alias string + if !qt.alias.As.IsEmpty() { + alias = " AS " + sqlparser.String(qt.alias.As) + } + var preds []string + for _, predicate := range qt.predicates { + preds = append(preds, sqlparser.String(predicate)) + } + var where string + if len(preds) > 0 { + where = " where " + strings.Join(preds, " and ") + } + + return fmt.Sprintf("\t%d:%s%s%s", qt.tableID, sqlparser.String(qt.table), alias, where) +} + +func (qg *queryGraph) testString() string { + return fmt.Sprintf(`{ +Tables: +%s%s%s%s +}`, strings.Join(qg.tableNames(), "\n"), qg.crossPredicateString(), qg.noDepsString(), qg.subqueriesString()) +} + +func (qg *queryGraph) crossPredicateString() string { + if len(qg.crossTable) == 0 { + return "" + } + var joinPreds []string + 
for deps, predicates := range qg.crossTable { + var tables []string + for _, id := range deps.Constituents() { + tables = append(tables, fmt.Sprintf("%d", id)) + } + var expressions []string + for _, expr := range predicates { + expressions = append(expressions, sqlparser.String(expr)) + } + tableConcat := strings.Join(tables, ":") + exprConcat := strings.Join(expressions, " and ") + joinPreds = append(joinPreds, fmt.Sprintf("\t%s - %s", tableConcat, exprConcat)) + } + sort.Strings(joinPreds) + return fmt.Sprintf("\nJoinPredicates:\n%s", strings.Join(joinPreds, "\n")) +} + +func (qg *queryGraph) tableNames() []string { + var tables []string + for _, t := range qg.tables { + tables = append(tables, t.testString()) + } + return tables +} + +func (qg *queryGraph) subqueriesString() string { + if len(qg.subqueries) == 0 { + return "" + } + var graphs []string + for sq, qgraphs := range qg.subqueries { + key := sqlparser.String(sq) + for _, inner := range qgraphs { + str := inner.testString() + splitInner := strings.Split(str, "\n") + for i, s := range splitInner { + splitInner[i] = "\t" + s + } + graphs = append(graphs, fmt.Sprintf("%s - %s", key, strings.Join(splitInner, "\n"))) + } + } + return fmt.Sprintf("\nSubQueries:\n%s", strings.Join(graphs, "\n")) +} + +func (qg *queryGraph) noDepsString() string { + if qg.noDeps == nil { + return "" + } + return fmt.Sprintf("\nForAll: %s", sqlparser.String(qg.noDeps)) +} diff --git a/go/vt/vtgate/planbuilder/route.go b/go/vt/vtgate/planbuilder/route.go index 5e921e0abec..43ac4b1ba93 100644 --- a/go/vt/vtgate/planbuilder/route.go +++ b/go/vt/vtgate/planbuilder/route.go @@ -19,6 +19,7 @@ package planbuilder import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" @@ -63,6 +64,9 @@ type route struct { // eroute is the primitive being built. 
eroute *engine.Route + + // tables keeps track of which tables this route is covering + tables semantics.TableSet } type tableSubstitution struct { @@ -128,6 +132,23 @@ func (rb *route) SetLimit(limit *sqlparser.Limit) { rb.Select.SetLimit(limit) } +// WireupV4 implements the logicalPlan interface +func (rb *route) WireupV4(semTable *semantics.SemTable) error { + rb.prepareTheAST() + + rb.eroute.Query = sqlparser.String(rb.Select) + buffer := sqlparser.NewTrackedBuffer(nil) + sqlparser.FormatImpossibleQuery(buffer, rb.Select) + rb.eroute.FieldQuery = buffer.ParsedQuery().Query + + return nil +} + +// ContainsTables implements the logicalPlan interface +func (rb *route) ContainsTables() semantics.TableSet { + return rb.tables +} + // Wireup implements the logicalPlan interface func (rb *route) Wireup(plan logicalPlan, jt *jointab) error { // Precaution: update ERoute.Values only if it's not set already. @@ -159,7 +180,7 @@ func (rb *route) Wireup(plan logicalPlan, jt *jointab) error { if len(node.SelectExprs) == 0 { node.SelectExprs = sqlparser.SelectExprs([]sqlparser.SelectExpr{ &sqlparser.AliasedExpr{ - Expr: sqlparser.NewIntLiteral([]byte{'1'}), + Expr: sqlparser.NewIntLiteral("1"), }, }) } @@ -204,6 +225,33 @@ func (rb *route) Wireup(plan logicalPlan, jt *jointab) error { return nil } +// prepareTheAST does minor fixups of the SELECT struct before producing the query string +func (rb *route) prepareTheAST() { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + switch node := node.(type) { + case *sqlparser.Select: + if len(node.SelectExprs) == 0 { + node.SelectExprs = []sqlparser.SelectExpr{ + &sqlparser.AliasedExpr{ + Expr: sqlparser.NewIntLiteral("1"), + }, + } + } + case *sqlparser.ComparisonExpr: + // 42 = colName -> colName = 42 + b := node.Operator == sqlparser.EqualOp + value := sqlparser.IsValue(node.Left) + name := sqlparser.IsColName(node.Right) + if b && + value && + name { + node.Left, node.Right = node.Right, node.Left + } + } + return true,
nil + }, rb.Select) +} + // procureValues procures and converts the input into // the expected types for rb.Values. func (rb *route) procureValues(plan logicalPlan, jt *jointab, val sqlparser.Expr) (sqltypes.PlanValue, error) { @@ -392,7 +440,23 @@ func (rb *route) JoinCanMerge(pb *primitiveBuilder, rrb *route, ajoin *sqlparser if where == nil { return true } - return ajoin != nil + tableWithRoutingPredicates := make(map[sqlparser.TableName]struct{}) + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + col, ok := node.(*sqlparser.ColName) + if ok { + hasRuntimeRoutingPredicates := isTableNameCol(col) || isDbNameCol(col) + if hasRuntimeRoutingPredicates && pb.st.tables[col.Qualifier] != nil { + tableWithRoutingPredicates[col.Qualifier] = struct{}{} + } + } + return true, nil + }, where) + // Routes can be merged if only 1 table is used in the predicates that are used for routing + // TODO :- Even if more table are present in the routing, we can merge if they agree + if len(tableWithRoutingPredicates) <= 1 { + return true + } + return len(tableWithRoutingPredicates) == 0 } if ajoin == nil { return false diff --git a/go/vt/vtgate/planbuilder/route_planning.go b/go/vt/vtgate/planbuilder/route_planning.go new file mode 100644 index 00000000000..dc3bfb2e85a --- /dev/null +++ b/go/vt/vtgate/planbuilder/route_planning.go @@ -0,0 +1,790 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import ( + "fmt" + "sort" + "strings" + + "vitess.io/vitess/go/sqltypes" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" + + "vitess.io/vitess/go/vt/vterrors" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" +) + +var _ selectPlanner = gen4Planner + +func gen4Planner(_ string) func(sqlparser.Statement, sqlparser.BindVars, ContextVSchema) (engine.Primitive, error) { + return func(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { + sel, ok := stmt.(*sqlparser.Select) + if !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%T not yet supported", stmt) + } + return newBuildSelectPlan(sel, vschema) + } +} + +func newBuildSelectPlan(sel *sqlparser.Select, vschema ContextVSchema) (engine.Primitive, error) { + semTable, err := semantics.Analyse(sel) // TODO no nil no + if err != nil { + return nil, err + } + + qgraph, err := createQGFromSelect(sel, semTable) + if err != nil { + return nil, err + } + + var tree joinTree + + switch { + case vschema.Planner() == Gen4Left2Right: + tree, err = leftToRightSolve(qgraph, semTable, vschema) + default: + tree, err = greedySolve(qgraph, semTable, vschema) + } + + if err != nil { + return nil, err + } + + plan, err := transformToLogicalPlan(tree, semTable) + if err != nil { + return nil, err + } + + if err := planProjections(sel, plan, semTable); err != nil { + return nil, err + } + + plan, err = planLimit(sel.Limit, plan) + if err != nil { + return nil, err + } + + if err := plan.WireupV4(semTable); err != nil { + return nil, err + } + return plan.Primitive(), nil +} + +func planLimit(limit *sqlparser.Limit, plan logicalPlan) (logicalPlan, error) { + if limit == nil { + return plan, nil + } + rb, ok := plan.(*route) + if ok && rb.isSingleShard() { + rb.SetLimit(limit) + return plan, nil + } + + lPlan, err := 
createLimit(plan, limit) + if err != nil { + return nil, err + } + + // visit does not modify the plan. + _, err = visit(lPlan, setUpperLimit) + if err != nil { + return nil, err + } + return lPlan, nil +} + +func planProjections(sel *sqlparser.Select, plan logicalPlan, semTable *semantics.SemTable) error { + rb, ok := plan.(*route) + if ok && rb.isSingleShard() { + ast := rb.Select.(*sqlparser.Select) + ast.Distinct = sel.Distinct + ast.GroupBy = sel.GroupBy + ast.OrderBy = sel.OrderBy + ast.SelectExprs = sel.SelectExprs + ast.Comments = sel.Comments + } else { + // TODO real horizon planning to be done + if sel.Distinct { + return semantics.Gen4NotSupportedF("DISTINCT") + } + if sel.GroupBy != nil { + return semantics.Gen4NotSupportedF("GROUP BY") + } + for _, expr := range sel.SelectExprs { + switch e := expr.(type) { + case *sqlparser.AliasedExpr: + if nodeHasAggregates(e.Expr) { + return semantics.Gen4NotSupportedF("aggregation [%s]", sqlparser.String(e)) + } + if _, err := pushProjection(e, plan, semTable); err != nil { + return err + } + default: + return semantics.Gen4NotSupportedF("%T", e) + } + } + + } + return nil +} + +type ( + joinTree interface { + // tables returns the table identifiers that are solved by this plan + tables() semantics.TableSet + + // cost is simply the number of routes in the joinTree + cost() int + + // creates a copy of the joinTree that can be updated without changing the original + clone() joinTree + + pushOutputColumns([]*sqlparser.ColName, *semantics.SemTable) int + } + routeTable struct { + qtable *queryTable + vtable *vindexes.Table + } + routePlan struct { + routeOpCode engine.RouteOpcode + solved semantics.TableSet + keyspace *vindexes.Keyspace + + // _tables contains all the tables that are solved by this plan. 
+ // the tables also contain any predicates that only depend on that particular table + _tables routeTables + + // predicates are the predicates evaluated by this plan + predicates []sqlparser.Expr + + // vindex and vindexValues are set if a vindex will be used for this route. + vindex vindexes.Vindex + vindexValues []sqltypes.PlanValue + + // here we store the possible vindexes we can use so that when we add predicates to the plan, + // we can quickly check if the new predicates enable any new vindex options + vindexPreds []*vindexPlusPredicates + + // columns needed to feed other plans + columns []*sqlparser.ColName + } + joinPlan struct { + // columns needed to feed other plans + columns []int + + // arguments that need to be copied from the LHS/RHS + vars map[string]int + + lhs, rhs joinTree + } + routeTables []*routeTable +) + +var _ joinTree = (*routePlan)(nil) +var _ joinTree = (*joinPlan)(nil) + +// clone returns a copy of the struct with copies of slices, +// so changing the contents of them will not be reflected in the original +func (rp *routePlan) clone() joinTree { + result := *rp + result.vindexPreds = make([]*vindexPlusPredicates, len(rp.vindexPreds)) + for i, pred := range rp.vindexPreds { + // we do this to create a copy of the struct + p := *pred + result.vindexPreds[i] = &p + } + return &result +} + +// tables implements the joinTree interface +func (rp *routePlan) tables() semantics.TableSet { + return rp.solved +} + +// cost implements the joinTree interface +func (rp *routePlan) cost() int { + switch rp.routeOpCode { + case // these op codes will never be compared with each other - they are assigned by a rule and not a comparison + engine.SelectDBA, + engine.SelectNext, + engine.SelectNone, + engine.SelectReference, + engine.SelectUnsharded: + return 0 + // TODO revisit these costs when more of the gen4 planner is done + case engine.SelectEqualUnique: + return 1 + case engine.SelectEqual: + return 5 + case engine.SelectIN: + return 10 + 
case engine.SelectMultiEqual: + return 10 + case engine.SelectScatter: + return 20 + } + return 1 +} + +// vindexPlusPredicates is a struct used to store all the predicates that the vindex can be used to query +type vindexPlusPredicates struct { + vindex *vindexes.ColumnVindex + values []sqltypes.PlanValue + // Vindex is covered if all the columns in the vindex have an associated predicate + covered bool +} + +// addPredicate clones this routePlan and returns a new one with these predicates added to it. if the predicates can help, +// they will improve the routeOpCode +func (rp *routePlan) addPredicate(predicates ...sqlparser.Expr) error { + newVindexFound, err := rp.searchForNewVindexes(predicates) + if err != nil { + return err + } + + // if we didn't open up any new vindex options, no need to enter here + if newVindexFound { + rp.pickBestAvailableVindex() + } + + // any predicates that cover more than a single table need to be added here + rp.predicates = append(rp.predicates, predicates...) + + return nil +} + +func (rp *routePlan) searchForNewVindexes(predicates []sqlparser.Expr) (bool, error) { + newVindexFound := false + for _, filter := range predicates { + switch node := filter.(type) { + case *sqlparser.ComparisonExpr: + switch node.Operator { + case sqlparser.EqualOp: + // here we are searching for predicates in the form n.col = XYZ + if sqlparser.IsNull(node.Left) || sqlparser.IsNull(node.Right) { + // we are looking at ANDed predicates in the WHERE clause. + // since we know that nothing returns true when compared to NULL, + // so we can safely bail out here + rp.routeOpCode = engine.SelectNone + return false, nil + } + // TODO(Manan,Andres): Remove the predicates that are repeated eg. 
Id=1 AND Id=1 + for _, v := range rp.vindexPreds { + if v.covered { + // already covered by an earlier predicate + continue + } + column, ok := node.Left.(*sqlparser.ColName) + other := node.Right + if !ok { + column, ok = node.Right.(*sqlparser.ColName) + other = node.Left + } + value, err := sqlparser.NewPlanValue(other) + if err != nil { + // if we are unable to create a PlanValue, we can't use a vindex, but we don't have to fail + if strings.Contains(err.Error(), "expression is too complex") { + continue + } + // something else went wrong, return the error + return false, err + } + if ok { + for _, col := range v.vindex.Columns { + // If the column for the predicate matches any column in the vindex add it to the list + if column.Name.Equal(col) { + v.values = append(v.values, value) + // Vindex is covered if all the columns in the vindex have an associated predicate + v.covered = len(v.values) == len(v.vindex.Columns) + newVindexFound = newVindexFound || v.covered + } + } + } + } + default: + return false, semantics.Gen4NotSupportedF("%s", sqlparser.String(filter)) + } + } + } + return newVindexFound, nil +} + +// pickBestAvailableVindex goes over the available vindexes for this route and picks the best one available. 
+func (rp *routePlan) pickBestAvailableVindex() { + for _, v := range rp.vindexPreds { + if !v.covered { + continue + } + // Choose the minimum cost vindex from the ones which are covered + if rp.vindex == nil || v.vindex.Vindex.Cost() < rp.vindex.Cost() { + rp.vindex = v.vindex.Vindex + rp.vindexValues = v.values + } + } + + if rp.vindex != nil { + rp.routeOpCode = engine.SelectEqual + if rp.vindex.IsUnique() { + rp.routeOpCode = engine.SelectEqualUnique + } + } +} + +// Predicates takes all known predicates for this route and ANDs them together +func (rp *routePlan) Predicates() sqlparser.Expr { + var result sqlparser.Expr + add := func(e sqlparser.Expr) { + if result == nil { + result = e + return + } + result = &sqlparser.AndExpr{ + Left: result, + Right: e, + } + } + for _, p := range rp.predicates { + add(p) + } + return result +} + +func (rp *routePlan) pushOutputColumns(col []*sqlparser.ColName, _ *semantics.SemTable) int { + newCol := len(rp.columns) + rp.columns = append(rp.columns, col...) 
+ return newCol +} + +func (jp *joinPlan) tables() semantics.TableSet { + return jp.lhs.tables() | jp.rhs.tables() +} + +func (jp *joinPlan) cost() int { + return jp.lhs.cost() + jp.rhs.cost() +} + +func (jp *joinPlan) clone() joinTree { + result := &joinPlan{ + lhs: jp.lhs.clone(), + rhs: jp.rhs.clone(), + } + return result +} + +func (jp *joinPlan) pushOutputColumns(columns []*sqlparser.ColName, semTable *semantics.SemTable) int { + resultIdx := len(jp.columns) + var toTheLeft []bool + var lhs, rhs []*sqlparser.ColName + for _, col := range columns { + if semTable.Dependencies(col).IsSolvedBy(jp.lhs.tables()) { + lhs = append(lhs, col) + toTheLeft = append(toTheLeft, true) + } else { + rhs = append(rhs, col) + toTheLeft = append(toTheLeft, false) + } + } + lhsOffset := jp.lhs.pushOutputColumns(lhs, semTable) + rhsOffset := -jp.rhs.pushOutputColumns(rhs, semTable) + + for _, left := range toTheLeft { + if left { + jp.columns = append(jp.columns, lhsOffset) + lhsOffset++ + } else { + jp.columns = append(jp.columns, rhsOffset) + rhsOffset-- + } + } + return resultIdx +} + +func pushPredicate2(exprs []sqlparser.Expr, tree joinTree, semTable *semantics.SemTable) (joinTree, error) { + switch node := tree.(type) { + case *routePlan: + plan := node.clone().(*routePlan) + err := plan.addPredicate(exprs...) + if err != nil { + return nil, err + } + return plan, nil + + case *joinPlan: + // we break up the predicates so that colnames from the LHS are replaced by arguments + var rhsPreds []sqlparser.Expr + var lhsColumns []*sqlparser.ColName + lhsSolves := node.lhs.tables() + for _, expr := range exprs { + cols, predicate, err := breakPredicateInLHSandRHS(expr, semTable, lhsSolves) + if err != nil { + return nil, err + } + lhsColumns = append(lhsColumns, cols...) 
+ rhsPreds = append(rhsPreds, predicate) + } + node.pushOutputColumns(lhsColumns, semTable) + rhsPlan, err := pushPredicate2(rhsPreds, node.rhs, semTable) + if err != nil { + return nil, err + } + return &joinPlan{ + lhs: node.lhs, + rhs: rhsPlan, + }, nil + default: + panic(fmt.Sprintf("BUG: unknown type %T", node)) + } +} + +func breakPredicateInLHSandRHS(expr sqlparser.Expr, semTable *semantics.SemTable, lhs semantics.TableSet) (columns []*sqlparser.ColName, predicate sqlparser.Expr, err error) { + predicate = sqlparser.CloneExpr(expr) + _ = sqlparser.Rewrite(predicate, nil, func(cursor *sqlparser.Cursor) bool { + switch node := cursor.Node().(type) { + case *sqlparser.ColName: + deps := semTable.Dependencies(node) + if deps == 0 { + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown column. has the AST been copied?") + return false + } + if deps.IsSolvedBy(lhs) { + columns = append(columns, node) + arg := sqlparser.NewArgument(":" + node.CompliantName("")) + cursor.Replace(arg) + } + } + return true + }) + if err != nil { + return nil, nil, err + } + return +} + +func mergeOrJoin(lhs, rhs joinTree, joinPredicates []sqlparser.Expr, semTable *semantics.SemTable) (joinTree, error) { + newPlan := tryMerge(lhs, rhs, joinPredicates, semTable) + if newPlan != nil { + return newPlan, nil + } + + tree := &joinPlan{lhs: lhs.clone(), rhs: rhs.clone()} + return pushPredicate2(joinPredicates, tree, semTable) +} + +type ( + tableSetPair struct { + left, right semantics.TableSet + } + cacheMap map[tableSetPair]joinTree +) + +/* + The greedy planner will plan a query by first finding the best route plan for every table. + Then, iteratively, it finds the cheapest join that can be produced between the remaining plans, + and removes the two inputs to this cheapest plan and instead adds the join. 
+ As an optimization, it first only considers joining tables that have predicates defined between them +*/ +func greedySolve(qg *queryGraph, semTable *semantics.SemTable, vschema ContextVSchema) (joinTree, error) { + joinTrees, err := seedPlanList(qg, semTable, vschema) + planCache := cacheMap{} + if err != nil { + return nil, err + } + + crossJoinsOK := false + for len(joinTrees) > 1 { + bestTree, lIdx, rIdx, err := findBestJoinTree(qg, semTable, joinTrees, planCache, crossJoinsOK) + if err != nil { + return nil, err + } + // if we found a best plan, we'll replace the two plans that were joined with the join plan created + if bestTree != nil { + // we need to remove the larger of the two plans first + if rIdx > lIdx { + joinTrees = removeAt(joinTrees, rIdx) + joinTrees = removeAt(joinTrees, lIdx) + } else { + joinTrees = removeAt(joinTrees, lIdx) + joinTrees = removeAt(joinTrees, rIdx) + } + joinTrees = append(joinTrees, bestTree) + } else { + // we will only fail to find a join plan when there are only cross joins left + // when that happens, we switch over to allow cross joins as well. 
+ // this way we prioritize joining joinTrees with predicates first + crossJoinsOK = true + } + } + + return joinTrees[0], nil +} + +func (cm cacheMap) getJoinTreeFor(lhs, rhs joinTree, joinPredicates []sqlparser.Expr, semTable *semantics.SemTable) (joinTree, error) { + solves := tableSetPair{left: lhs.tables(), right: rhs.tables()} + cachedPlan := cm[solves] + if cachedPlan != nil { + return cachedPlan, nil + } + + join, err := mergeOrJoin(lhs, rhs, joinPredicates, semTable) + if err != nil { + return nil, err + } + cm[solves] = join + return join, nil +} + +func findBestJoinTree( + qg *queryGraph, + semTable *semantics.SemTable, + plans []joinTree, + planCache cacheMap, + crossJoinsOK bool, +) (bestPlan joinTree, lIdx int, rIdx int, err error) { + for i, lhs := range plans { + for j, rhs := range plans { + if i == j { + continue + } + joinPredicates := qg.getPredicates(lhs.tables(), rhs.tables()) + if len(joinPredicates) == 0 && !crossJoinsOK { + // if there are no predicates joining the two tables, + // creating a join between them would produce a + // cartesian product, which is almost always a bad idea + continue + } + plan, err := planCache.getJoinTreeFor(lhs, rhs, joinPredicates, semTable) + if err != nil { + return nil, 0, 0, err + } + if bestPlan == nil || plan.cost() < bestPlan.cost() { + bestPlan = plan + // remember which plans we based on, so we can remove them later + lIdx = i + rIdx = j + } + } + } + return bestPlan, lIdx, rIdx, nil +} + +func leftToRightSolve(qg *queryGraph, semTable *semantics.SemTable, vschema ContextVSchema) (joinTree, error) { + plans, err := seedPlanList(qg, semTable, vschema) + if err != nil { + return nil, err + } + + var acc joinTree + for _, plan := range plans { + if acc == nil { + acc = plan + continue + } + joinPredicates := qg.getPredicates(acc.tables(), plan.tables()) + acc, err = mergeOrJoin(acc, plan, joinPredicates, semTable) + if err != nil { + return nil, err + } + } + + return acc, nil +} + +// seedPlanList 
returns a routePlan for each table in the qg +func seedPlanList(qg *queryGraph, semTable *semantics.SemTable, vschema ContextVSchema) ([]joinTree, error) { + plans := make([]joinTree, len(qg.tables)) + + // we start by seeding the table with the single routes + for i, table := range qg.tables { + solves := semTable.TableSetFor(table.alias) + plan, err := createRoutePlan(table, solves, vschema) + if err != nil { + return nil, err + } + plans[i] = plan + } + return plans, nil +} + +func removeAt(plans []joinTree, idx int) []joinTree { + return append(plans[:idx], plans[idx+1:]...) +} + +func createRoutePlan(table *queryTable, solves semantics.TableSet, vschema ContextVSchema) (*routePlan, error) { + vschemaTable, _, _, _, _, err := vschema.FindTableOrVindex(table.table) + if err != nil { + return nil, err + } + if vschemaTable.Name.String() != table.table.Name.String() { + return nil, semantics.Gen4NotSupportedF("routed tables") + } + plan := &routePlan{ + solved: solves, + _tables: []*routeTable{{ + qtable: table, + vtable: vschemaTable, + }}, + keyspace: vschemaTable.Keyspace, + } + + for _, columnVindex := range vschemaTable.ColumnVindexes { + plan.vindexPreds = append(plan.vindexPreds, &vindexPlusPredicates{vindex: columnVindex}) + } + + switch { + case vschemaTable.Type == vindexes.TypeSequence: + plan.routeOpCode = engine.SelectNext + case vschemaTable.Type == vindexes.TypeReference: + plan.routeOpCode = engine.SelectReference + case !vschemaTable.Keyspace.Sharded: + plan.routeOpCode = engine.SelectUnsharded + case vschemaTable.Pinned != nil: + + // Pinned tables have their keyspace ids already assigned. + // Use the Binary vindex, which is the identity function + // for keyspace id. + plan.routeOpCode = engine.SelectEqualUnique + default: + plan.routeOpCode = engine.SelectScatter + } + err = plan.addPredicate(table.predicates...) 
+ if err != nil { + return nil, err + } + + return plan, nil +} + +func findColumnVindex(a *routePlan, exp sqlparser.Expr, sem *semantics.SemTable) vindexes.SingleColumn { + left, isCol := exp.(*sqlparser.ColName) + if !isCol { + return nil + } + leftDep := sem.Dependencies(left) + for _, table := range a._tables { + if leftDep.IsSolvedBy(table.qtable.tableID) { + for _, vindex := range table.vtable.ColumnVindexes { + singCol, isSingle := vindex.Vindex.(vindexes.SingleColumn) + if isSingle && vindex.Columns[0].Equal(left.Name) { + return singCol + } + } + } + } + return nil +} + +func canMergeOnFilter(a, b *routePlan, predicate sqlparser.Expr, sem *semantics.SemTable) bool { + comparison, ok := predicate.(*sqlparser.ComparisonExpr) + if !ok { + return false + } + if comparison.Operator != sqlparser.EqualOp { + return false + } + left := comparison.Left + right := comparison.Right + + lVindex := findColumnVindex(a, left, sem) + if lVindex == nil { + left, right = right, left + lVindex = findColumnVindex(a, left, sem) + } + if lVindex == nil || !lVindex.IsUnique() { + return false + } + rVindex := findColumnVindex(b, right, sem) + if rVindex == nil { + return false + } + return rVindex == lVindex +} + +func canMergeOnFilters(a, b *routePlan, joinPredicates []sqlparser.Expr, semTable *semantics.SemTable) bool { + for _, predicate := range joinPredicates { + if canMergeOnFilter(a, b, predicate, semTable) { + return true + } + } + return false +} + +func tryMerge(a, b joinTree, joinPredicates []sqlparser.Expr, semTable *semantics.SemTable) joinTree { + aRoute, ok := a.(*routePlan) + if !ok { + return nil + } + bRoute, ok := b.(*routePlan) + if !ok { + return nil + } + if aRoute.keyspace != bRoute.keyspace { + return nil + } + + newTabletSet := aRoute.solved | bRoute.solved + r := &routePlan{ + routeOpCode: aRoute.routeOpCode, + solved: newTabletSet, + _tables: append(aRoute._tables, bRoute._tables...), + predicates: append( + append(aRoute.predicates, 
bRoute.predicates...), + joinPredicates...), + keyspace: aRoute.keyspace, + vindexPreds: append(aRoute.vindexPreds, bRoute.vindexPreds...), + } + + switch aRoute.routeOpCode { + case engine.SelectUnsharded, engine.SelectDBA: + if aRoute.routeOpCode != bRoute.routeOpCode { + return nil + } + case engine.SelectScatter, engine.SelectEqualUnique: + if len(joinPredicates) == 0 { + // If we are doing two Scatters, we have to make sure that the + // joins are on the correct vindex to allow them to be merged + // no join predicates - no vindex + return nil + } + + canMerge := canMergeOnFilters(aRoute, bRoute, joinPredicates, semTable) + if !canMerge { + return nil + } + r.pickBestAvailableVindex() + } + + return r +} + +var _ sort.Interface = (routeTables)(nil) + +func (r routeTables) Len() int { + return len(r) +} + +func (r routeTables) Less(i, j int) bool { + return r[i].qtable.tableID < r[j].qtable.tableID +} + +func (r routeTables) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} diff --git a/go/vt/vtgate/planbuilder/route_planning_test.go b/go/vt/vtgate/planbuilder/route_planning_test.go new file mode 100644 index 00000000000..4209225bd66 --- /dev/null +++ b/go/vt/vtgate/planbuilder/route_planning_test.go @@ -0,0 +1,126 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import ( + "fmt" + "testing" + + "vitess.io/vitess/go/vt/vtgate/semantics" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func unsharded(solved semantics.TableSet, keyspace *vindexes.Keyspace) *routePlan { + return &routePlan{ + routeOpCode: engine.SelectUnsharded, + solved: solved, + keyspace: keyspace, + } +} +func selectDBA(solved semantics.TableSet, keyspace *vindexes.Keyspace) *routePlan { + return &routePlan{ + routeOpCode: engine.SelectDBA, + solved: solved, + keyspace: keyspace, + } +} + +func selectScatter(solved semantics.TableSet, keyspace *vindexes.Keyspace) *routePlan { + return &routePlan{ + routeOpCode: engine.SelectScatter, + solved: solved, + keyspace: keyspace, + } +} + +func TestMergeJoins(t *testing.T) { + ks := &vindexes.Keyspace{Name: "apa", Sharded: false} + ks2 := &vindexes.Keyspace{Name: "banan", Sharded: false} + + type testCase struct { + l, r, expected joinTree + predicates []sqlparser.Expr + } + + tests := []testCase{{ + l: unsharded(1, ks), + r: unsharded(2, ks), + expected: unsharded(1|2, ks), + }, { + l: unsharded(1, ks), + r: unsharded(2, ks2), + expected: nil, + }, { + l: unsharded(2, ks), + r: unsharded(1, ks2), + expected: nil, + }, { + l: selectDBA(1, ks), + r: selectDBA(2, ks), + expected: selectDBA(1|2, ks), + }, { + l: selectDBA(1, ks), + r: selectDBA(2, ks2), + expected: nil, + }, { + l: selectDBA(2, ks), + r: selectDBA(1, ks2), + expected: nil, + }, { + l: unsharded(1, ks), + r: selectDBA(2, ks), + expected: nil, + }, { + l: selectScatter(1, ks), + r: selectScatter(2, ks), + predicates: []sqlparser.Expr{ + equals(colName("t1", "id"), colName("t2", "id")), + }, + expected: nil, + }} + for i, tc := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + result := tryMerge(tc.l, tc.r, tc.predicates, semantics.NewSemTable()) + assert.Equal(t, tc.expected, 
result) + }) + } +} + +func TestClone(t *testing.T) { + original := &routePlan{ + routeOpCode: engine.SelectEqualUnique, + vindexPreds: []*vindexPlusPredicates{{ + covered: false, + }}, + } + + clone := original.clone() + + clonedRP := clone.(*routePlan) + clonedRP.routeOpCode = engine.SelectDBA + assert.Equal(t, clonedRP.routeOpCode, engine.SelectDBA) + assert.Equal(t, original.routeOpCode, engine.SelectEqualUnique) + + clonedRP.vindexPreds[0].covered = true + assert.True(t, clonedRP.vindexPreds[0].covered) + assert.False(t, original.vindexPreds[0].covered) +} diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index 87d48316206..c349f70ba2c 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -20,7 +20,9 @@ import ( "errors" "fmt" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/orchestrator/external/golib/log" + + "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/key" @@ -33,8 +35,8 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" ) -func buildSelectPlan(query string) func(sqlparser.Statement, ContextVSchema) (engine.Primitive, error) { - return func(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildSelectPlan(query string) func(sqlparser.Statement, sqlparser.BindVars, ContextVSchema) (engine.Primitive, error) { + return func(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { sel := stmt.(*sqlparser.Select) p, err := handleDualSelects(sel, vschema) @@ -45,17 +47,168 @@ func buildSelectPlan(query string) func(sqlparser.Statement, ContextVSchema) (en return p, nil } - pb := newPrimitiveBuilder(vschema, newJointab(sqlparser.GetBindvars(sel))) - if err := pb.processSelect(sel, nil, query); err != nil { - return nil, err + getPlan := func(sel *sqlparser.Select) (logicalPlan, error) { + pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) + if err := 
pb.processSelect(sel, reservedVars, nil, query); err != nil { + return nil, err + } + if err := pb.plan.Wireup(pb.plan, pb.jt); err != nil { + return nil, err + } + return pb.plan, nil } - if err := pb.plan.Wireup(pb.plan, pb.jt); err != nil { + + plan, err := getPlan(sel) + if err != nil { return nil, err } - return pb.plan.Primitive(), nil + + if shouldRetryWithCNFRewriting(plan) { + // by transforming the predicates to CNF, the planner will sometimes find better plans + primitive := rewriteToCNFAndReplan(stmt, getPlan) + if primitive != nil { + return primitive, nil + } + } + return plan.Primitive(), nil + } +} + +func rewriteToCNFAndReplan(stmt sqlparser.Statement, getPlan func(sel *sqlparser.Select) (logicalPlan, error)) engine.Primitive { + rewritten := sqlparser.RewriteToCNF(stmt) + sel2, isSelect := rewritten.(*sqlparser.Select) + if isSelect { + log.Infof("retrying plan after cnf: %s", sqlparser.String(sel2)) + plan2, err := getPlan(sel2) + if err == nil && !shouldRetryWithCNFRewriting(plan2) { + // we only use this new plan if it's better than the old one we got + return plan2.Primitive() + } + } + return nil +} + +func shouldRetryWithCNFRewriting(plan logicalPlan) bool { + routePlan, isRoute := plan.(*route) + if !isRoute { + return false + } + // if we have a I_S query, but have not found table_schema or table_name, let's try CNF + return routePlan.eroute.Opcode == engine.SelectDBA && + routePlan.eroute.SysTableTableName == nil && + routePlan.eroute.SysTableTableSchema == nil + +} + +func pushProjection(expr *sqlparser.AliasedExpr, plan logicalPlan, semTable *semantics.SemTable) (firstOffset int, err error) { + switch node := plan.(type) { + case *route: + sel := node.Select.(*sqlparser.Select) + offset := len(sel.SelectExprs) + sel.SelectExprs = append(sel.SelectExprs, expr) + return offset, nil + case *joinV4: + lhsSolves := node.Left.ContainsTables() + rhsSolves := node.Right.ContainsTables() + deps := semTable.Dependencies(expr.Expr) + switch { + 
case deps.IsSolvedBy(lhsSolves): + offset, err := pushProjection(expr, node.Left, semTable) + if err != nil { + return 0, err + } + node.Cols = append(node.Cols, -(offset + 1)) + case deps.IsSolvedBy(rhsSolves): + offset, err := pushProjection(expr, node.Right, semTable) + if err != nil { + return 0, err + } + node.Cols = append(node.Cols, offset+1) + default: + return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown dependencies for %s", sqlparser.String(expr)) + } + return len(node.Cols) - 1, nil + default: + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%T not yet supported", node) + } +} + +func pushPredicate(exprs []sqlparser.Expr, plan logicalPlan, semTable *semantics.SemTable) (err error) { + if len(exprs) == 0 { + return nil + } + switch node := plan.(type) { + case *route: + sel := node.Select.(*sqlparser.Select) + finalExpr := reorderExpression(exprs[0], node.tables, semTable) + for i, expr := range exprs { + if i == 0 { + continue + } + finalExpr = &sqlparser.AndExpr{ + Left: finalExpr, + Right: reorderExpression(expr, node.tables, semTable), + } + } + if sel.Where != nil { + finalExpr = &sqlparser.AndExpr{ + Left: sel.Where.Expr, + Right: finalExpr, + } + } + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: finalExpr, + } + return nil + case *joinV4: + var lhs, rhs []sqlparser.Expr + lhsSolves := node.Left.ContainsTables() + rhsSolves := node.Right.ContainsTables() + for _, expr := range exprs { + deps := semTable.Dependencies(expr) + switch { + case deps.IsSolvedBy(lhsSolves): + lhs = append(lhs, expr) + case deps.IsSolvedBy(rhsSolves): + rhs = append(rhs, expr) + default: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown dependencies for %s", sqlparser.String(expr)) + } + } + err := pushPredicate(lhs, node.Left, semTable) + if err != nil { + return err + } + err = pushPredicate(rhs, node.Right, semTable) + return err + default: + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%T not yet supported", node) + } 
+} + +func reorderExpression(expr sqlparser.Expr, solves semantics.TableSet, semTable *semantics.SemTable) sqlparser.Expr { + switch compExpr := expr.(type) { + case *sqlparser.ComparisonExpr: + if compExpr.Operator == sqlparser.EqualOp { + if !dependsOnRoute(solves, compExpr.Left, semTable) && dependsOnRoute(solves, compExpr.Right, semTable) { + compExpr.Left, compExpr.Right = compExpr.Right, compExpr.Left + } + } } + return expr } +func dependsOnRoute(solves semantics.TableSet, expr sqlparser.Expr, semTable *semantics.SemTable) bool { + if node, ok := expr.(*sqlparser.ColName); ok { + return semTable.Dependencies(node).IsSolvedBy(solves) + } + return !sqlparser.IsValue(expr) +} + +var errSQLCalcFoundRows = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CantUseOptionHere, "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'") +var errInto = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CantUseOptionHere, "Incorrect usage/placement of 'INTO'") + // processSelect builds a primitive tree for the given query or subquery. // The tree built by this function has the following general structure: // @@ -91,20 +244,20 @@ func buildSelectPlan(query string) func(sqlparser.Statement, ContextVSchema) (en // The LIMIT clause is the last construct of a query. If it cannot be // pushed into a route, then a primitive is created on top of any // of the above trees to make it discard unwanted rows. -func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, outer *symtab, query string) error { +func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, reservedVars sqlparser.BindVars, outer *symtab, query string) error { // Check and error if there is any locking function present in select expression. 
for _, expr := range sel.SelectExprs { if aExpr, ok := expr.(*sqlparser.AliasedExpr); ok && sqlparser.IsLockingFunc(aExpr.Expr) { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%v allowed only with dual", sqlparser.String(aExpr)) + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%v allowed only with dual", sqlparser.String(aExpr)) } } if sel.SQLCalcFoundRows { if outer != nil || query == "" { - return mysql.NewSQLError(mysql.ERCantUseOptionHere, mysql.SSSyntaxErrorOrAccessViolation, "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'") + return errSQLCalcFoundRows } sel.SQLCalcFoundRows = false if sel.Limit != nil { - plan, err := buildSQLCalcFoundRowsPlan(query, sel, outer, pb.vschema) + plan, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, outer, pb.vschema) if err != nil { return err } @@ -115,14 +268,14 @@ func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, outer *symtab, // Into is not supported in subquery. if sel.Into != nil && (outer != nil || query == "") { - return mysql.NewSQLError(mysql.ERCantUseOptionHere, mysql.SSSyntaxErrorOrAccessViolation, "Incorrect usage/placement of 'INTO'") + return errInto } var where sqlparser.Expr if sel.Where != nil { where = sel.Where.Expr } - if err := pb.processTableExprs(sel.From, where); err != nil { + if err := pb.processTableExprs(sel.From, reservedVars, where); err != nil { return err } @@ -143,18 +296,18 @@ func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, outer *symtab, // This is because correlation is not allowed there. 
pb.st.Outer = outer if sel.Where != nil { - if err := pb.pushFilter(sel.Where.Expr, sqlparser.WhereStr); err != nil { + if err := pb.pushFilter(sel.Where.Expr, sqlparser.WhereStr, reservedVars); err != nil { return err } } if err := pb.checkAggregates(sel); err != nil { return err } - if err := pb.pushSelectExprs(sel); err != nil { + if err := pb.pushSelectExprs(sel, reservedVars); err != nil { return err } if sel.Having != nil { - if err := pb.pushFilter(sel.Having.Expr, sqlparser.HavingStr); err != nil { + if err := pb.pushFilter(sel.Having.Expr, sqlparser.HavingStr, reservedVars); err != nil { return err } } @@ -180,7 +333,7 @@ func setMiscFunc(in logicalPlan, sel *sqlparser.Select) error { query.Lock = sel.Lock if sel.Into != nil { if node.eroute.Opcode != engine.SelectUnsharded { - return false, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: this construct is not supported on sharded keyspace") + return false, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "INTO is not supported on sharded keyspace") } query.Into = sel.Into } @@ -195,19 +348,19 @@ func setMiscFunc(in logicalPlan, sel *sqlparser.Select) error { return nil } -func buildSQLCalcFoundRowsPlan(query string, sel *sqlparser.Select, outer *symtab, vschema ContextVSchema) (logicalPlan, error) { - ljt := newJointab(sqlparser.GetBindvars(sel)) +func buildSQLCalcFoundRowsPlan(query string, sel *sqlparser.Select, reservedVars sqlparser.BindVars, outer *symtab, vschema ContextVSchema) (logicalPlan, error) { + ljt := newJointab(reservedVars) frpb := newPrimitiveBuilder(vschema, ljt) - err := frpb.processSelect(sel, outer, "") + err := frpb.processSelect(sel, reservedVars, outer, "") if err != nil { return nil, err } - statement, err := sqlparser.Parse(query) + statement2, reservedVars2, err := sqlparser.Parse2(query) if err != nil { return nil, err } - sel2 := statement.(*sqlparser.Select) + sel2 := statement2.(*sqlparser.Select) sel2.SQLCalcFoundRows = false sel2.OrderBy = nil @@ -239,9 
+392,9 @@ func buildSQLCalcFoundRowsPlan(query string, sel *sqlparser.Select, outer *symta sel2 = sel3 } - cjt := newJointab(sqlparser.GetBindvars(sel2)) + cjt := newJointab(reservedVars2) countpb := newPrimitiveBuilder(vschema, cjt) - err = countpb.processSelect(sel2, outer, "") + err = countpb.processSelect(sel2, reservedVars2, outer, "") if err != nil { return nil, err } @@ -315,11 +468,11 @@ func isOnlyDual(sel *sqlparser.Select) bool { // pushFilter identifies the target route for the specified bool expr, // pushes it down, and updates the route info if the new constraint improves // the primitive. This function can push to a WHERE or HAVING clause. -func (pb *primitiveBuilder) pushFilter(in sqlparser.Expr, whereType string) error { +func (pb *primitiveBuilder) pushFilter(in sqlparser.Expr, whereType string, reservedVars sqlparser.BindVars) error { filters := splitAndExpression(nil, in) reorderBySubquery(filters) for _, filter := range filters { - pullouts, origin, expr, err := pb.findOrigin(filter) + pullouts, origin, expr, err := pb.findOrigin(filter, reservedVars) if err != nil { return err } @@ -373,8 +526,8 @@ func (pb *primitiveBuilder) addPullouts(pullouts []*pulloutSubquery) { // pushSelectExprs identifies the target route for the // select expressions and pushes them down. -func (pb *primitiveBuilder) pushSelectExprs(sel *sqlparser.Select) error { - resultColumns, err := pb.pushSelectRoutes(sel.SelectExprs) +func (pb *primitiveBuilder) pushSelectExprs(sel *sqlparser.Select, reservedVars sqlparser.BindVars) error { + resultColumns, err := pb.pushSelectRoutes(sel.SelectExprs, reservedVars) if err != nil { return err } @@ -384,12 +537,12 @@ func (pb *primitiveBuilder) pushSelectExprs(sel *sqlparser.Select) error { // pushSelectRoutes is a convenience function that pushes all the select // expressions and returns the list of resultColumns generated for it. 
-func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs) ([]*resultColumn, error) { +func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs, reservedVars sqlparser.BindVars) ([]*resultColumn, error) { resultColumns := make([]*resultColumn, 0, len(selectExprs)) for _, node := range selectExprs { switch node := node.(type) { case *sqlparser.AliasedExpr: - pullouts, origin, expr, err := pb.findOrigin(node.Expr) + pullouts, origin, expr, err := pb.findOrigin(node.Expr, reservedVars) if err != nil { return nil, err } @@ -423,7 +576,7 @@ func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs) } } resultColumns = append(resultColumns, rb.PushAnonymous(node)) - case sqlparser.Nextval: + case *sqlparser.Nextval: rb, ok := pb.plan.(*route) if !ok { // This code is unreachable because the parser doesn't allow joins for next val statements. diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go index 9bd2d921a4b..d34e9397abb 100644 --- a/go/vt/vtgate/planbuilder/set.go +++ b/go/vt/vtgate/planbuilder/set.go @@ -79,7 +79,7 @@ func buildSetPlan(stmt *sqlparser.Set, vschema ContextVSchema) (engine.Primitive case sqlparser.SessionScope: planFunc, ok := sysVarPlanningFunc[expr.Name.Lowered()] if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported construct in set: %s", sqlparser.String(expr)) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.UnknownSystemVariable, "Unknown system variable '%s'", sqlparser.String(expr)) } setOp, err := planFunc(expr, vschema, ec) if err != nil { @@ -102,6 +102,12 @@ func buildSetPlan(stmt *sqlparser.Set, vschema ContextVSchema) (engine.Primitive }, nil } +func buildSetOpReadOnly(s setting) planFunc { + return func(expr *sqlparser.SetExpr, schema ContextVSchema, _ *expressionConverter) (engine.SetOp, error) { + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.IncorrectGlobalLocalVar, 
"Variable '%s' is a read only variable", expr.Name) + } +} + func buildNotSupported(setting) planFunc { return func(expr *sqlparser.SetExpr, schema ContextVSchema, _ *expressionConverter) (engine.SetOp, error) { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s: system setting is not supported", expr.Name) @@ -178,7 +184,7 @@ func buildSetOpVitessAware(s setting) planFunc { _, isDefault := astExpr.Expr.(*sqlparser.Default) if isDefault { if s.defaultValue == nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, defaultNotSupportedErrFmt, astExpr.Name) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, defaultNotSupportedErrFmt, astExpr.Name) } runtimeExpr = s.defaultValue } else { diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go index ab365b3d993..8105d818247 100644 --- a/go/vt/vtgate/planbuilder/show.go +++ b/go/vt/vtgate/planbuilder/show.go @@ -17,9 +17,13 @@ limitations under the License. package planbuilder import ( + "fmt" "regexp" "strings" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" @@ -41,10 +45,8 @@ func buildShowPlan(stmt *sqlparser.Show, vschema ContextVSchema) (engine.Primiti switch show := stmt.Internal.(type) { case *sqlparser.ShowBasic: return buildShowBasicPlan(show, vschema) - case *sqlparser.ShowColumns: - return buildShowColumnsPlan(show, vschema) - case *sqlparser.ShowTableStatus: - return buildShowTableStatusPlan(show, vschema) + case *sqlparser.ShowCreate: + return buildShowCreatePlan(show, vschema) default: return nil, ErrPlanNotSupported } @@ -53,41 +55,41 @@ func buildShowPlan(stmt *sqlparser.Show, vschema ContextVSchema) (engine.Primiti func buildShowBasicPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { switch show.Command { case sqlparser.Charset: - return showCharset(show) - case sqlparser.Collation, 
sqlparser.Function, sqlparser.Privilege, sqlparser.Procedure, - sqlparser.VariableGlobal, sqlparser.VariableSession: - return showSendAnywhere(show, vschema) - case sqlparser.Database: - ks, err := vschema.AllKeyspace() - if err != nil { - return nil, err - } - - var filter *regexp.Regexp + return buildCharsetPlan(show) + case sqlparser.Collation, sqlparser.Function, sqlparser.Privilege, sqlparser.Procedure: + return buildSendAnywherePlan(show, vschema) + case sqlparser.VariableGlobal, sqlparser.VariableSession: + return buildVariablePlan(show, vschema) + case sqlparser.Column, sqlparser.Index: + return buildShowTblPlan(show, vschema) + case sqlparser.Database, sqlparser.Keyspace: + return buildDBPlan(show, vschema) + case sqlparser.OpenTable, sqlparser.TableStatus, sqlparser.Table, sqlparser.Trigger: + return buildPlanWithDB(show, vschema) + case sqlparser.StatusGlobal, sqlparser.StatusSession: + return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0, 2), buildVarCharFields("Variable_name", "Value")), nil + case sqlparser.VitessMigrations: + return buildShowVMigrationsPlan(show, vschema) + } + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unknown show query type %s", show.Command.ToString()) - if show.Filter != nil { - filter = sqlparser.LikeToRegexp(show.Filter.Like) - } +} - if filter == nil { - filter = regexp.MustCompile(".*") - } +func buildCharsetPlan(show *sqlparser.ShowBasic) (engine.Primitive, error) { + fields := buildVarCharFields("Charset", "Description", "Default collation") + maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32} + fields = append(fields, maxLenField) - rows := make([][]sqltypes.Value, 0, len(ks)) - for _, v := range ks { - if filter.MatchString(v.Name) { - rows = append(rows, buildVarCharRow(v.Name)) - } - } - return engine.NewRowsPrimitive(rows, buildVarCharFields("Database")), nil - case sqlparser.StatusGlobal, sqlparser.StatusSession: - return engine.NewRowsPrimitive(make([][]sqltypes.Value, 0, 2), 
buildVarCharFields("Variable_name", "Value")), nil + charsets := []string{utf8, utf8mb4} + rows, err := generateCharsetRows(show.Filter, charsets) + if err != nil { + return nil, err } - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unknown show query type %s", show.Command.ToString()) + return engine.NewRowsPrimitive(rows, fields), nil } -func showSendAnywhere(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { +func buildSendAnywherePlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { ks, err := vschema.AnyKeyspace() if err != nil { return nil, err @@ -101,52 +103,142 @@ func showSendAnywhere(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine }, nil } -func showCharset(show *sqlparser.ShowBasic) (engine.Primitive, error) { - fields := buildVarCharFields("Charset", "Description", "Default collation") - maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32} - fields = append(fields, maxLenField) - - charsets := []string{utf8, utf8mb4} - rows, err := generateCharsetRows(show.Filter, charsets) +func buildVariablePlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { + plan, err := buildSendAnywherePlan(show, vschema) if err != nil { return nil, err } + plan = engine.NewReplaceVariables(plan) + return plan, nil +} - return engine.NewRowsPrimitive(rows, fields), nil +func buildShowTblPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { + if !show.DbName.IsEmpty() { + show.Tbl.Qualifier = sqlparser.NewTableIdent(show.DbName.String()) + // Remove Database Name from the query. 
+ show.DbName = sqlparser.NewTableIdent("") + } + + dest := key.Destination(key.DestinationAnyShard{}) + var ks *vindexes.Keyspace + var err error + + if !show.Tbl.Qualifier.IsEmpty() && sqlparser.SystemSchema(show.Tbl.Qualifier.String()) { + ks, err = vschema.AnyKeyspace() + if err != nil { + return nil, err + } + } else { + table, _, _, _, destination, err := vschema.FindTableOrVindex(show.Tbl) + if err != nil { + return nil, err + } + if table == nil { + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.UnknownTable, "Table '%s' doesn't exist", show.Tbl.Name.String()) + } + // Update the table. + show.Tbl.Qualifier = sqlparser.NewTableIdent("") + show.Tbl.Name = table.Name + + if destination != nil { + dest = destination + } + ks = table.Keyspace + } + + return &engine.Send{ + Keyspace: ks, + TargetDestination: dest, + Query: sqlparser.String(show), + IsDML: false, + SingleShardOnly: true, + }, nil } -func buildShowColumnsPlan(show *sqlparser.ShowColumns, vschema ContextVSchema) (engine.Primitive, error) { - if show.DbName != "" { - show.Table.Qualifier = sqlparser.NewTableIdent(show.DbName) +func buildDBPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { + ks, err := vschema.AllKeyspace() + if err != nil { + return nil, err + } + + var filter *regexp.Regexp + + if show.Filter != nil { + filter = sqlparser.LikeToRegexp(show.Filter.Like) + } + + if filter == nil { + filter = regexp.MustCompile(".*") + } + + //rows := make([][]sqltypes.Value, 0, len(ks)+4) + var rows [][]sqltypes.Value + + if show.Command == sqlparser.Database { + //Hard code default databases + rows = append(rows, buildVarCharRow("information_schema")) + rows = append(rows, buildVarCharRow("mysql")) + rows = append(rows, buildVarCharRow("sys")) + rows = append(rows, buildVarCharRow("performance_schema")) + } + + for _, v := range ks { + if filter.MatchString(v.Name) { + rows = append(rows, buildVarCharRow(v.Name)) + } } - table, _, _, _, destination, 
err := vschema.FindTableOrVindex(show.Table) + return engine.NewRowsPrimitive(rows, buildVarCharFields("Database")), nil +} + +// buildShowVMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. It invokes queries on _vt.schema_migrations on all MASTER tablets on keyspace's shards. +func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { + dest, ks, tabletType, err := vschema.TargetDestination(show.DbName.String()) if err != nil { return nil, err } - if table == nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "table does not exists: %s", show.Table.Name.String()) + if ks == nil { + return nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)") } - if destination == nil { - destination = key.DestinationAnyShard{} + + if tabletType != topodatapb.TabletType_MASTER { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "show vitess_migrations works only on primary tablet") } - // Remove Database Name from the query. 
- show.DbName = "" - show.Table.Qualifier = sqlparser.NewTableIdent("") - show.Table.Name = table.Name + if dest == nil { + dest = key.DestinationAllShards{} + } + + sql := "SELECT * FROM _vt.schema_migrations" + if show.Filter != nil { + if show.Filter.Filter != nil { + sql += fmt.Sprintf(" where %s", sqlparser.String(show.Filter.Filter)) + } else if show.Filter.Like != "" { + lit := sqlparser.String(sqlparser.NewStrLiteral(show.Filter.Like)) + sql += fmt.Sprintf(" where migration_uuid LIKE %s OR migration_context LIKE %s OR migration_status LIKE %s", lit, lit, lit) + } + } return &engine.Send{ - Keyspace: table.Keyspace, - TargetDestination: destination, - Query: sqlparser.String(show), - IsDML: false, - SingleShardOnly: true, + Keyspace: ks, + TargetDestination: dest, + Query: sql, }, nil - } -func buildShowTableStatusPlan(show *sqlparser.ShowTableStatus, vschema ContextVSchema) (engine.Primitive, error) { - destination, keyspace, _, err := vschema.TargetDestination(show.DatabaseName) +func buildPlanWithDB(show *sqlparser.ShowBasic, vschema ContextVSchema) (engine.Primitive, error) { + dbName := show.DbName + dbDestination := show.DbName.String() + if sqlparser.SystemSchema(dbDestination) { + ks, err := vschema.AnyKeyspace() + if err != nil { + return nil, err + } + dbDestination = ks.Name + } else { + // Remove Database Name from the query. + show.DbName = sqlparser.NewTableIdent("") + } + destination, keyspace, _, err := vschema.TargetDestination(dbDestination) if err != nil { return nil, err } @@ -154,16 +246,26 @@ func buildShowTableStatusPlan(show *sqlparser.ShowTableStatus, vschema ContextVS destination = key.DestinationAnyShard{} } - // Remove Database Name from the query. 
- show.DatabaseName = "" + if dbName.IsEmpty() { + dbName = sqlparser.NewTableIdent(keyspace.Name) + } - return &engine.Send{ + query := sqlparser.String(show) + var plan engine.Primitive + plan = &engine.Send{ Keyspace: keyspace, TargetDestination: destination, - Query: sqlparser.String(show), + Query: query, IsDML: false, SingleShardOnly: true, - }, nil + } + if show.Command == sqlparser.Table { + plan, err = engine.NewRenameField([]string{"Tables_in_" + dbName.String()}, []int{0}, plan) + if err != nil { + return nil, err + } + } + return plan, nil } @@ -205,19 +307,19 @@ func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([ } else { cmpExp, ok := showFilter.Filter.(*sqlparser.ComparisonExpr) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expect a 'LIKE' or '=' expression") + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "expect a 'LIKE' or '=' expression") } left, ok := cmpExp.Left.(*sqlparser.ColName) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expect left side to be 'charset'") + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "expect left side to be 'charset'") } leftOk := left.Name.EqualString(charset) if leftOk { literal, ok := cmpExp.Right.(*sqlparser.Literal) if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "we expect the right side to be a string") + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "we expect the right side to be a string") } rightString := string(literal.Val) @@ -279,3 +381,113 @@ func checkLikeOpt(likeOpt string, colNames []string) (string, error) { return "", nil } + +func buildShowCreatePlan(show *sqlparser.ShowCreate, vschema ContextVSchema) (engine.Primitive, error) { + switch show.Command { + case sqlparser.CreateDb: + return buildCreateDbPlan(show, vschema) + case sqlparser.CreateE, sqlparser.CreateF, sqlparser.CreateProc, 
sqlparser.CreateTr, sqlparser.CreateV: + return buildCreatePlan(show, vschema) + case sqlparser.CreateTbl: + return buildCreateTblPlan(show, vschema) + } + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unknown show query type %s", show.Command.ToString()) +} + +func buildCreateDbPlan(show *sqlparser.ShowCreate, vschema ContextVSchema) (engine.Primitive, error) { + dbName := show.Op.Name.String() + if sqlparser.SystemSchema(dbName) { + ks, err := vschema.AnyKeyspace() + if err != nil { + return nil, err + } + dbName = ks.Name + } + + dest, ks, _, err := vschema.TargetDestination(dbName) + if err != nil { + return nil, err + } + + if dest == nil { + dest = key.DestinationAnyShard{} + } + + return &engine.Send{ + Keyspace: ks, + TargetDestination: dest, + Query: sqlparser.String(show), + IsDML: false, + SingleShardOnly: true, + }, nil +} + +func buildCreateTblPlan(show *sqlparser.ShowCreate, vschema ContextVSchema) (engine.Primitive, error) { + dest := key.Destination(key.DestinationAnyShard{}) + var ks *vindexes.Keyspace + var err error + + if !show.Op.Qualifier.IsEmpty() && sqlparser.SystemSchema(show.Op.Qualifier.String()) { + ks, err = vschema.AnyKeyspace() + if err != nil { + return nil, err + } + } else { + tbl, _, _, _, destKs, err := vschema.FindTableOrVindex(show.Op) + if err != nil { + return nil, err + } + if tbl == nil { + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.UnknownTable, "Table '%s' doesn't exist", sqlparser.String(show.Op)) + } + ks = tbl.Keyspace + if destKs != nil { + dest = destKs + } + show.Op.Qualifier = sqlparser.NewTableIdent("") + show.Op.Name = tbl.Name + } + + return &engine.Send{ + Keyspace: ks, + TargetDestination: dest, + Query: sqlparser.String(show), + IsDML: false, + SingleShardOnly: true, + }, nil + +} + +func buildCreatePlan(show *sqlparser.ShowCreate, vschema ContextVSchema) (engine.Primitive, error) { + dbName := "" + if !show.Op.Qualifier.IsEmpty() { + dbName = show.Op.Qualifier.String() + } + 
+ if sqlparser.SystemSchema(dbName) { + ks, err := vschema.AnyKeyspace() + if err != nil { + return nil, err + } + dbName = ks.Name + } else { + show.Op.Qualifier = sqlparser.NewTableIdent("") + } + + dest, ks, _, err := vschema.TargetDestination(dbName) + if err != nil { + return nil, err + } + if dest == nil { + dest = key.DestinationAnyShard{} + } + + return &engine.Send{ + Keyspace: ks, + TargetDestination: dest, + Query: sqlparser.String(show), + IsDML: false, + SingleShardOnly: true, + }, nil + +} diff --git a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go index 9ec495e7f4b..21a7bae3d29 100644 --- a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go +++ b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/semantics" ) var _ logicalPlan = (*sqlCalcFoundRows)(nil) @@ -39,6 +40,20 @@ func (s *sqlCalcFoundRows) Wireup(logicalPlan, *jointab) error { return s.CountQuery.Wireup(s.CountQuery, s.cjt) } +//Wireup2 implements the logicalPlan interface +func (s *sqlCalcFoundRows) WireupV4(semTable *semantics.SemTable) error { + err := s.LimitQuery.WireupV4(semTable) + if err != nil { + return err + } + return s.CountQuery.WireupV4(semTable) +} + +// Solves implements the logicalPlan interface +func (s *sqlCalcFoundRows) ContainsTables() semantics.TableSet { + return s.LimitQuery.ContainsTables() +} + //Primitive implements the logicalPlan interface func (s *sqlCalcFoundRows) Primitive() engine.Primitive { return engine.SQLCalcFoundRows{ @@ -76,13 +91,13 @@ func (s *sqlCalcFoundRows) SupplyCol(col *sqlparser.ColName) (*resultColumn, int //SupplyWeightString implements the logicalPlan interface func (s *sqlCalcFoundRows) SupplyWeightString(int) (weightcolNumber int, err error) { - return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unreachable: 
sqlCalcFoundRows.SupplyWeightString") + return 0, UnsupportedSupplyWeightString{Type: "sqlCalcFoundRows"} } // Rewrite implements the logicalPlan interface func (s *sqlCalcFoundRows) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 2 { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "sqlCalcFoundRows: wrong number of inputs") + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] wrong number of inputs for SQL_CALC_FOUND_ROWS: %d", len(inputs)) } s.LimitQuery = inputs[0] s.CountQuery = inputs[1] diff --git a/go/vt/vtgate/planbuilder/symtab.go b/go/vt/vtgate/planbuilder/symtab.go index 61cc1d331cf..afaffea1f97 100644 --- a/go/vt/vtgate/planbuilder/symtab.go +++ b/go/vt/vtgate/planbuilder/symtab.go @@ -22,6 +22,9 @@ import ( "strconv" "strings" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -564,9 +567,37 @@ func newResultColumn(expr *sqlparser.AliasedExpr, origin logicalPlan) *resultCol } else { // We don't generate an alias if the expression is non-trivial. // Just to be safe, generate an anonymous column for the expression. 
+ typ, err := GetReturnType(expr.Expr) rc.column = &column{ origin: origin, } + if err == nil { + rc.column.typ = typ + } } return rc } + +// GetReturnType returns the type of the select expression that MySQL will return +func GetReturnType(input sqlparser.Expr) (querypb.Type, error) { + switch node := input.(type) { + case *sqlparser.FuncExpr: + functionName := strings.ToUpper(node.Name.String()) + switch functionName { + case "ABS": + // Returned value depends on the return type of the input + if len(node.Exprs) == 1 { + expr, isAliasedExpr := node.Exprs[0].(*sqlparser.AliasedExpr) + if isAliasedExpr { + return GetReturnType(expr.Expr) + } + } + case "COUNT": + return querypb.Type_INT64, nil + } + case *sqlparser.ColName: + col := node.Metadata.(*column) + return col.typ, nil + } + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot evaluate return type for %T", input) +} diff --git a/go/vt/vtgate/planbuilder/symtab_test.go b/go/vt/vtgate/planbuilder/symtab_test.go index bd9ac8a6a90..c327ad9c385 100644 --- a/go/vt/vtgate/planbuilder/symtab_test.go +++ b/go/vt/vtgate/planbuilder/symtab_test.go @@ -16,6 +16,16 @@ limitations under the License. 
package planbuilder +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" +) + /* func TestSymtabAddVSchemaTable(t *testing.T) { tname := sqlparser.TableName{Name: sqlparser.NewTableIdent("t")} @@ -178,3 +188,53 @@ func TestSymtabAddVSchemaTable(t *testing.T) { } } */ + +func TestGetReturnType(t *testing.T) { + tests := []struct { + input sqlparser.Expr + output querypb.Type + expectedErr error + }{{ + input: &sqlparser.FuncExpr{Name: sqlparser.NewColIdent("Abs"), Exprs: sqlparser.SelectExprs{ + &sqlparser.AliasedExpr{ + Expr: &sqlparser.ColName{ + Name: sqlparser.NewColIdent("A"), + Metadata: &column{ + typ: querypb.Type_DECIMAL, + }, + }, + }, + }}, + output: querypb.Type_DECIMAL, + expectedErr: nil, + }, { + input: &sqlparser.FuncExpr{Name: sqlparser.NewColIdent("Count"), Exprs: sqlparser.SelectExprs{ + &sqlparser.StarExpr{}, + }}, + output: querypb.Type_INT64, + expectedErr: nil, + }, { + input: &sqlparser.FuncExpr{Name: sqlparser.NewColIdent("cOunt"), Exprs: sqlparser.SelectExprs{ + &sqlparser.StarExpr{}, + }}, + output: querypb.Type_INT64, + expectedErr: nil, + }, { + input: &sqlparser.FuncExpr{Name: sqlparser.NewColIdent("Abs"), Exprs: sqlparser.SelectExprs{ + &sqlparser.StarExpr{}, + }}, + expectedErr: fmt.Errorf("cannot evaluate return type for *sqlparser.FuncExpr"), + }} + + for _, test := range tests { + t.Run(sqlparser.String(test.input), func(t *testing.T) { + got, err := GetReturnType(test.input) + if test.expectedErr != nil { + require.EqualError(t, err, test.expectedErr.Error()) + } else { + require.NoError(t, err) + require.Equal(t, test.output, got) + } + }) + } +} diff --git a/go/vt/vtgate/planbuilder/system_tables.go b/go/vt/vtgate/planbuilder/system_tables.go index 3d073d63a65..71fe5a6c3b8 100644 --- a/go/vt/vtgate/planbuilder/system_tables.go +++ b/go/vt/vtgate/planbuilder/system_tables.go @@ -18,7 +18,9 @@ package planbuilder import 
( "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -34,9 +36,15 @@ func (pb *primitiveBuilder) findSysInfoRoutingPredicates(expr sqlparser.Expr, ru } if isTableSchema { - rut.eroute.SysTableTableSchema = append(rut.eroute.SysTableTableSchema, out) + if rut.eroute.SysTableTableSchema != nil && !evalengine.AreExprEqual(rut.eroute.SysTableTableSchema, out) { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "two predicates for specifying the database are not supported") + } + rut.eroute.SysTableTableSchema = out } else { - rut.eroute.SysTableTableName = append(rut.eroute.SysTableTableName, out) + if rut.eroute.SysTableTableName != nil && !evalengine.AreExprEqual(rut.eroute.SysTableTableName, out) { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "two predicates for table_name not supported") + } + rut.eroute.SysTableTableName = out } return nil @@ -66,7 +74,7 @@ func isTableSchemaOrName(e sqlparser.Expr) (isTableSchema bool, isTableName bool } func isDbNameCol(col *sqlparser.ColName) bool { - return col.Name.EqualString("table_schema") || col.Name.EqualString("constraint_schema") || col.Name.EqualString("schema_name") + return col.Name.EqualString("table_schema") || col.Name.EqualString("constraint_schema") || col.Name.EqualString("schema_name") || col.Name.EqualString("routine_schema") } func isTableNameCol(col *sqlparser.ColName) bool { @@ -94,7 +102,7 @@ func extractInfoSchemaRoutingPredicate(in sqlparser.Expr) (bool, evalengine.Expr } else { name += engine.BvTableName } - replaceOther(sqlparser.NewArgument([]byte(name))) + replaceOther(sqlparser.NewArgument(name)) return isSchemaName, evalExpr, nil } } diff --git a/go/vt/vtgate/planbuilder/system_variables.go b/go/vt/vtgate/planbuilder/system_variables.go index ce02cb497ff..d6e9cbeaa14 100644 --- 
a/go/vt/vtgate/planbuilder/system_variables.go +++ b/go/vt/vtgate/planbuilder/system_variables.go @@ -27,6 +27,7 @@ import ( ) func init() { + forSettings(sysvars.ReadOnly, buildSetOpReadOnly) forSettings(sysvars.IgnoreThese, buildSetOpIgnore) forSettings(sysvars.UseReservedConn, buildSetOpReservedConn) forSettings(sysvars.CheckAndIgnore, buildSetOpCheckAndIgnore) diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt b/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt index 8d194f0cf9d..7c76066eec1 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt @@ -17,6 +17,7 @@ "Table": "unsharded" } } +Gen4 plan same as above # Aggregate on unique sharded "select count(*), col from user where id = 1" @@ -30,15 +31,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*), col from user where 1 != 1", - "Query": "select count(*), col from user where id = 1", - "Table": "user", + "FieldQuery": "select count(*), col from `user` where 1 != 1", + "Query": "select count(*), col from `user` where id = 1", + "Table": "`user`", "Values": [ 1 ], "Vindex": "user_index" } } +Gen4 plan same as above # Aggregate detection (non-aggregate function) "select fun(1), col from user" @@ -52,11 +54,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select fun(1), col from user where 1 != 1", - "Query": "select fun(1), col from user", - "Table": "user" + "FieldQuery": "select fun(1), col from `user` where 1 != 1", + "Query": "select fun(1), col from `user`", + "Table": "`user`" } } +Gen4 plan same as above # select distinct with unique vindex for scatter route. 
"select distinct col1, id from user" @@ -70,9 +73,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, id from user where 1 != 1", - "Query": "select distinct col1, id from user", - "Table": "user" + "FieldQuery": "select col1, id from `user` where 1 != 1", + "Query": "select distinct col1, id from `user`", + "Table": "`user`" } } @@ -88,9 +91,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, id from user where 1 != 1 group by col1", - "Query": "select distinct col1, id from user group by col1", - "Table": "user" + "FieldQuery": "select col1, id from `user` where 1 != 1 group by col1", + "Query": "select distinct col1, id from `user` group by col1", + "Table": "`user`" } } @@ -113,10 +116,38 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*), a, textcol1, b, weight_string(textcol1) from user where 1 != 1 group by a, textcol1, b", - "OrderBy": "1 ASC, 4 ASC, 3 ASC", - "Query": "select count(*), a, textcol1, b, weight_string(textcol1) from user group by a, textcol1, b order by a asc, textcol1 asc, b asc", - "Table": "user" + "FieldQuery": "select count(*), a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, textcol1, b", + "OrderBy": "1 ASC, 2 ASC, 3 ASC", + "Query": "select count(*), a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` group by a, textcol1, b order by a asc, textcol1 asc, b asc", + "Table": "`user`" + } + ] + } +} + +# scatter group by a integer column. Do not add weight strings for this. 
+"select count(*), intcol from user group by intcol" +{ + "QueryType": "SELECT", + "Original": "select count(*), intcol from user group by intcol", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "count(0)", + "Distinct": "false", + "GroupBy": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), intcol from `user` where 1 != 1 group by intcol", + "OrderBy": "1 ASC", + "Query": "select count(*), intcol from `user` group by intcol order by intcol asc", + "Table": "`user`" } ] } @@ -130,7 +161,7 @@ "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "0 ASC, 4 ASC", + "OrderBy": "0 ASC, 2 ASC", "Inputs": [ { "OperatorType": "Aggregate", @@ -146,10 +177,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) as k, a, textcol1, b, weight_string(textcol1) from user where 1 != 1 group by a, textcol1, b", - "OrderBy": "4 ASC, 1 ASC, 3 ASC", - "Query": "select count(*) as k, a, textcol1, b, weight_string(textcol1) from user group by a, textcol1, b order by textcol1 asc, a asc, b asc", - "Table": "user" + "FieldQuery": "select count(*) as k, a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, textcol1, b", + "OrderBy": "2 ASC, 1 ASC, 3 ASC", + "Query": "select count(*) as k, a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` group by a, textcol1, b order by textcol1 asc, a asc, b asc", + "Table": "`user`" } ] } @@ -175,9 +206,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) from user where 1 != 1", - "Query": "select count(*) from user", - "Table": "user" + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select count(*) from `user`", + "Table": "`user`" } ] } @@ -201,9 +232,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": 
"select sum(col) from user where 1 != 1", - "Query": "select sum(col) from user", - "Table": "user" + "FieldQuery": "select sum(col) from `user` where 1 != 1", + "Query": "select sum(col) from `user`", + "Table": "`user`" } ] } @@ -227,9 +258,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select min(col) from user where 1 != 1", - "Query": "select min(col) from user", - "Table": "user" + "FieldQuery": "select min(col) from `user` where 1 != 1", + "Query": "select min(col) from `user`", + "Table": "`user`" } ] } @@ -253,9 +284,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select max(col) from user where 1 != 1", - "Query": "select max(col) from user", - "Table": "user" + "FieldQuery": "select max(col) from `user` where 1 != 1", + "Query": "select max(col) from `user`", + "Table": "`user`" } ] } @@ -279,10 +310,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2 from user where 1 != 1 group by col1", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1", "OrderBy": "0 ASC, 1 ASC, 0 ASC", - "Query": "select distinct col1, col2 from user group by col1 order by col1 asc, col2 asc, col1 asc", - "Table": "user" + "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1 order by col1 asc, col2 asc, col1 asc", + "Table": "`user`" } ] } @@ -297,7 +328,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -306,9 +337,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.a from user where 1 != 1", - "Query": "select user.a from user", - "Table": "user" + "FieldQuery": "select `user`.a from `user` where 1 != 1", + "Query": "select `user`.a from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -337,9 +368,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 
id, count(*) from user where 1 != 1 group by id", - "Query": "select id, count(*) from user group by id", - "Table": "user" + "FieldQuery": "select id, count(*) from `user` where 1 != 1 group by id", + "Query": "select id, count(*) from `user` group by id", + "Table": "`user`" } } @@ -355,9 +386,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, col, count(*) from user where 1 != 1 group by id, col", - "Query": "select id, col, count(*) from user group by id, col", - "Table": "user" + "FieldQuery": "select id, col, count(*) from `user` where 1 != 1 group by id, col", + "Query": "select id, col, count(*) from `user` group by id, col", + "Table": "`user`" } } @@ -380,10 +411,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col, count(*) from user where 1 != 1 group by col", + "FieldQuery": "select col, count(*), weight_string(col) from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(*) from user group by col order by col asc", - "Table": "user" + "Query": "select col, count(*), weight_string(col) from `user` group by col order by col asc", + "Table": "`user`" } ] } @@ -412,10 +443,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `name`, count(*) from user where 1 != 1 group by `name`", + "FieldQuery": "select `name`, count(*), weight_string(`name`) from `user` where 1 != 1 group by `name`", "OrderBy": "0 ASC", - "Query": "select `name`, count(*) from user group by `name` order by `name` asc", - "Table": "user" + "Query": "select `name`, count(*), weight_string(`name`) from `user` group by `name` order by `name` asc", + "Table": "`user`" } ] } @@ -433,9 +464,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, 1 + count(*) from user where 1 != 1 group by id", - "Query": "select id, 1 + count(*) from user group by id", - "Table": "user" + "FieldQuery": "select id, 1 + count(*) from `user` where 1 != 1 group by id", + "Query": "select id, 1 + count(*) from `user` group 
by id", + "Table": "`user`" } } @@ -451,9 +482,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id as val, 1 + count(*) from user where 1 != 1 group by val", - "Query": "select id as val, 1 + count(*) from user group by val", - "Table": "user" + "FieldQuery": "select id as val, 1 + count(*) from `user` where 1 != 1 group by val", + "Query": "select id as val, 1 + count(*) from `user` group by val", + "Table": "`user`" } } @@ -469,9 +500,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select val as id, 1 + count(*) from user where 1 != 1 group by user.id", - "Query": "select val as id, 1 + count(*) from user group by user.id", - "Table": "user" + "FieldQuery": "select val as id, 1 + count(*) from `user` where 1 != 1 group by `user`.id", + "Query": "select val as id, 1 + count(*) from `user` group by `user`.id", + "Table": "`user`" } } @@ -487,9 +518,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select *, id, 1 + count(*) from user where 1 != 1 group by id", - "Query": "select *, id, 1 + count(*) from user group by id", - "Table": "user" + "FieldQuery": "select *, id, 1 + count(*) from `user` where 1 != 1 group by id", + "Query": "select *, id, 1 + count(*) from `user` group by id", + "Table": "`user`" } } @@ -505,9 +536,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, count(*) as c from user where 1 != 1 group by id", - "Query": "select id, count(*) as c from user group by id having id = 1 and c = 10", - "Table": "user", + "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id", + "Query": "select id, count(*) as c from `user` group by id having id = 1 and c = 10", + "Table": "`user`", "Values": [ 1 ], @@ -539,9 +570,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) as a from user where 1 != 1", - "Query": "select count(*) as a from user", - "Table": "user" + "FieldQuery": "select count(*) as a from `user` where 1 != 1", + "Query": "select count(*) as a from `user`", + 
"Table": "`user`" } ] } @@ -567,9 +598,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, count(*) from user where 1 != 1", - "Query": "select id, count(*) from user", - "Table": "user" + "FieldQuery": "select id, count(*) from `user` where 1 != 1", + "Query": "select id, count(*) from `user`", + "Table": "`user`" } ] } @@ -593,10 +624,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", + "FieldQuery": "select col, weight_string(col) from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select distinct col from user order by col asc", - "Table": "user" + "Query": "select distinct col, weight_string(col) from `user` order by col asc", + "Table": "`user`" } ] } @@ -620,10 +651,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1 group by col", + "FieldQuery": "select col, weight_string(col) from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col from user group by col order by col asc", - "Table": "user" + "Query": "select col, weight_string(col) from `user` group by col order by col asc", + "Table": "`user`" } ] } @@ -641,9 +672,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, count(distinct col) from user where 1 != 1 group by id", - "Query": "select id, count(distinct col) from user group by id", - "Table": "user" + "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id", + "Query": "select id, count(distinct col) from `user` group by id", + "Table": "`user`" } } @@ -666,10 +697,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col, count(distinct id) from user where 1 != 1 group by col", + "FieldQuery": "select col, count(distinct id), weight_string(col) from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(distinct id) from user group by col order by col asc", - "Table": "user" + "Query": "select col, count(distinct id), 
weight_string(col) from `user` group by col order by col asc", + "Table": "`user`" } ] } @@ -694,10 +725,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2 from user where 1 != 1 group by col1, col2", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2", "OrderBy": "0 ASC, 1 ASC", - "Query": "select col1, col2 from user group by col1, col2 order by col1 asc, col2 asc", - "Table": "user" + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc", + "Table": "`user`" } ] } @@ -721,10 +752,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col2 from user where 1 != 1 group by col2", + "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2", "OrderBy": "0 ASC", - "Query": "select col2 from user group by col2 order by col2 asc", - "Table": "user" + "Query": "select col2, weight_string(col2) from `user` group by col2 order by col2 asc", + "Table": "`user`" } ] } @@ -749,10 +780,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2 from user where 1 != 1 group by col1, col2", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2", "OrderBy": "0 ASC, 1 ASC", - "Query": "select col1, col2 from user group by col1, col2 order by col1 asc, col2 asc", - "Table": "user" + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc", + "Table": "`user`" } ] } @@ -777,10 +808,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2 from user where 1 != 1 group by col1, col2", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2", "OrderBy": "0 ASC, 1 ASC", - "Query": "select col1, col2 from user group by col1, col2 order by 
col1 asc, col2 asc", - "Table": "user" + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc", + "Table": "`user`" } ] } @@ -805,10 +836,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, min(distinct col2) from user where 1 != 1 group by col1", + "FieldQuery": "select col1, min(distinct col2), weight_string(col1) from `user` where 1 != 1 group by col1", "OrderBy": "0 ASC", - "Query": "select col1, min(distinct col2) from user group by col1 order by col1 asc", - "Table": "user" + "Query": "select col1, min(distinct col2), weight_string(col1) from `user` group by col1 order by col1 asc", + "Table": "`user`" } ] } @@ -838,10 +869,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2 from user where 1 != 1 group by col1, col2", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2", "OrderBy": "0 ASC, 1 ASC", - "Query": "select col1, col2 from user group by col1, col2 order by col1 asc, col2 asc", - "Table": "user" + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc", + "Table": "`user`" } ] } @@ -872,10 +903,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) from user where 1 != 1 group by b, a", + "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a", "OrderBy": "1 ASC, 0 ASC", - "Query": "select a, b, count(*) from user group by b, a order by b asc, a asc", - "Table": "user" + "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a order by b asc, a asc", + "Table": "`user`" } ] } @@ -900,10 +931,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) from user where 1 != 1 group by 2, 1", + "FieldQuery": "select a, b, count(*), weight_string(b), 
weight_string(a) from `user` where 1 != 1 group by 2, 1", "OrderBy": "1 ASC, 0 ASC", - "Query": "select a, b, count(*) from user group by 2, 1 order by b asc, a asc", - "Table": "user" + "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by 2, 1 order by b asc, a asc", + "Table": "`user`" } ] } @@ -928,10 +959,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) from user where 1 != 1 group by b, a", + "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a", "OrderBy": "1 ASC, 0 ASC", - "Query": "select a, b, count(*) from user group by b, a order by b asc, a asc", - "Table": "user" + "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a order by b asc, a asc", + "Table": "`user`" } ] } @@ -955,10 +986,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1 group by 1", + "FieldQuery": "select col, weight_string(col) from `user` where 1 != 1 group by 1", "OrderBy": "0 ASC", - "Query": "select col from user group by 1 order by col asc", - "Table": "user" + "Query": "select col, weight_string(col) from `user` group by 1 order by col asc", + "Table": "`user`" } ] } @@ -986,9 +1017,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) from user where 1 != 1", - "Query": "select count(*) from user", - "Table": "user" + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select count(*) from `user`", + "Table": "`user`" } ] } @@ -1017,10 +1048,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*) from user where 1 != 1 group by 1, 2, 3", + "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3", "OrderBy": "0 ASC, 1 ASC, 2 ASC", - "Query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1 asc, 2 asc, 3 asc", - 
"Table": "user" + "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3 order by 1 asc, 2 asc, 3 asc", + "Table": "`user`" } ] } @@ -1045,10 +1076,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*) from user where 1 != 1 group by 1, 2, 3", + "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3", "OrderBy": "0 ASC, 1 ASC, 2 ASC", - "Query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a asc, b asc, c asc", - "Table": "user" + "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3 order by a asc, b asc, c asc", + "Table": "`user`" } ] } @@ -1073,10 +1104,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*) from user where 1 != 1 group by 1, 2, 3, 4", + "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, 4", "OrderBy": "3 ASC, 1 ASC, 0 ASC, 2 ASC", - "Query": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d asc, b asc, a asc, c asc", - "Table": "user" + "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 1, 2, 3, 4 order by d asc, b asc, a asc, c asc", + "Table": "`user`" } ] } @@ -1101,10 +1132,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*) from user where 1 != 1 group by 3, 2, 1, 4", + "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 3, 2, 1, 4", "OrderBy": "3 ASC, 1 ASC, 0 ASC, 2 ASC", - "Query": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d asc, b asc, a asc, c asc", - "Table": "user" + "Query": 
"select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 3, 2, 1, 4 order by d asc, b asc, a asc, c asc", + "Table": "`user`" } ] } @@ -1129,10 +1160,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, count(*) from user where 1 != 1 group by 3, 2, 1", + "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by 3, 2, 1", "OrderBy": "0 DESC, 2 DESC, 1 ASC", - "Query": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b asc", - "Table": "user" + "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by 3, 2, 1 order by 1 desc, 3 desc, b asc", + "Table": "`user`" } ] } @@ -1165,10 +1196,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col, count(*) from user where 1 != 1 group by col", + "FieldQuery": "select col, count(*), weight_string(col) from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(*) from user group by col order by col asc limit :__upper_limit", - "Table": "user" + "Query": "select col, count(*), weight_string(col) from `user` group by col order by col asc limit :__upper_limit", + "Table": "`user`" } ] } @@ -1188,15 +1219,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a from user where 1 != 1 group by a collate utf8_general_ci", - "Query": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", - "Table": "user", + "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci", + "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above # routing rules for aggregates "select id, count(*) from route2 group by id" @@ -1233,6 +1265,7 @@ 
"Table": "ref" } } +Gen4 plan same as above # distinct and aggregate functions missing group by "select distinct a, count(*) from user" @@ -1256,10 +1289,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, count(*) from user where 1 != 1", + "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select a, count(*) from user order by a asc", - "Table": "user" + "Query": "select a, count(*), weight_string(a) from `user` order by a asc", + "Table": "`user`" } ] } @@ -1289,10 +1322,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, count(*) from user where 1 != 1 group by a", + "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a", "OrderBy": "0 ASC, 0 ASC", - "Query": "select a, count(*) from user group by a order by a asc, a asc", - "Table": "user" + "Query": "select a, count(*), weight_string(a) from `user` group by a order by a asc, a asc", + "Table": "`user`" } ] } diff --git a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt index 91e071130d9..7fe1469ddfd 100644 --- a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt @@ -24,7 +24,7 @@ "Name": "user", "Sharded": true }, - "query": "alter vschema create vindex user.hash_vdx using hash" + "query": "alter vschema create vindex `user`.hash_vdx using hash" } } @@ -84,7 +84,7 @@ "Name": "user", "Sharded": true }, - "query": "alter vschema on user.a add auto_increment id using a_seq" + "query": "alter vschema on `user`.a add auto_increment id using a_seq" } } diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt index a9bb3e93249..3b121cc2cd0 100644 --- a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt @@ 
-10,7 +10,9 @@ "Sharded": false }, "TargetDestination": "ExactKeyRange(-)", - "Query": "select count(*), col from unsharded" + "IsDML": false, + "Query": "select count(*), col from unsharded", + "SingleShardOnly": false } } Gen4 plan same as above @@ -28,7 +30,8 @@ Gen4 plan same as above }, "TargetDestination": "ExactKeyRange(-)", "IsDML": true, - "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1" + "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", + "SingleShardOnly": false } } Gen4 plan same as above @@ -47,7 +50,8 @@ Gen4 plan same as above "TargetDestination": "ExactKeyRange(-)", "IsDML": true, "MultishardAutocommit": true, - "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ `user` set val = 1 where id = 18446744073709551616 and id = 1" + "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ `user` set val = 1 where id = 18446744073709551616 and id = 1", + "SingleShardOnly": false } } Gen4 plan same as above @@ -65,14 +69,15 @@ Gen4 plan same as above }, "TargetDestination": "ExactKeyRange(-)", "IsDML": true, - "Query": "delete from `USER` where ID = 42" + "Query": "delete from `USER` where ID = 42", + "SingleShardOnly": false } } Gen4 plan same as above # insert bypass: not supported "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')" -"INSERT not supported when targeting a key range: targetString" +"range queries are not allowed for insert statement: targetString" Gen4 plan same as above # bypass query for into outfile s3 @@ -87,7 +92,9 @@ Gen4 plan same as above "Sharded": false }, "TargetDestination": "ExactKeyRange(-)", - "Query": "select count(*), col from unsharded into outfile s3 'x.txt'" + "IsDML": false, + "Query": "select count(*), col from unsharded into outfile s3 'x.txt'", + "SingleShardOnly": false } } Gen4 plan same as above @@ -104,7 +111,9 @@ Gen4 plan same as above "Sharded": false }, "TargetDestination": "ExactKeyRange(-)", - "Query": "select * from `user` into outfile s3 'x.txt'" + 
"IsDML": false, + "Query": "select * from `user` into outfile s3 'x.txt'", + "SingleShardOnly": false } } Gen4 plan same as above @@ -157,7 +166,9 @@ Gen4 plan same as above "Sharded": false }, "TargetDestination": "ExactKeyRange(-)", - "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */" + "IsDML": false, + "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */", + "SingleShardOnly": false } } Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt index 7bb0fc36af5..0c223d7ce2c 100644 --- a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt @@ -15,6 +15,7 @@ "SingleShardOnly": false } } +Gen4 plan same as above # update bypass "update user set val = 1 where id = 18446744073709551616 and id = 1" @@ -29,10 +30,11 @@ }, "TargetDestination": "Shard(-80)", "IsDML": true, - "Query": "update user set val = 1 where id = 18446744073709551616 and id = 1", + "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", "SingleShardOnly": false } } +Gen4 plan same as above # delete bypass "DELETE FROM USER WHERE ID = 42" @@ -47,10 +49,11 @@ }, "TargetDestination": "Shard(-80)", "IsDML": true, - "Query": "delete from USER where ID = 42", + "Query": "delete from `USER` where ID = 42", "SingleShardOnly": false } } +Gen4 plan same as above # insert bypass "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')" @@ -65,10 +68,11 @@ }, "TargetDestination": "Shard(-80)", "IsDML": true, - "Query": "insert into USER(ID, `NAME`) values (42, 'ms X')", + "Query": "insert into `USER`(ID, `NAME`) values (42, 'ms X')", "SingleShardOnly": false } } +Gen4 plan same as above # insert bypass with sequence: sequences ignored "insert into user(nonid) values (2)" @@ -83,10 +87,11 @@ }, "TargetDestination": "Shard(-80)", "IsDML": true, - "Query": "insert into user(nonid) 
values (2)", + "Query": "insert into `user`(nonid) values (2)", "SingleShardOnly": false } } +Gen4 plan same as above # bypass query for into outfile s3 "select count(*), col from unsharded into outfile S3 'x.txt'" @@ -105,7 +110,9 @@ "SingleShardOnly": false } } +Gen4 plan same as above +# Select outfile "select * from user into outfile S3 'x.txt'" { "QueryType": "SELECT", @@ -118,10 +125,11 @@ }, "TargetDestination": "Shard(-80)", "IsDML": false, - "Query": "select * from user into outfile s3 'x.txt'", + "Query": "select * from `user` into outfile s3 'x.txt'", "SingleShardOnly": false } } +Gen4 plan same as above "load data from s3 'x.txt' into table x" { @@ -139,6 +147,7 @@ "SingleShardOnly": true } } +Gen4 plan same as above "load data from s3 'x.txt'" { @@ -156,6 +165,7 @@ "SingleShardOnly": true } } +Gen4 plan same as above # create table "create /* test */ table t1(id bigint, primary key(id)) /* comments */" @@ -174,3 +184,4 @@ "SingleShardOnly": false } } +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/call_cases.txt b/go/vt/vtgate/planbuilder/testdata/call_cases.txt new file mode 100644 index 00000000000..0dcdca17ae8 --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/call_cases.txt @@ -0,0 +1,57 @@ +# simple call proc on current keyspace +"call proc()" +{ + "QueryType": "CALL_PROC", + "Original": "call proc()", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "call proc()", + "SingleShardOnly": false + } +} + +# call qualified keyspace +"call main.proc()" +{ + "QueryType": "CALL_PROC", + "Original": "call main.proc()", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "call proc()", + "SingleShardOnly": false + } +} + +# CALL not allowed on sharded keyspaces +"call user.proc()" +"CALL is not 
supported for sharded database" + +# CALL with expressions and parameters +"call proc(1, 'foo', @var)" +{ + "QueryType": "CALL_PROC", + "Original": "call proc(1, 'foo', @var)", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "call proc(1, 'foo', :__vtudvvar)", + "SingleShardOnly": false + } +} diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt b/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt index c1c1fb980ab..091f0bc8dd2 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt @@ -45,7 +45,7 @@ # simple create table with table qualifier that does not exists "create table a.b(id int)" -"no keyspace with name [a] found" +"Unknown database 'a' in vschema" #Alter table "alter table a ADD id int" @@ -73,7 +73,7 @@ "Name": "user", "Sharded": true }, - "Query": "alter table user add column id int" + "Query": "alter table `user` add column id int" } } @@ -98,11 +98,21 @@ # create db foo "create database foo" -"create database not allowed" +{ + "QueryType": "DDL", + "Original": "create database foo", + "Instructions": { + "OperatorType": "CREATEDB", + "Keyspace": { + "Name": "foo", + "Sharded": false + } + } +} # create db main "create database main" -"cannot create database 'main'; database exists" +"Can't create database 'main'; database exists" # create db if not exists main "create database if not exists main" @@ -116,23 +126,43 @@ # alter db foo "alter database foo collate utf8" -"cannot alter database 'foo'; database does not exists" +"Can't alter database 'foo'; unknown database" # alter db main "alter database main collate utf8" -"alter database not allowed" +"alter database is not supported" # drop db foo "drop database foo" -"cannot drop database 'foo'; database does not exists" +"Can't drop database 'foo'; database doesn't exists" # drop db main "drop database main" -"drop 
database not allowed" +{ + "QueryType": "DDL", + "Original": "drop database main", + "Instructions": { + "OperatorType": "DROPDB", + "Keyspace": { + "Name": "main", + "Sharded": false + } + } +} # drop db if exists main "drop database if exists main" -"drop database not allowed" +{ + "QueryType": "DDL", + "Original": "drop database if exists main", + "Instructions": { + "OperatorType": "DROPDB", + "Keyspace": { + "Name": "main", + "Sharded": false + } + } +} # drop db if exists foo "drop schema if exists foo" @@ -155,7 +185,7 @@ "Name": "user", "Sharded": true }, - "Query": "alter table user add index a (id)" + "Query": "alter table `user` add index a (id)" } } @@ -260,7 +290,7 @@ "Name": "user", "Sharded": true }, - "Query": "alter view user_extra as select * from user" + "Query": "alter view user_extra as select * from `user`" } } @@ -293,3 +323,65 @@ "Query": "drop view a" } } + +# Truncate table with qualifier +"truncate user.user_extra" +{ + "QueryType": "DDL", + "Original": "truncate user.user_extra", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "truncate table user_extra" + } +} + +# Rename table +"rename table a to main.b" +{ + "QueryType": "DDL", + "Original": "rename table a to main.b", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "Query": "rename table a to b" + } +} + +# CREATE temp TABLE +"create temporary table a(id int)" +{ + "QueryType": "DDL", + "Original": "create temporary table a(id int)", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "Query": "create temporary table a (\n\tid int\n)", + "TempTable": true + } +} + +# DROP temp TABLE +"drop temporary table a" +{ + "QueryType": "DDL", + "Original": "drop temporary table a", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "Query": "drop temporary table a", + 
"TempTable": true + } +} diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt index 83e831983b9..190ed35ad1b 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt @@ -9,7 +9,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view a as select * from user" + "Query": "create view a as select * from `user`" } } @@ -24,7 +24,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view a as select * from user" + "Query": "create view a as select * from `user`" } } @@ -39,7 +39,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select 1 from user" + "Query": "create view view_a as select 1 from `user`" } } @@ -54,7 +54,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user.* from user" + "Query": "create view view_a as select `user`.* from `user`" } } @@ -69,7 +69,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from user" + "Query": "create view view_a as select * from `user`" } } @@ -84,7 +84,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user.* from user" + "Query": "create view view_a as select `user`.* from `user`" } } @@ -144,7 +144,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from authoritative join user on authoritative.user_id = user.id" + "Query": "create view view_a as select * from authoritative join `user` on authoritative.user_id = `user`.id" } } @@ -159,7 +159,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user.id, a.*, user.col1 from authoritative as a join user on a.user_id = user.id" + "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id" } } @@ -174,7 +174,7 @@ "Name": "user", 
"Sharded": true }, - "Query": "create view view_a as select col from user join user_extra on user.id = user_extra.user_id" + "Query": "create view view_a as select col from `user` join user_extra on `user`.id = user_extra.user_id" } } @@ -189,7 +189,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user.id from user join user_extra on user.id = user_extra.user_id" + "Query": "create view view_a as select `user`.id from `user` join user_extra on `user`.id = user_extra.user_id" } } @@ -234,7 +234,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id" + "Query": "create view view_a as select `user`.col, user_extra.id + user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id" } } @@ -249,7 +249,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id" + "Query": "create view view_a as select /* comment */ `user`.col from `user` join user_extra on `user`.id = user_extra.user_id" } } @@ -264,7 +264,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update" + "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id for update" } } @@ -279,7 +279,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id" + "Query": "create view view_a as select `user`.Col, user_extra.Id from `user` join user_extra on `user`.id = user_extra.user_id" } } @@ -298,7 +298,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from user where id = 0x04" + "Query": "create view view_a as select * from `user` where id = 0x04" } } @@ 
-313,7 +313,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from user where `name` = 'abc' and id = 4 limit 5" + "Query": "create view view_a as select * from `user` where `name` = 'abc' and id = 4 limit 5" } } @@ -328,7 +328,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from user where id = 4 and `name` = 'abc' limit 5" + "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5" } } @@ -343,7 +343,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from user where id = 4 and `name` = 'abc' limit 5" + "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5" } } @@ -358,7 +358,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user0_.col as col0_ from user as user0_ where id = 1 order by user0_.col asc" + "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col asc" } } @@ -373,7 +373,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user0_.col as col0_ from user as user0_ where id = 1 order by col0_ desc" + "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc" } } @@ -388,7 +388,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from user where id = 1 and `name` = true" + "Query": "create view view_a as select * from `user` where id = 1 and `name` = true" } } @@ -403,7 +403,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select * from music where user_id = 1 union select * from user where id = 1" + "Query": "create view view_a as select * from music where user_id = 1 union select * from `user` where id = 1" } } @@ -418,7 +418,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select 42 from user" + "Query": "create view view_a as select 
42 from `user`" } } @@ -448,7 +448,7 @@ "Name": "user", "Sharded": true }, - "Query": "alter table user add index a (id)" + "Query": "alter table `user` add index a (id)" } } @@ -463,7 +463,7 @@ "Name": "user", "Sharded": true }, - "Query": "alter table user add column id int" + "Query": "alter table `user` add column id int" } } @@ -478,7 +478,7 @@ "Name": "user", "Sharded": true }, - "Query": "alter view user_extra as select * from user" + "Query": "alter view user_extra as select * from `user`" } } @@ -497,7 +497,7 @@ "Name": "user", "Sharded": true }, - "Query": "drop table user, user_extra" + "Query": "drop table `user`, user_extra" } } @@ -520,7 +520,7 @@ "Name": "user", "Sharded": true }, - "Query": "drop view user, user_extra" + "Query": "drop view `user`, user_extra" } } @@ -531,3 +531,41 @@ # drop view with unknown view "drop view unknown" "keyspace not specified" + +# Truncate table without qualifier +"truncate user_extra" +{ + "QueryType": "DDL", + "Original": "truncate user_extra", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "truncate table user_extra" + } +} + +# Rename table +"rename table user_extra to b" +{ + "QueryType": "DDL", + "Original": "rename table user_extra to b", + "Instructions": { + "OperatorType": "DDL", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "rename table user_extra to b" + } +} + +# Rename table with different keyspace tables +"rename table user_extra to b, main.a to b" +"Tables or Views specified in the query do not belong to the same destination" + +# Rename table with change in keyspace name +"rename table user_extra to main.b" +"Changing schema from 'user' to 'main' is not allowed" diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt b/go/vt/vtgate/planbuilder/testdata/dml_cases.txt index 435378c7e04..20c91c6c0f8 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt +++ 
b/go/vt/vtgate/planbuilder/testdata/dml_cases.txt @@ -1,10 +1,12 @@ # update table not found "update nouser set val = 1" "table nouser not found" +Gen4 plan same as above # delete table not found "delete from nouser" "table nouser not found" +Gen4 plan same as above # explicit keyspace reference "update main.m1 set val = 1" @@ -23,6 +25,7 @@ "Query": "update m1 set val = 1" } } +Gen4 plan same as above # update unsharded "update unsharded set val = 1" @@ -41,6 +44,7 @@ "Query": "update unsharded set val = 1" } } +Gen4 plan same as above # subqueries in unsharded update "update unsharded set col = (select col from unsharded limit 1)" @@ -59,6 +63,7 @@ "Query": "update unsharded set col = (select col from unsharded limit 1)" } } +Gen4 plan same as above # unsharded union in subquery of unsharded update "update unsharded set col = (select id from unsharded union select id from unsharded)" @@ -77,6 +82,7 @@ "Query": "update unsharded set col = (select id from unsharded union select id from unsharded)" } } +Gen4 plan same as above # unsharded join in subquery of unsharded update "update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)" @@ -95,6 +101,7 @@ "Query": "update unsharded set col = (select id from unsharded as a join unsharded as b on a.id = b.id)" } } +Gen4 plan same as above # update with join subquery "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000" @@ -113,6 +120,7 @@ "Query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col \u003c 1000" } } +Gen4 plan same as above # routing rules: updated of a routed table "update route1 set a=1 where id=1" @@ -128,7 +136,7 @@ }, 
"TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "update user as route1 set a = 1 where id = 1", + "Query": "update `user` as route1 set a = 1 where id = 1", "Table": "user", "Values": [ 1 @@ -136,6 +144,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update: routing rules for subquery. "update unsharded_a set a=(select a from route2)" @@ -154,6 +163,7 @@ "Query": "update unsharded_a set a = (select a from unsharded as route2)" } } +Gen4 plan same as above # delete unsharded "delete from unsharded" @@ -172,6 +182,7 @@ "Query": "delete from unsharded" } } +Gen4 plan same as above # update by primary keyspace id "update user set val = 1 where id = 1" @@ -187,7 +198,7 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "update user set val = 1 where id = 1", + "Query": "update `user` set val = 1 where id = 1", "Table": "user", "Values": [ 1 @@ -195,6 +206,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id with alias "update user as user_alias set val = 1 where user_alias.id = 1" @@ -210,7 +222,7 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "update user as user_alias set val = 1 where user_alias.id = 1", + "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1", "Table": "user", "Values": [ 1 @@ -218,6 +230,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id with parenthesized expression "update user set val = 1 where (id = 1)" @@ -233,7 +246,7 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "update user set val = 1 where id = 1", + "Query": "update `user` set val = 1 where id = 1", "Table": "user", "Values": [ 1 @@ -241,6 +254,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id with multi-part where clause with parens "update user set val = 1 where (name = 'foo' and id = 1)" @@ -256,7 +270,7 @@ }, "TargetTabletType": 
"MASTER", "MultiShardAutocommit": false, - "Query": "update user set val = 1 where `name` = 'foo' and id = 1", + "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1", "Table": "user", "Values": [ 1 @@ -264,6 +278,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id, changing one vindex column "update user_metadata set email = 'juan@vitess.io' where user_id = 1" @@ -292,10 +307,12 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id, changing same vindex twice "update user_metadata set email = 'a', email = 'b' where user_id = 1" "column has duplicate set values: 'email'" +Gen4 plan same as above # update by primary keyspace id, changing multiple vindex columns "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1" @@ -325,6 +342,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id, changing one vindex column, using order by and limit "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10" @@ -353,6 +371,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id, stray where clause "update user set val = 1 where id = id2 and id = 1" @@ -368,7 +387,7 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "update user set val = 1 where id = id2 and id = 1", + "Query": "update `user` set val = 1 where id = id2 and id = 1", "Table": "user", "Values": [ 1 @@ -376,6 +395,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update by primary keyspace id, stray where clause with conversion error "update user set val = 1 where id = 18446744073709551616 and id = 1" @@ -391,7 +411,7 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "update user set val = 1 where id = 18446744073709551616 and id = 1", + "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", 
"Table": "user", "Values": [ 1 @@ -399,6 +419,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # delete from by primary keyspace id "delete from user where id = 1" @@ -415,8 +436,8 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user where id = 1 for update", - "Query": "delete from user where id = 1", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", + "Query": "delete from `user` where id = 1", "Table": "user", "Values": [ 1 @@ -424,6 +445,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # multi-table delete with comma join "delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1" @@ -442,6 +464,7 @@ "Query": "delete a from unsharded_a as a, unsharded_b as b where a.id = b.id and b.val = 1" } } +Gen4 plan same as above # multi-table delete with ansi join "delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1" @@ -460,6 +483,7 @@ "Query": "delete a from unsharded_a as a join unsharded_b as b on a.id = b.id where b.val = 1" } } +Gen4 plan same as above #delete with join from subquery "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000" @@ -478,6 +502,7 @@ "Query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000" } } +Gen4 plan same as above # routing rules: deleted from a routed table "delete from route1 where id = 1" @@ -494,8 +519,8 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user where id = 1 for update", - "Query": "delete from 
user as route1 where id = 1", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", + "Query": "delete from `user` as route1 where id = 1", "Table": "user", "Values": [ 1 @@ -503,6 +528,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # delete: routing rules for subquery "delete from unsharded_a where a=(select a from route2)" @@ -521,6 +547,7 @@ "Query": "delete from unsharded_a where a = (select a from unsharded as route2)" } } +Gen4 plan same as above # update by lookup "update music set val = 1 where id = 1" @@ -544,6 +571,7 @@ "Vindex": "music_user_map" } } +Gen4 plan same as above # update multi-table ansi join "update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1" @@ -562,6 +590,7 @@ "Query": "update unsharded_a as a join unsharded_b as b on a.id = b.id set a.val = 'foo' where b.val = 1" } } +Gen4 plan same as above # update multi-table comma join "update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1" @@ -580,6 +609,7 @@ "Query": "update unsharded_a as a, unsharded_b as b set a.val = 'foo' where a.id = b.id and b.val = 1" } } +Gen4 plan same as above # delete from by lookup "delete from music where id = 1" @@ -605,6 +635,7 @@ "Vindex": "music_user_map" } } +Gen4 plan same as above # delete from, no owned vindexes "delete from music_extra where user_id = 1" @@ -628,6 +659,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # simple insert, no values "insert into unsharded values()" @@ -647,6 +679,7 @@ "TableName": "unsharded" } } +Gen4 plan same as above # simple insert unsharded "insert into unsharded values(1, 2)" @@ -666,6 +699,7 @@ "TableName": "unsharded" } } +Gen4 plan same as above # simple upsert unsharded "insert into unsharded values(1, 2) on duplicate key update x = 3" @@ -685,6 +719,7 @@ "TableName": "unsharded" } } +Gen4 plan same as above # unsharded insert, no col list with auto-inc and authoritative column list "insert into 
unsharded_authoritative values(1,1)" @@ -704,6 +739,7 @@ "TableName": "unsharded_authoritative" } } +Gen4 plan same as above # sharded upsert with sharding key set to vindex column "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)" @@ -723,6 +759,7 @@ "TableName": "music" } } +Gen4 plan same as above # sharded bulk upsert with sharding key set to vindex column "insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)" @@ -742,6 +779,7 @@ "TableName": "music" } } +Gen4 plan same as above # insert unsharded with select "insert into unsharded select id from unsharded_auto" @@ -761,6 +799,7 @@ "TableName": "unsharded" } } +Gen4 plan same as above # insert unsharded with select with join "insert into unsharded select id from unsharded join unsharded_auto" @@ -780,10 +819,12 @@ "TableName": "unsharded" } } +Gen4 plan same as above # insert unsharded, invalid value for auto-inc "insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')" "could not compute value for vindex or auto-inc column: strconv.ParseUint: parsing "18446744073709551616": value out of range" +Gen4 plan same as above # insert unsharded, column present "insert into unsharded_auto(id, val) values(1, 'aa')" @@ -803,6 +844,7 @@ "TableName": "unsharded_auto" } } +Gen4 plan same as above # insert unsharded, column absent "insert into unsharded_auto(val) values('aa')" @@ -822,6 +864,7 @@ "TableName": "unsharded_auto" } } +Gen4 plan same as above # insert unsharded, column absent "insert into unsharded_auto(val) values(false)" @@ -841,6 +884,7 @@ "TableName": "unsharded_auto" } } +Gen4 plan same as above # insert unsharded, multi-val "insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')" @@ -860,6 +904,7 @@ "TableName": "unsharded_auto" } } +Gen4 plan same as above # unsharded insert subquery in insert value "insert into unsharded values((select 1 from dual), 1)" @@ -879,6 +924,7 @@ "TableName": 
"unsharded" } } +Gen4 plan same as above # sharded insert subquery in insert value "insert into user(id, val) values((select 1), 1)" @@ -894,10 +940,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, val, `Name`, Costly) values (:_Id_0, 1, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(id, val, `Name`, Costly) values (:_Id_0, 1, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert into a routed table "insert into route1(id) values (1)" @@ -913,14 +960,16 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert with mimatched column list "insert into user(id) values (1, 2)" "column list doesn't match values" +Gen4 plan same as above # insert no column list for sharded authoritative table "insert into authoritative values(1, 2, 3)" @@ -940,6 +989,7 @@ "TableName": "authoritative" } } +Gen4 plan same as above # insert sharded, no values "insert into user values()" @@ -955,10 +1005,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert with one vindex "insert into user(id) values (1)" @@ -974,10 +1025,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert ignore sharded "insert ignore into user(id) values (1)" @@ -993,10 +1045,11 @@ }, 
"TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert ignore into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", + "Query": "insert ignore into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert on duplicate key "insert into user(id) values(1) on duplicate key update col = 2" @@ -1012,10 +1065,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0) on duplicate key update col = 2", + "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0) on duplicate key update col = 2", "TableName": "user" } } +Gen4 plan same as above # insert with one vindex and bind var "insert into user(id) values (:aa)" @@ -1031,10 +1085,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert with non vindex "insert into user(nonid) values (2)" @@ -1050,10 +1105,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(nonid, id, `Name`, Costly) values (2, :_Id_0, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(nonid, id, `Name`, Costly) values (2, :_Id_0, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert with default seq "insert into user(id, nonid) values (default, 2)" @@ -1069,10 +1125,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, nonid, `Name`, Costly) values (:_Id_0, 2, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(id, nonid, `Name`, Costly) values (:_Id_0, 2, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert with non 
vindex bool value "insert into user(nonid) values (true)" @@ -1088,10 +1145,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(nonid, id, `Name`, Costly) values (true, :_Id_0, :_Name_0, :_Costly_0)", + "Query": "insert into `user`(nonid, id, `Name`, Costly) values (true, :_Id_0, :_Name_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert with all vindexes supplied "insert into user(nonid, name, id) values (2, 'foo', 1)" @@ -1107,10 +1165,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(nonid, `name`, id, Costly) values (2, :_Name_0, :_Id_0, :_Costly_0)", + "Query": "insert into `user`(nonid, `name`, id, Costly) values (2, :_Name_0, :_Id_0, :_Costly_0)", "TableName": "user" } } +Gen4 plan same as above # insert for non-vindex autoinc "insert into user_extra(nonid) values (2)" @@ -1130,6 +1189,7 @@ "TableName": "user_extra" } } +Gen4 plan same as above # insert for non-compliant names "insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)" @@ -1149,6 +1209,7 @@ "TableName": "weird`name" } } +Gen4 plan same as above # unsharded insert from union "insert into unsharded select 1 from dual union select 1 from dual" @@ -1168,22 +1229,27 @@ "TableName": "unsharded" } } +Gen4 plan same as above # insert for non-vindex autoinc, invalid value "insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)" "could not compute value for vindex or auto-inc column: strconv.ParseUint: parsing "18446744073709551616": value out of range" +Gen4 plan same as above # insert invalid index value "insert into music_extra(music_id, user_id) values(1, 18446744073709551616)" "could not compute value for vindex or auto-inc column: strconv.ParseUint: parsing "18446744073709551616": value out of range" +Gen4 plan same as above # insert invalid index value "insert into music_extra(music_id, user_id) values(1, id)" "could not compute value for vindex or auto-inc 
column: expression is too complex 'id'" +Gen4 plan same as above # insert invalid table "insert into noexist(music_id, user_id) values(1, 18446744073709551616)" "table noexist not found" +Gen4 plan same as above # insert with multiple rows "insert into user(id) values (1), (2)" @@ -1199,10 +1265,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", + "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", "TableName": "user" } } +Gen4 plan same as above # insert with query timeout "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)" @@ -1218,11 +1285,12 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", + "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", "QueryTimeout": 1, "TableName": "user" } } +Gen4 plan same as above # insert with multiple rows - multi-shard autocommit "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)" @@ -1238,14 +1306,16 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": true, - "Query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", + "Query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", "TableName": "user" } } +Gen4 plan same as above # insert into a vindex not allowed "insert into user_index(id) values(1)" "unsupported: multi-shard or vindex write statement" +Gen4 plan same as above # simple replace unsharded "replace into unsharded values(1, 
2)" @@ -1265,6 +1335,7 @@ "TableName": "unsharded" } } +Gen4 plan same as above # replace unsharded with select "replace into unsharded select id from unsharded_auto" @@ -1284,10 +1355,12 @@ "TableName": "unsharded" } } +Gen4 plan same as above # replace unsharded, invalid value for auto-inc "replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')" "could not compute value for vindex or auto-inc column: strconv.ParseUint: parsing "18446744073709551616": value out of range" +Gen4 plan same as above # replace unsharded, column present "replace into unsharded_auto(id, val) values(1, 'aa')" @@ -1307,6 +1380,7 @@ "TableName": "unsharded_auto" } } +Gen4 plan same as above # replace unsharded, column absent "replace into unsharded_auto(val) values('aa')" @@ -1326,6 +1400,7 @@ "TableName": "unsharded_auto" } } +Gen4 plan same as above # replace unsharded, multi-val "replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')" @@ -1345,10 +1420,12 @@ "TableName": "unsharded_auto" } } +Gen4 plan same as above # replace invalid table "replace into noexist(music_id, user_id) values(1, 18446744073709551616)" "table noexist not found" +Gen4 plan same as above # insert a row in a multi column vindex table "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)" @@ -1368,6 +1445,7 @@ "TableName": "multicolvin" } } +Gen4 plan same as above # insert for overlapped vindex columns "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)" @@ -1387,6 +1465,7 @@ "TableName": "overlap_vindex" } } +Gen4 plan same as above # insert multiple rows in a multi column vindex table "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)" @@ -1406,6 +1485,7 @@ "TableName": "multicolvin" } } +Gen4 plan same as above # delete row in a multi column vindex table "delete from multicolvin where kid=1" @@ -1431,6 +1511,7 @@ "Vindex": "kid_index" } } +Gen4 plan same as above # update columns of multi column vindex "update 
multicolvin set column_b = 1, column_c = 2 where kid = 1" @@ -1459,6 +1540,7 @@ "Vindex": "kid_index" } } +Gen4 plan same as above # update multiple vindexes, with multi column vindex "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1" @@ -1488,6 +1570,7 @@ "Vindex": "kid_index" } } +Gen4 plan same as above # update with no primary vindex on where clause (scatter update) "update user_extra set val = 1" @@ -1507,6 +1590,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # update with target destination "update `user[-]`.user_extra set val = 1" @@ -1526,6 +1610,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # update with no primary vindex on where clause (scatter update) - multi shard autocommit "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1" @@ -1545,6 +1630,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # update with no primary vindex on where clause (scatter update) - query timeout "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1" @@ -1565,6 +1651,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # update with non-comparison expr "update user_extra set val = 1 where id between 1 and 2" @@ -1584,6 +1671,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # update with primary id through IN clause "update user_extra set val = 1 where user_id in (1, 2)" @@ -1610,6 +1698,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update with non-unique key "update user_extra set val = 1 where name = 'foo'" @@ -1629,6 +1718,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # update by lookup with IN clause "update user_extra set val = 1 where id in (1, 2)" @@ -1648,6 +1738,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # update with where clause with parens "update user_extra set val = 1 where (name = 'foo' or id = 1)" @@ -1667,6 +1758,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # delete from with no where clause "delete from user_extra" @@ -1686,6 +1778,7 
@@ "Table": "user_extra" } } +Gen4 plan same as above # delete with target destination "delete from `user[-]`.user_extra" @@ -1705,6 +1798,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # delete with non-comparison expr "delete from user_extra where user_id between 1 and 2" @@ -1724,6 +1818,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # delete from with no index match "delete from user_extra where name = 'jose'" @@ -1743,6 +1838,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # delete from with no index match - multi shard autocommit "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'" @@ -1762,6 +1858,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # delete from with no index match - query timeout "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'" @@ -1782,6 +1879,7 @@ "Table": "user_extra" } } +Gen4 plan same as above # delete from with primary id in through IN clause "delete from user_extra where user_id in (1, 2)" @@ -1808,6 +1906,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # unsharded update where inner query references outer query "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)" @@ -1826,6 +1925,7 @@ "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)" } } +Gen4 plan same as above # unsharded delete where inner query references outer query "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)" @@ -1844,6 +1944,7 @@ "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)" } } +Gen4 plan same as above # update vindex value to null "update user set name = null where id = 1" @@ -1863,8 +1964,8 @@ ], "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from user where id = 
1 for update", - "Query": "update user set `name` = null where id = 1", + "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update", + "Query": "update `user` set `name` = null where id = 1", "Table": "user", "Values": [ 1 @@ -1872,6 +1973,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # insert using last_insert_id "insert into unsharded values(last_insert_id(), 2)" @@ -1891,6 +1993,7 @@ "TableName": "unsharded" } } +Gen4 plan same as above # update vindex value to null with multiple primary keyspace id "update user set name = null where id in (1, 2, 3)" @@ -1910,8 +2013,8 @@ ], "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from user where id in (1, 2, 3) for update", - "Query": "update user set `name` = null where id in (1, 2, 3)", + "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id in (1, 2, 3) for update", + "Query": "update `user` set `name` = null where id in (1, 2, 3)", "Table": "user", "Values": [ [ @@ -1923,6 +2026,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # update vindex value to null without a where clause "update user set name = null" @@ -1942,11 +2046,12 @@ ], "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from user for update", - "Query": "update user set `name` = null", + "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` for update", + "Query": "update `user` set `name` = null", "Table": "user" } } +Gen4 plan same as above # update vindex value to null with complex where clause "update user set name = null where id + 1 = 2" @@ -1966,11 +2071,12 @@ ], "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from user where id + 1 = 2 for update", - "Query": "update user set `name` = null where id + 1 = 2", + 
"OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id + 1 = 2 for update", + "Query": "update `user` set `name` = null where id + 1 = 2", "Table": "user" } } +Gen4 plan same as above # delete from user by primary keyspace id with in clause "delete from user where id in (1, 2, 3)" @@ -1987,8 +2093,8 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user where id in (1, 2, 3) for update", - "Query": "delete from user where id in (1, 2, 3)", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id in (1, 2, 3) for update", + "Query": "delete from `user` where id in (1, 2, 3)", "Table": "user", "Values": [ [ @@ -2000,6 +2106,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # delete from user by complex expression "delete from user where id + 1 = 2" @@ -2016,11 +2123,12 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user where id + 1 = 2 for update", - "Query": "delete from user where id + 1 = 2", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id + 1 = 2 for update", + "Query": "delete from `user` where id + 1 = 2", "Table": "user" } } +Gen4 plan same as above # delete from user without a where clause "delete from user" @@ -2037,11 +2145,12 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user for update", - "Query": "delete from user", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", + "Query": "delete from `user`", "Table": "user" } } +Gen4 plan same as above # delete with single table targets "delete music from music where id = 1" @@ -2067,6 +2176,7 @@ "Vindex": "music_user_map" } } +Gen4 plan same as above # scatter update table with owned vindexes without changing lookup vindex "update 
user set val = 1" @@ -2082,10 +2192,11 @@ }, "TargetTabletType": "MASTER", "MultiShardAutocommit": false, - "Query": "update user set val = 1", + "Query": "update `user` set val = 1", "Table": "user" } } +Gen4 plan same as above # scatter delete with owned lookup vindex "delete from user" @@ -2102,11 +2213,12 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user for update", - "Query": "delete from user", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", + "Query": "delete from `user`", "Table": "user" } } +Gen4 plan same as above # update multi column vindex, without values for all the vindex columns "update multicolvin set column_c = 2 where kid = 1" @@ -2135,6 +2247,7 @@ "Vindex": "kid_index" } } +Gen4 plan same as above # update with binary value "update user set name = _binary 'abc' where id = 1" @@ -2154,8 +2267,33 @@ ], "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from user where id = 1 for update", - "Query": "update user set `name` = _binary 'abc' where id = 1", + "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update", + "Query": "update `user` set `name` = _binary 'abc' where id = 1", + "Table": "user", + "Values": [ + 1 + ], + "Vindex": "user_index" + } +} +{ + "QueryType": "UPDATE", + "Original": "update user set name = _binary 'abc' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "MASTER", + "ChangedVindexValues": [ + "name_user_map:3" + ], + "KsidVindex": "user_index", + "MultiShardAutocommit": false, + "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update", + "Query": "update `user` set `name` = _binary 'abc' where id = 1", 
"Table": "user", "Values": [ 1 @@ -2179,11 +2317,12 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user where `name` = _binary 'abc' for update", - "Query": "delete from user where `name` = _binary 'abc'", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update", + "Query": "delete from `user` where `name` = _binary 'abc'", "Table": "user" } } +Gen4 plan same as above # delete with shard targeting "delete from `user[-]`.user" @@ -2200,11 +2339,12 @@ "TargetTabletType": "MASTER", "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from user for update", - "Query": "delete from user", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", + "Query": "delete from `user`", "Table": "user" } } +Gen4 plan same as above # update with shard targeting "update `user[-]`.user set name = 'myname'" @@ -2224,11 +2364,12 @@ ], "KsidVindex": "user_index", "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'myname' from user for update", - "Query": "update user set `name` = 'myname'", + "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'myname' from `user` for update", + "Query": "update `user` set `name` = 'myname'", "Table": "user" } } +Gen4 plan same as above # update with shard targeting without vindex "update `user[-]`.user_extra set val = 1" @@ -2248,3 +2389,4 @@ "Table": "user_extra" } } +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt index fbe30a371ef..a04993f1627 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt @@ -10,11 +10,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user", 
- "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" } } +Gen4 plan same as above # Query that always return empty "select id from user where someColumn = null" @@ -28,11 +29,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where someColumn = null", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where someColumn = null", + "Table": "`user`" } } +Gen4 plan same as above # Single table unique vindex route "select id from user where user.id = 5" @@ -46,15 +48,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.id = 5", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.id = 5", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above # Single table unique vindex route, but complex expr "select id from user where user.id = 5+5" @@ -68,11 +71,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.id = 5 + 5", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.id = 5 + 5", + "Table": "`user`" } } +Gen4 plan same as above # Single table multiple unique vindex match "select id from music where id = 5 and user_id = 4" @@ -95,6 +99,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # Single table multiple non-unique vindex match "select id from user where costly = 'aa' and name = 'bb'" @@ -108,15 +113,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where costly = 'aa' and `name` = 'bb'", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", 
+ "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'", + "Table": "`user`", "Values": [ "bb" ], "Vindex": "name_user_map" } } +Gen4 plan same as above # Single table multiple non-unique vindex match for IN clause "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')" @@ -130,9 +136,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where costly in ('aa', 'bb') and `name` in ::__vals", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals", + "Table": "`user`", "Values": [ [ "aa", @@ -155,9 +161,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", + "Table": "`user`", "Values": [ [ "aa", @@ -180,9 +186,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))", + "Table": "`user`", "Values": [ [ "bb", @@ -205,9 +211,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", + "Table": "`user`", "Values": [ [ "bb", @@ -230,9 +236,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 
1", - "Query": "select id from user where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))", + "Table": "`user`", "Values": [ [ "aa", @@ -255,9 +261,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))", + "Table": "`user`", "Values": [ [ "dd" @@ -279,9 +285,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (col, `name`) in (('aa', 'bb')) and id = 5", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -301,9 +307,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", + "Table": "`user`", "Values": [ [ "bb", @@ -326,9 +332,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", + "Table": "`user`", "Values": [ [ "bb", @@ -351,9 +357,9 @@ "Name": 
"user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", + "Table": "`user`", "Values": [ [ "aa", @@ -376,9 +382,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", + "Table": "`user`" } } @@ -394,9 +400,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (col1, `name`) in (select * from music where music.user_id = user.id)", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)", + "Table": "`user`" } } @@ -412,9 +418,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where (col1, `name`) in (('aa', 1 + 1))", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))", + "Table": "`user`" } } @@ -430,9 +436,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select Id from user where 1 != 1", - "Query": "select Id from user where 1 in ('aa', 'bb')", - "Table": "user" + "FieldQuery": "select Id from `user` where 1 != 1", + "Query": "select Id from `user` where 1 in ('aa', 'bb')", + "Table": "`user`" } } @@ -448,9 +454,9 @@ "Name": "user", "Sharded": true }, - 
"FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where `name` in (col, 'bb')", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `name` in (col, 'bb')", + "Table": "`user`" } } @@ -466,15 +472,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where `name` = :a", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `name` = :a", + "Table": "`user`", "Values": [ ":a" ], "Vindex": "name_user_map" } } +Gen4 plan same as above # Single table equality route with unsigned value "select id from user where name = 18446744073709551615" @@ -488,15 +495,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where `name` = 18446744073709551615", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `name` = 18446744073709551615", + "Table": "`user`", "Values": [ 18446744073709551615 ], "Vindex": "name_user_map" } } +Gen4 plan same as above # Single table in clause list arg "select id from user where name in ::list" @@ -510,9 +518,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where `name` in ::__vals", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `name` in ::__vals", + "Table": "`user`", "Values": [ "::list" ], @@ -532,9 +540,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5", - "Table": "user", + "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = 
user_extra.user_id where 1 != 1", + "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where `user`.id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -554,9 +562,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", - "Table": "user", + "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", + "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -576,9 +584,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5", - "Table": "user", + "FieldQuery": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where 1 != 1", + "Query": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where `user`.id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -598,9 +606,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", - "Table": "user", + "FieldQuery": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where 1 != 1", + "Query": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -617,7 +625,7 @@ 
"OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -626,9 +634,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user where user.id = 5", - "Table": "user", + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where `user`.id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -648,6 +656,7 @@ ] } } +Gen4 plan same as above # Multi-route unique vindex route on both routes "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5" @@ -658,7 +667,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -667,9 +676,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user where user.id = 5", - "Table": "user", + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where `user`.id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -703,7 +712,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -712,9 +721,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -744,7 +753,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", 
"Inputs": [ { "OperatorType": "Route", @@ -753,9 +762,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user where 1 = 1", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where 1 = 1", + "Table": "`user`" }, { "OperatorType": "Route", @@ -784,9 +793,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.col = 5 and user.id in ::__vals", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals", + "Table": "`user`", "Values": [ [ 1, @@ -809,9 +818,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in ::__vals", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals", + "Table": "`user`", "Values": [ [ 1, @@ -834,9 +843,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id or col as val from user where 1 != 1", - "Query": "select id or col as val from user where user.col = 5 and user.id in (1, 2) and user.`name` = 'aa'", - "Table": "user", + "FieldQuery": "select id or col as val from `user` where 1 != 1", + "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'", + "Table": "`user`", "Values": [ "aa" ], @@ -856,9 +865,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.col = false and user.id in (1, 2) and user.`name` = 'aa'", - "Table": "user", + "FieldQuery": "select id from `user` where 1 
!= 1", + "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'", + "Table": "`user`", "Values": [ "aa" ], @@ -878,9 +887,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.col = 5 and user.id in (1, 2) and user.`name` = 'aa' and user.id = 1", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1", + "Table": "`user`", "Values": [ 1 ], @@ -900,9 +909,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.id = 1 and user.`name` = 'aa' and user.id in (1, 2) and user.col = 5", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5", + "Table": "`user`", "Values": [ 1 ], @@ -922,11 +931,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.id = 1 or user.`name` = 'aa' and user.id in (1, 2)", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)", + "Table": "`user`" } } +Gen4 plan same as above # Unsharded route "select unsharded.id from user join unsharded where unsharded.id = user.id" @@ -937,7 +947,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -946,9 +956,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id from user where 1 != 1", - "Query": "select user.id from user", - "Table": "user" + "FieldQuery": "select `user`.id from 
`user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -964,6 +974,44 @@ ] } } +{ + "QueryType": "SELECT", + "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "-2", + "TableName": "unsharded_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectUnsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select unsharded.id, unsharded.id from unsharded where 1 != 1", + "Query": "select unsharded.id, unsharded.id from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where `user`.id = :unsharded_id", + "Table": "`user`", + "Values": [ + ":unsharded_id" + ], + "Vindex": "user_index" + } + ] + } +} # routing rules: choose the redirected table "select col from route1 where id = 1" @@ -977,9 +1025,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user as route1 where 1 != 1", - "Query": "select col from user as route1 where id = 1", - "Table": "user", + "FieldQuery": "select col from `user` as route1 where 1 != 1", + "Query": "select col from `user` as route1 where id = 1", + "Table": "`user`", "Values": [ 1 ], @@ -1013,7 +1061,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_extra_user", + "TableName": "user_extra_`user`", "Inputs": [ { "OperatorType": "Route", @@ -1033,9 +1081,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.m from user as u where 1 != 1", - "Query": "select u.m from user as u where u.id in ::__vals and u.id in (select m2 from user where user.id = u.id and user.col = :user_extra_col)", - "Table": "user", + "FieldQuery": "select u.m from 
`user` as u where 1 != 1", + "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col)", + "Table": "`user`", "Values": [ [ ":user_extra_col", @@ -1057,7 +1105,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_extra_user", + "TableName": "user_extra_`user`", "Inputs": [ { "OperatorType": "Route", @@ -1077,9 +1125,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.m from user as u where 1 != 1", - "Query": "select u.m from user as u where u.id = 5 and u.id in (select m2 from user where user.id = 5)", - "Table": "user", + "FieldQuery": "select u.m from `user` as u where 1 != 1", + "Query": "select u.m from `user` as u where u.id = 5 and u.id in (select m2 from `user` where `user`.id = 5)", + "Table": "`user`", "Values": [ 5 ], @@ -1098,7 +1146,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_extra_user", + "TableName": "user_extra_`user`", "Inputs": [ { "OperatorType": "Route", @@ -1118,9 +1166,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.m from user as u where 1 != 1", - "Query": "select u.m from user as u where u.id in ::__vals and u.id in (select m2 from user where user.id = u.id and user.col = :user_extra_col and user.id in (select m3 from user_extra where user_extra.user_id = user.id))", - "Table": "user", + "FieldQuery": "select u.m from `user` as u where 1 != 1", + "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id))", + "Table": "`user`", "Values": [ [ ":user_extra_col", @@ -1145,9 +1193,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where user.col in (select user_extra.col from 
user_extra where user_extra.user_id = user.id)", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)", + "Table": "`user`" } } @@ -1163,9 +1211,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", + "Table": "`user`", "Values": [ 5 ], @@ -1185,9 +1233,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", + "Table": "`user`", "Values": [ "aa" ], @@ -1207,9 +1255,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", + "Table": "`user`", "Values": [ ":a" ], @@ -1233,9 +1281,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id2 from user as uu where 1 != 1", - "Query": "select id2 from user as uu where id in (select id from user where id = uu.id and user.col in (select 
user_extra.col from user_extra where user_extra.user_id = uu.id))", - "Table": "user" + "FieldQuery": "select id2 from `user` as uu where 1 != 1", + "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", + "Table": "`user`" } } @@ -1256,9 +1304,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1267,9 +1315,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", + "Table": "`user`", "Values": [ "::__sq1" ], @@ -1295,9 +1343,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1306,9 +1354,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where :__sq_has_values1 = 0 or id not in ::__sq1", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1", + "Table": "`user`" } ] } @@ -1330,9 +1378,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + 
"Table": "`user`" }, { "OperatorType": "Route", @@ -1341,9 +1389,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where :__sq_has_values1", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where :__sq_has_values1", + "Table": "`user`" } ] } @@ -1365,9 +1413,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1376,9 +1424,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where id = :__sq1", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where id = :__sq1", + "Table": "`user`", "Values": [ ":__sq1" ], @@ -1408,9 +1456,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id3 from user where 1 != 1", - "Query": "select id3 from user", - "Table": "user" + "FieldQuery": "select id3 from `user` where 1 != 1", + "Query": "select id3 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1419,9 +1467,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id2 from user where 1 != 1", - "Query": "select id2 from user where :__sq_has_values1 = 1 and id2 in ::__sq1", - "Table": "user" + "FieldQuery": "select id2 from `user` where 1 != 1", + "Query": "select id2 from `user` where :__sq_has_values1 = 1 and id2 in ::__sq1", + "Table": "`user`" } ] }, @@ -1432,9 +1480,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id1 from user where 1 != 1", - "Query": "select id1 from user where id = :__sq2", - "Table": "user", + "FieldQuery": "select id1 from `user` where 1 != 1", + "Query": "select id1 from `user` where id = :__sq2", + "Table": 
"`user`", "Values": [ ":__sq2" ], @@ -1456,9 +1504,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user where id = (select id from user as route1 where route1.id = user.id)", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)", + "Table": "`user`" } } @@ -1489,9 +1537,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user where id = :__sq1", - "Table": "user", + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where id = :__sq1", + "Table": "`user`", "Values": [ ":__sq1" ], @@ -1513,9 +1561,28 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where 1 != 1", - "Query": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", - "Table": "user", + "FieldQuery": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where 1 != 1", + "Query": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where `user`.Id = 5", + "Table": "`user`", + "Values": [ + 5 + ], + "Vindex": "user_index" + } +} +{ + "QueryType": "SELECT", + "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.Id from `user`, user_extra where 1 != 1", + "Query": "select user_extra.Id from `user`, user_extra where `user`.Id = 5 and `user`.iD = user_extra.User_Id", + "Table": "`user`, user_extra", "Values": [ 5 ], @@ -1535,9 +1602,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": 
"select id from user where 1 != 1", - "Query": "select id from user where database()", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where database()", + "Table": "`user`" } } @@ -1564,6 +1631,7 @@ "Table": "music" } } +Gen4 plan same as above # SELECT with IS NULL "select id from music where id is null" @@ -1604,6 +1672,7 @@ "Table": "music" } } +Gen4 plan same as above # Single table with unique vindex match and null match "select id from music where user_id = 4 and id = null" @@ -1622,6 +1691,7 @@ "Table": "music" } } +Gen4 plan same as above # Single table with unique vindex match and IN (null) "select id from music where user_id = 4 and id IN (null)" @@ -1683,21 +1753,7 @@ # query trying to query two different keyspaces at the same time "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'" -{ - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectDBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARBINARY(\"user\"), VARBINARY(\"main\")]" - } -} +"two predicates for specifying the database are not supported" # information_schema query using database() func "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()" @@ -1730,7 +1786,7 @@ }, "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARBINARY(\"ks\")]" + "SysTableTableSchema": "VARBINARY(\"ks\")" } } @@ -1765,8 +1821,8 @@ }, "FieldQuery": "select * from 
INFORMATION_SCHEMA.`TABLES` where 1 != 1", "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :__vttablename", - "SysTableTableName": "[VARBINARY(\"route1\")]", - "SysTableTableSchema": "[VARBINARY(\"ks\")]" + "SysTableTableName": "VARBINARY(\"route1\")", + "SysTableTableSchema": "VARBINARY(\"ks\")" } } @@ -1784,6 +1840,88 @@ }, "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and other_column = 42", - "SysTableTableSchema": "[VARBINARY(\"ks\")]" + "SysTableTableSchema": "VARBINARY(\"ks\")" + } +} + +# pullout sq after pullout sq +"select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)" +{ + "QueryType": "SELECT", + "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", + "Instructions": { + "OperatorType": "Subquery", + "Variant": "PulloutIn", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.user_id = 42", + "Table": "user_extra", + "Values": [ + 42 + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Subquery", + "Variant": "PulloutIn", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.user_id = 411", + "Table": "user_extra", + "Values": [ + 411 + ], + "Vindex": "user_index" + }, + 
{ + "OperatorType": "Route", + "Variant": "SelectIN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and not (:__sq_has_values2 = 1 and id in ::__sq2)", + "Table": "`user`", + "Values": [ + "::__sq1" + ], + "Vindex": "user_index" + } + ] + } + ] + } +} + +# able to isolate table_schema value even when hidden inside of ORs +"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')" +{ + "QueryType": "SELECT", + "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", + "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (other_column = 42 or TABLE_SCHEMA = 'ks') and (other_column = 42 or foobar = 'value')", + "SysTableTableSchema": "VARBINARY(\"ks\")" } } diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases.txt b/go/vt/vtgate/planbuilder/testdata/flush_cases.txt new file mode 100644 index 00000000000..b01cf42494b --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/flush_cases.txt @@ -0,0 +1,53 @@ +# Flush statement +"flush tables unsharded, music" +{ + "QueryType": "FLUSH", + "Original": "flush tables unsharded, music", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush tables unsharded, music", + "SingleShardOnly": false + } +} + +# Flush statement with no tables +"flush local tables with read lock" +{ + "QueryType": "FLUSH", + "Original": "flush local tables with read lock", + 
"Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables with read lock", + "SingleShardOnly": false + } +} + +# Flush statement with flush options +"flush no_write_to_binlog hosts, logs" +{ + "QueryType": "FLUSH", + "Original": "flush no_write_to_binlog hosts, logs", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local hosts, logs", + "SingleShardOnly": false + } +} diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt new file mode 100644 index 00000000000..991b495c36e --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt @@ -0,0 +1,138 @@ +# Flush statement +"flush local tables user, unsharded_a, user_extra with read lock" +{ + "QueryType": "FLUSH", + "Original": "flush local tables user, unsharded_a, user_extra with read lock", + "Instructions": { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables unsharded_a with read lock", + "SingleShardOnly": false + }, + { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables `user`, user_extra with read lock", + "SingleShardOnly": false + } + ] + } +} + +# Flush statement with flush options +"flush no_write_to_binlog hosts, logs" +"keyspace not specified" + +# Flush statement with routing rules +"flush local tables route1, route2" +{ + "QueryType": "FLUSH", + "Original": "flush local tables route1, route2", + "Instructions": { + "OperatorType": 
"Concatenate", + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables unsharded", + "SingleShardOnly": false + }, + { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables `user`", + "SingleShardOnly": false + } + ] + } +} + +# Incorrect tables in flush +"flush tables user.a with read lock" +"table a not found" + +# Unknown tables in unsharded keyspaces are allowed +"flush tables main.a with read lock" +{ + "QueryType": "FLUSH", + "Original": "flush tables main.a with read lock", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush tables a with read lock", + "SingleShardOnly": false + } +} + +# Flush statement with 3 keyspaces +"flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock" +{ + "QueryType": "FLUSH", + "Original": "flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock", + "Instructions": { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables unsharded_a with read lock", + "SingleShardOnly": false + }, + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main_2", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables unsharded_tab with read lock", + "SingleShardOnly": false + }, + { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "flush local tables `user`, user_extra with read lock", + "SingleShardOnly": false + } 
+ ] + } +} diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.txt b/go/vt/vtgate/planbuilder/testdata/from_cases.txt index a50a63fcee0..45aea022d18 100644 --- a/go/vt/vtgate/planbuilder/testdata/from_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/from_cases.txt @@ -10,11 +10,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" } } +Gen4 plan same as above # Single table unsharded "select col from unsharded" @@ -33,6 +34,7 @@ "Table": "unsharded" } } +Gen4 plan same as above # Select from sequence "select next 2 values from seq" @@ -51,6 +53,7 @@ "Table": "seq" } } +Gen4 plan same as above # Select from reference "select * from ref" @@ -69,6 +72,7 @@ "Table": "ref" } } +Gen4 plan same as above # Single information_schema query "select col from information_schema.foo" @@ -114,7 +118,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_music", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -123,9 +127,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user", - "Table": "user" + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -141,6 +145,7 @@ ] } } +Gen4 plan same as above # routing rules where table name matches, and there's no alias. 
"select * from second_user.user" @@ -154,9 +159,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user`", + "Table": "`user`" } } @@ -172,9 +177,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user as a where 1 != 1", - "Query": "select * from user as a", - "Table": "user" + "FieldQuery": "select * from `user` as a where 1 != 1", + "Query": "select * from `user` as a", + "Table": "`user`" } } @@ -190,9 +195,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user as route1 where 1 != 1", - "Query": "select * from user as route1", - "Table": "user" + "FieldQuery": "select * from `user` as route1 where 1 != 1", + "Query": "select * from `user` as route1", + "Table": "`user`" } } @@ -208,9 +213,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user as a where 1 != 1", - "Query": "select * from user as a", - "Table": "user" + "FieldQuery": "select * from `user` as a where 1 != 1", + "Query": "select * from `user` as a", + "Table": "`user`" } } @@ -226,19 +231,21 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user as master_redirect where 1 != 1", - "Query": "select * from user as master_redirect", - "Table": "user" + "FieldQuery": "select * from `user` as master_redirect where 1 != 1", + "Query": "select * from `user` as master_redirect", + "Table": "`user`" } } # routing rules bad table "select * from bad_table" "keyspace noks not found in vschema" +Gen4 plan same as above # routing rules disabled table "select * from disabled" "table disabled has been disabled" +Gen4 plan same as above # ',' join "select music.col from user, music" @@ -249,7 +256,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_music", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": 
"Route", @@ -258,9 +265,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user", - "Table": "user" + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -276,6 +283,7 @@ ] } } +Gen4 plan same as above # ',' join unsharded "select u1.a, u2.a from unsharded u1, unsharded u2" @@ -357,7 +365,7 @@ "OperatorType": "Join", "Variant": "LeftJoin", "JoinColumnIndexes": "-1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -366,9 +374,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.col, u.a from user as u where 1 != 1", - "Query": "select u.col, u.a from user as u", - "Table": "user" + "FieldQuery": "select u.col, u.a from `user` as u where 1 != 1", + "Query": "select u.col, u.a from `user` as u", + "Table": "`user`" }, { "OperatorType": "Route", @@ -394,13 +402,13 @@ "OperatorType": "Join", "Variant": "LeftJoin", "JoinColumnIndexes": "-1", - "TableName": "user_unsharded_unsharded", + "TableName": "`user`_unsharded_unsharded", "Inputs": [ { "OperatorType": "Join", "Variant": "LeftJoin", "JoinColumnIndexes": "-1,1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -409,9 +417,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -450,7 +458,7 @@ "OperatorType": "Join", "Variant": "LeftJoin", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra_unsharded", + "TableName": "`user`_user_extra_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -459,9 +467,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col 
from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Join", @@ -559,13 +567,13 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_unsharded_unsharded", + "TableName": "`user`_unsharded_unsharded", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -574,9 +582,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -615,7 +623,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -624,9 +632,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -652,7 +660,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_unsharded", + "TableName": "`user`_`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -661,14 +669,14 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Join", 
"Variant": "Join", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -677,9 +685,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u1 where 1 != 1", - "Query": "select 1 from user as u1", - "Table": "user" + "FieldQuery": "select 1 from `user` as u1 where 1 != 1", + "Query": "select 1 from `user` as u1", + "Table": "`user`" }, { "OperatorType": "Route", @@ -710,9 +718,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user use index (a) where 1 != 1", - "Query": "select user.col from user use index (a)", - "Table": "user" + "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1", + "Query": "select `user`.col from `user` use index (a)", + "Table": "`user`" } } @@ -728,9 +736,24 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user join user_extra on user.id = user_extra.user_id where 1 != 1", - "Query": "select user.col from user join user_extra on user.id = user_extra.user_id", - "Table": "user" + "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", + "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id", + "Table": "`user`" + } +} +{ + "QueryType": "SELECT", + "Original": "select user.col from user join user_extra on user.id = user_extra.user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1", + "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id", + "Table": "`user`, user_extra" } } @@ -746,9 +769,24 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user join user_extra on user.id = user_extra.user_id where 1 != 1", - "Query": "select user.col from user join user_extra on user.id = 
user_extra.user_id", - "Table": "user" + "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", + "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id", + "Table": "`user`" + } +} +{ + "QueryType": "SELECT", + "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1", + "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id", + "Table": "`user`, user_extra" } } @@ -764,9 +802,24 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id where 1 != 1", - "Query": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", - "Table": "user" + "FieldQuery": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id where 1 != 1", + "Query": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id", + "Table": "`user`" + } +} +{ + "QueryType": "SELECT", + "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1", + "Query": "select `user`.col from `user`, user_extra where `user`.col between 1 and 2 and `user`.id = user_extra.user_id", + "Table": "`user`, user_extra" } } @@ -782,9 +835,24 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user join user_extra on 
user_extra.user_id = user.id where 1 != 1", - "Query": "select user.col from user join user_extra on user_extra.user_id = user.id", - "Table": "user" + "FieldQuery": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id where 1 != 1", + "Query": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id", + "Table": "`user`" + } +} +{ + "QueryType": "SELECT", + "Original": "select user.col from user join user_extra on user_extra.user_id = user.id", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1", + "Query": "select `user`.col from `user`, user_extra where user_extra.user_id = `user`.id", + "Table": "`user`, user_extra" } } @@ -800,9 +868,28 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id where 1 != 1", - "Query": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", - "Table": "user", + "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id where 1 != 1", + "Query": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id", + "Table": "`user`", + "Values": [ + 5 + ], + "Vindex": "user_index" + } +} +{ + "QueryType": "SELECT", + "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1", + "Query": "select `user`.col from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id", + "Table": "`user`, user_extra", "Values": [ 5 ], @@ -819,7 +906,7 @@ 
"OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -828,9 +915,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col, user.id from user where 1 != 1", - "Query": "select user.col, user.id from user", - "Table": "user" + "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", + "Query": "select `user`.col, `user`.id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -856,7 +943,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -865,9 +952,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user where user.id = 5", - "Table": "user", + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where `user`.id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -887,6 +974,7 @@ ] } } +Gen4 plan same as above # sharded join, non-col reference LHS "select user.col from user join user_extra on 5 = user.id" @@ -897,7 +985,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -906,9 +994,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user where user.id = 5", - "Table": "user", + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where `user`.id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -928,6 +1016,7 @@ ] } } +Gen4 plan same as above # sharded join, non-vindex col "select user.col from user join user_extra on user.id = user_extra.col" @@ -938,7 +1027,7 @@ "OperatorType": "Join", 
"Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -947,9 +1036,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col, user.id from user where 1 != 1", - "Query": "select user.col, user.id from user", - "Table": "user" + "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", + "Query": "select `user`.col, `user`.id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -965,6 +1054,44 @@ ] } } +{ + "QueryType": "SELECT", + "Original": "select user.col from user join user_extra on user.id = user_extra.col", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "1", + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where `user`.id = :user_extra_col", + "Table": "`user`", + "Values": [ + ":user_extra_col" + ], + "Vindex": "user_index" + } + ] + } +} # sharded join, non-unique vindex "select user.col from user_extra join user on user_extra.user_id = user.name" @@ -975,7 +1102,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_extra_user", + "TableName": "user_extra_`user`", "Inputs": [ { "OperatorType": "Route", @@ -995,9 +1122,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user where user.`name` = :user_extra_user_id", - "Table": "user", + "FieldQuery": "select 
`user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where `user`.`name` = :user_extra_user_id", + "Table": "`user`", "Values": [ ":user_extra_user_id" ], @@ -1019,9 +1146,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user join ref where 1 != 1", - "Query": "select user.col from user join ref", - "Table": "user" + "FieldQuery": "select `user`.col from `user` join ref where 1 != 1", + "Query": "select `user`.col from `user` join ref", + "Table": "`user`" } } @@ -1055,9 +1182,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ref.col from ref join user where 1 != 1", - "Query": "select ref.col from ref join user", - "Table": "user" + "FieldQuery": "select ref.col from ref join `user` where 1 != 1", + "Query": "select ref.col from ref join `user`", + "Table": "`user`" } } @@ -1074,9 +1201,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ref.col from ref join (select aa from user where 1 != 1) as user where 1 != 1", - "Query": "select ref.col from ref join (select aa from user where user.id = 1) as user", - "Table": "user", + "FieldQuery": "select ref.col from ref join (select aa from `user` where 1 != 1) as `user` where 1 != 1", + "Query": "select ref.col from ref join (select aa from `user` where `user`.id = 1) as `user`", + "Table": "`user`", "Values": [ 1 ], @@ -1133,9 +1260,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from (select id, col from user where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from user where id = 5) as t", - "Table": "user", + "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1", + "Query": "select id from (select id, col from `user` where id = 5) as t", + "Table": "`user`", "Values": [ 5 ], @@ -1155,9 +1282,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id from (select id from user where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 
!= 1", - "Query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id", - "Table": "user", + "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", + "Query": "select t.id from (select id from `user` where id = 5) as t join user_extra on t.id = user_extra.user_id", + "Table": "`user`", "Values": [ 5 ], @@ -1177,9 +1304,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id from (select user.id from user where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", - "Table": "user", + "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", + "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t join user_extra on t.id = user_extra.user_id", + "Table": "`user`", "Values": [ 5 ], @@ -1203,8 +1330,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id from user_extra join (select id from user where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id", + "FieldQuery": "select t.id from user_extra join (select id from `user` where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1", + "Query": "select t.id from user_extra join (select id from `user` where id = 5) as t on t.id = user_extra.user_id", "Table": "user_extra" } } @@ -1218,7 +1345,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1227,9 +1354,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id from (select id from user where 1 != 
1) as t where 1 != 1", - "Query": "select t.id from (select id from user where id = 5) as t", - "Table": "user", + "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id from (select id from `user` where id = 5) as t", + "Table": "`user`", "Values": [ 5 ], @@ -1262,9 +1389,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from (select id, col from user as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from user as route1 where id = 5) as t", - "Table": "user", + "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1", + "Query": "select id from (select id, col from `user` as route1 where id = 5) as t", + "Table": "`user`", "Values": [ 5 ], @@ -1284,9 +1411,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from (select id, col from user as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from user as route1) as t where id = 5", - "Table": "user", + "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1", + "Query": "select id from (select id, col from `user` as route1) as t where id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -1306,9 +1433,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.col, e.col from (select col from user where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1", - "Query": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e", - "Table": "user", + "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1", + "Query": "select u.col, e.col from (select col from `user` where id = 5) as u join (select col from user_extra where user_id = 5) as e", + "Table": "`user`", "Values": [ 5 ], @@ -1431,7 
+1558,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra_unsharded", + "TableName": "`user`_user_extra_unsharded", "Inputs": [ { "OperatorType": "Subquery", @@ -1444,7 +1571,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1453,9 +1580,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id, user.col1 from user where 1 != 1", - "Query": "select user.id, user.col1 from user", - "Table": "user" + "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1502,7 +1629,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1511,9 +1638,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id, user.col1, user.col from user where 1 != 1", - "Query": "select user.id, user.col1, user.col from user", - "Table": "user" + "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col1, `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1541,7 +1668,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "unsharded_a_user_user_extra", + "TableName": "unsharded_a_`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1564,7 +1691,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1573,9 +1700,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id, user.col1 from user where 1 != 1", - "Query": "select user.id, 
user.col1 from user", - "Table": "user" + "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1612,9 +1739,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1647,9 +1774,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1682,9 +1809,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1717,15 +1844,15 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "unsharded_user", + "TableName": "unsharded_`user`", "Inputs": [ { "OperatorType": "Route", @@ -1745,9 +1872,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user where :__sq_has_values1 = 1 and user.col in ::__sq1", - "Table": "user" + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", + "Table": "`user`" } ] } @@ -1765,7 +1892,7 @@ 
"OperatorType": "Join", "Variant": "LeftJoin", "JoinColumnIndexes": "-1", - "TableName": "unsharded_user", + "TableName": "unsharded_`user`", "Inputs": [ { "OperatorType": "Route", @@ -1789,9 +1916,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1800,9 +1927,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user where :__sq_has_values1 = 1 and user.col in ::__sq1", - "Table": "user" + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", + "Table": "`user`" } ] } @@ -1820,7 +1947,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "unsharded_user_unsharded_a", + "TableName": "unsharded_`user`_unsharded_a", "Inputs": [ { "OperatorType": "Subquery", @@ -1833,15 +1960,15 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "unsharded_user", + "TableName": "unsharded_`user`", "Inputs": [ { "OperatorType": "Route", @@ -1861,9 +1988,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user where :__sq_has_values1 = 1 and user.col in ::__sq1", - "Table": "user" + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", + "Table": "`user`" } ] } @@ -1893,7 +2020,7 @@ "OperatorType": "Join", "Variant": "Join", 
"JoinColumnIndexes": "-1,1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -1902,9 +2029,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1, user.col2 from user where 1 != 1", - "Query": "select user.col1, user.col2 from user", - "Table": "user" + "FieldQuery": "select `user`.col1, `user`.col2 from `user` where 1 != 1", + "Query": "select `user`.col1, `user`.col2 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1951,9 +2078,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user join user_extra on user.ID = user_extra.User_Id where 1 != 1", - "Query": "select user.col from user join user_extra on user.ID = user_extra.User_Id", - "Table": "user" + "FieldQuery": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id where 1 != 1", + "Query": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id", + "Table": "`user`" } } @@ -1973,7 +2100,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1982,9 +2109,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id from user where 1 != 1", - "Query": "select user.id from user", - "Table": "user" + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -2055,11 +2182,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select :__lastInsertId as `last_insert_id()` from user where 1 != 1", - "Query": "select :__lastInsertId as `last_insert_id()` from user", - "Table": "user" + "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1", + "Query": "select :__lastInsertId as `last_insert_id()` from `user`", + "Table": "`user`" } } +Gen4 plan same as above # 
last_insert_id for unsharded route "select last_insert_id() from main.unsharded" @@ -2078,6 +2206,7 @@ "Table": "unsharded" } } +Gen4 plan same as above # join with USING construct "select user.id from user join user_extra using(id)" @@ -2088,7 +2217,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -2097,9 +2226,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id from user where 1 != 1", - "Query": "select user.id from user", - "Table": "user" + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -2115,6 +2244,44 @@ ] } } +{ + "QueryType": "SELECT", + "Original": "select user.id from user join user_extra using(id)", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "1", + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.id from user_extra where 1 != 1", + "Query": "select user_extra.id from user_extra", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user` where `user`.id = :user_extra_id", + "Table": "`user`", + "Values": [ + ":user_extra_id" + ], + "Vindex": "user_index" + } + ] + } +} # verify ',' vs JOIN precedence "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a" @@ -2123,6 +2290,7 @@ # first expression fails for ',' join (code coverage: ensure error is returned) "select user.foo.col from user.foo, user" "table foo not found" +Gen4 plan same as above # table names should be case-sensitive 
"select unsharded.id from unsharded where Unsharded.val = 1" @@ -2131,18 +2299,20 @@ # implicit table reference for sharded keyspace "select user.foo.col from user.foo" "table foo not found" +Gen4 plan same as above # duplicate symbols "select user.id from user join user" -"duplicate symbol: user" +"duplicate symbol: `user`" # duplicate symbols for merging routes "select user.id from user join user_extra user on user.id = user.user_id" -"duplicate symbol: user" +"duplicate symbol: `user`" # non-existent table "select c from t" "table t not found" +Gen4 plan same as above # non-existent table on left of join "select c from t join user" @@ -2192,15 +2362,34 @@ "QueryType": "SELECT", "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", "Instructions": { - "OperatorType": "Route", - "Variant": "SelectDBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from 
information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :__vttablename and rc.constraint_schema = database() and rc.table_name = :__vttablename", - "SysTableTableName": "[VARBINARY(\":vtg1\"), VARBINARY(\":vtg1\")]" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "1,2,3,4,-1,-2", + "TableName": "_", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select rc.update_rule as on_update, rc.delete_rule as on_delete, rc.constraint_schema, rc.constraint_name from information_schema.referential_constraints as rc where 1 != 1", + "Query": "select rc.update_rule as on_update, rc.delete_rule as on_delete, rc.constraint_schema, rc.constraint_name from information_schema.referential_constraints as rc where rc.constraint_schema = database() and rc.table_name = :__vttablename", + "SysTableTableName": "VARBINARY(\":vtg1\")" + }, + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name` from information_schema.key_column_usage as fk where 1 != 1", + "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name` from information_schema.key_column_usage as fk where fk.constraint_schema = :rc_constraint_schema and fk.constraint_name = :rc_constraint_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :__vttablename", + "SysTableTableName": "VARBINARY(\":vtg1\")" + } + ] } } @@ -2218,7 +2407,7 
@@ }, "FieldQuery": "select * from information_schema.schemata where 1 != 1", "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname", - "SysTableTableSchema": "[VARBINARY(\"user\")]" + "SysTableTableSchema": "VARBINARY(\"user\")" } } @@ -2236,8 +2425,8 @@ }, "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1", "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :__vttablename", - "SysTableTableName": "[VARBINARY(\"table_name\")]", - "SysTableTableSchema": "[VARBINARY(\"schema_name\")]" + "SysTableTableName": "VARBINARY(\"table_name\")", + "SysTableTableSchema": "VARBINARY(\"schema_name\")" } } @@ -2247,16 +2436,36 @@ "QueryType": "SELECT", "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", "Instructions": { - "OperatorType": "Route", - "Variant": "SelectDBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as 
`column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :__vttablename and rc.constraint_schema = :__vtschemaname and rc.table_name = :__vttablename", - "SysTableTableName": "[VARBINARY(\"table_name\"), VARBINARY(\"table_name\")]", - "SysTableTableSchema": "[VARBINARY(\"table_schema\"), VARBINARY(\"table_schema\")]" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "1,2,3,4,-1,-2", + "TableName": "_", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select rc.update_rule as on_update, rc.delete_rule as on_delete, rc.constraint_schema, rc.constraint_name from information_schema.referential_constraints as rc where 1 != 1", + "Query": "select rc.update_rule as on_update, rc.delete_rule as on_delete, rc.constraint_schema, rc.constraint_name from information_schema.referential_constraints as rc where rc.constraint_schema = :__vtschemaname and rc.table_name = :__vttablename", + "SysTableTableName": "VARBINARY(\"table_name\")", + "SysTableTableSchema": "VARBINARY(\"table_schema\")" + }, + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name` from information_schema.key_column_usage as fk where 1 != 1", + "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name` from information_schema.key_column_usage as fk where 
fk.constraint_schema = :rc_constraint_schema and fk.constraint_name = :rc_constraint_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :__vttablename", + "SysTableTableName": "VARBINARY(\"table_name\")", + "SysTableTableSchema": "VARBINARY(\"table_schema\")" + } + ] } } @@ -2266,16 +2475,35 @@ "QueryType": "SELECT", "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'", "Instructions": { - "OperatorType": "Route", - "Variant": "SelectDBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc on cc.constraint_schema = tc.constraint_schema and cc.constraint_name = tc.constraint_name where 1 != 1", - "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc on cc.constraint_schema = tc.constraint_schema and cc.constraint_name = tc.constraint_name where tc.table_schema = :__vtschemaname and tc.table_name = :__vttablename and cc.constraint_schema = :__vtschemaname", - "SysTableTableName": "[VARBINARY(\"table_name\")]", - "SysTableTableSchema": "[VARBINARY(\"table_schema\"), VARBINARY(\"constraint_schema\")]" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "-1,-2", + "TableName": "_", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression, cc.constraint_schema from 
information_schema.check_constraints as cc where 1 != 1", + "Query": "select cc.constraint_name as `name`, cc.check_clause as expression, cc.constraint_schema from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname", + "SysTableTableSchema": "VARBINARY(\"constraint_schema\")" + }, + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from information_schema.table_constraints as tc where 1 != 1", + "Query": "select 1 from information_schema.table_constraints as tc where tc.constraint_schema = :cc_constraint_schema and tc.constraint_name = :cc_constraint_name and tc.table_schema = :__vtschemaname and tc.table_name = :__vttablename", + "SysTableTableName": "VARBINARY(\"table_name\")", + "SysTableTableSchema": "VARBINARY(\"table_schema\")" + } + ] } } @@ -2293,8 +2521,8 @@ }, "FieldQuery": "select column_name from information_schema.statistics where 1 != 1", "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :__vttablename order by seq_in_index asc", - "SysTableTableName": "[VARBINARY(\"table_name\")]", - "SysTableTableSchema": "[VARBINARY(\"table_schema\")]" + "SysTableTableName": "VARBINARY(\"table_name\")", + "SysTableTableSchema": "VARBINARY(\"table_schema\")" } } @@ -2312,8 +2540,8 @@ }, "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1", "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :__vttablename and column_name = 'column_name'", - "SysTableTableName": "[VARBINARY(\"table_name\")]", - "SysTableTableSchema": "[VARBINARY(\"table_schema\")]" + "SysTableTableName": "VARBINARY(\"table_name\")", + "SysTableTableSchema": "VARBINARY(\"table_schema\")" } } @@ -2348,7 +2576,7 @@ }, "FieldQuery": "select table_name from (select * from 
information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery", - "SysTableTableSchema": "[VARBINARY(\"table_schema\")]" + "SysTableTableSchema": "VARBINARY(\"table_schema\")" } } @@ -2366,8 +2594,8 @@ }, "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :__vttablename", - "SysTableTableName": "[VARBINARY(\"table_name\")]", - "SysTableTableSchema": "[VARBINARY(\"table_schema\")]" + "SysTableTableName": "VARBINARY(\"table_name\")", + "SysTableTableSchema": "VARBINARY(\"table_schema\")" } } @@ -2385,7 +2613,7 @@ }, "FieldQuery": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1", "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.table_schema = :__vtschemaname", - "SysTableTableSchema": "[VARBINARY(\"a\"), VARBINARY(\"a\")]" + "SysTableTableSchema": "VARBINARY(\"a\")" } } @@ -2403,7 +2631,7 @@ }, "FieldQuery": "select COUNT(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1", "Query": "select COUNT(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :__vttablename", - "SysTableTableName": "[VARBINARY(\"foo\")]", - "SysTableTableSchema": "[VARBINARY(\"performance_schema\")]" + "SysTableTableName": "VARBINARY(\"foo\")", + "SysTableTableSchema": "VARBINARY(\"performance_schema\")" } } diff --git a/go/vt/vtgate/planbuilder/testdata/large_cases.txt b/go/vt/vtgate/planbuilder/testdata/large_cases.txt new file mode 100644 index 00000000000..6db381e46c6 --- /dev/null +++ 
b/go/vt/vtgate/planbuilder/testdata/large_cases.txt @@ -0,0 +1,226 @@ +"select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y" +{ + "QueryType": "SELECT", + "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "-1", + "TableName": "`user`_user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "TableName": "user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1", + "Query": "select user_extra.user_id from user_extra where user_extra.user_id = :user_id", + "Table": "user_extra", + "Values": [ + ":user_id" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "TableName": "user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": 
"select 1 from user_metadata where 1 != 1", + "Query": "select 1 from user_metadata where user_metadata.user_id = :user_extra_user_id", + "Table": "user_metadata", + "Values": [ + ":user_extra_user_id" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "TableName": "music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music", + "Table": "music" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "TableName": "unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectUnsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select unsharded.x from unsharded where 1 != 1", + "Query": "select unsharded.x from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "TableName": "unsharded_a_unsharded_b_unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectUnsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from unsharded_a where 1 != 1", + "Query": "select 1 from unsharded_a where unsharded_a.y = :unsharded_x", + "Table": "unsharded_a" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "TableName": "unsharded_b_unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectUnsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from unsharded_b where 1 != 1", + "Query": "select 1 from unsharded_b", + "Table": "unsharded_b" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "TableName": "unsharded_auto_music_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectUnsharded", + 
"Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from unsharded_auto where 1 != 1", + "Query": "select 1 from unsharded_auto", + "Table": "unsharded_auto" + }, + { + "OperatorType": "Route", + "Variant": "SelectEqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music_extra where 1 != 1", + "Query": "select 1 from music_extra where music_extra.music_id = :music_id", + "Table": "music_extra", + "Values": [ + ":music_id" + ], + "Vindex": "music_user_map" + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } +} +{ + "QueryType": "SELECT", + "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "1", + "TableName": "music, music_extra_`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music, music_extra where 1 != 1", + "Query": "select 1 from music, music_extra where music.id = music_extra.music_id", + "Table": "music, music_extra" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "-1", + "TableName": "`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectScatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user`, user_extra, user_metadata where 1 != 1", + "Query": "select `user`.id from `user`, user_extra, user_metadata where `user`.id = user_extra.user_id and 
user_metadata.user_id = user_extra.user_id", + "Table": "`user`, user_extra, user_metadata" + }, + { + "OperatorType": "Route", + "Variant": "SelectUnsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where 1 != 1", + "Query": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where unsharded.x = unsharded_a.y", + "Table": "unsharded, unsharded_a, unsharded_auto, unsharded_b" + } + ] + } + ] + } +} diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.txt b/go/vt/vtgate/planbuilder/testdata/lock_cases.txt index 91f88105e8e..17737bc1171 100644 --- a/go/vt/vtgate/planbuilder/testdata/lock_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/lock_cases.txt @@ -39,6 +39,7 @@ "OperatorType": "Rows" } } +Gen4 plan same as above # lock tables write "lock tables t low_priority write" @@ -49,6 +50,7 @@ "OperatorType": "Rows" } } +Gen4 plan same as above # unlock tables "unlock tables" @@ -59,3 +61,4 @@ "OperatorType": "Rows" } } +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt index 1db6f534f56..53b22207510 100644 --- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt @@ -23,10 +23,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) from user where 1 != 1 group by a", + "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a", "OrderBy": "0 ASC", - "Query": "select a, b, count(*) from user group by a order by a asc", - "Table": "user" + "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a order by a asc", + "Table": "`user`" } ] } @@ -58,10 +58,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) as k from user where 1 != 1 group by a", + 
"FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a", "OrderBy": "0 ASC", - "Query": "select a, b, count(*) as k from user group by a order by a asc", - "Table": "user" + "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a order by a asc", + "Table": "`user`" } ] } @@ -93,10 +93,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) as k from user where 1 != 1 group by a", + "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a", "OrderBy": "0 ASC", - "Query": "select a, b, count(*) as k from user group by a order by a asc", - "Table": "user" + "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a order by a asc", + "Table": "`user`" } ] } @@ -132,10 +132,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) as k from user where 1 != 1 group by a", + "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a", "OrderBy": "0 ASC", - "Query": "select a, b, count(*) as k from user group by a order by a asc", - "Table": "user" + "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a order by a asc", + "Table": "`user`" } ] } @@ -169,10 +169,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) as k from user where 1 != 1 group by a", + "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a", "OrderBy": "0 ASC", - "Query": "select a, b, count(*) as k from user group by a order by 1 asc", - "Table": "user" + "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a order by 1 asc", + "Table": "`user`" } ] } @@ -189,7 +189,7 @@ "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "2 ASC, 1 ASC, 2 ASC", + "OrderBy": "0 ASC, 1 ASC, 0 ASC", "Inputs": [ { "OperatorType": "Aggregate", @@ 
-205,10 +205,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from user where 1 != 1 group by textcol1", - "OrderBy": "2 ASC, 2 ASC", - "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from user group by textcol1 order by textcol1 asc, textcol1 asc", - "Table": "user" + "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` where 1 != 1 group by textcol1", + "OrderBy": "0 ASC, 0 ASC", + "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` group by textcol1 order by textcol1 asc, textcol1 asc", + "Table": "`user`" } ] } @@ -235,8 +235,8 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "-1,-2", - "TableName": "user_user_extra", + "JoinColumnIndexes": "-1,-2,-3", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -245,9 +245,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id, user.col from user where 1 != 1", - "Query": "select user.id, user.col from user", - "Table": "user" + "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -281,8 +281,8 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "-1,-2,1", - "TableName": "user_music", + "JoinColumnIndexes": "-1,-2,1,2", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -291,9 +291,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a, user.col2 as b, user.id from user where 1 != 1", - "Query": "select user.col1 as a, user.col2 as b, user.id from user where user.id = 1", - "Table": "user", + "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1", + "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` 
where `user`.id = 1", + "Table": "`user`", "Values": [ 1 ], @@ -306,8 +306,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 as c from music where 1 != 1", - "Query": "select music.col3 as c from music where music.id = :user_id", + "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1", + "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id", "Table": "music", "Values": [ ":user_id" @@ -333,8 +333,8 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "-1,-2,1", - "TableName": "user_music", + "JoinColumnIndexes": "-1,-2,1,-3,2,-4", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -343,9 +343,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a, user.col2, user.id from user where 1 != 1", - "Query": "select user.col1 as a, user.col2, user.id from user where user.id = 1", - "Table": "user", + "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1", + "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1", + "Table": "`user`", "Values": [ 1 ], @@ -358,8 +358,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", + "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1", + "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id", "Table": "music", "Values": [ ":user_id" @@ -380,13 +380,13 @@ "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "3 ASC, 2 ASC", + "OrderBy": "1 ASC, 2 ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "-1,-2,1,-3", - "TableName": "user_unsharded", + 
"JoinColumnIndexes": "-1,-2,1,-3,2", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -395,9 +395,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from user as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from user as u", - "Table": "user" + "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1", + "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", + "Table": "`user`" }, { "OperatorType": "Route", @@ -406,8 +406,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select un.col2 from unsharded as un where 1 != 1", - "Query": "select un.col2 from unsharded as un", + "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1", + "Query": "select un.col2, weight_string(un.col2) from unsharded as un", "Table": "unsharded" } ] @@ -424,13 +424,13 @@ "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "3 ASC, 2 ASC", + "OrderBy": "1 ASC, 2 ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "1,2,-1,3", - "TableName": "unsharded_user", + "JoinColumnIndexes": "1,2,-1,3,-2", + "TableName": "unsharded_`user`", "Inputs": [ { "OperatorType": "Route", @@ -439,8 +439,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select un.col2 from unsharded as un where 1 != 1", - "Query": "select un.col2 from unsharded as un", + "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1", + "Query": "select un.col2, weight_string(un.col2) from unsharded as un", "Table": "unsharded" }, { @@ -450,9 +450,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from user as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from user as u", - "Table": "user" + "FieldQuery": "select u.a, u.textcol1, 
weight_string(u.textcol1) from `user` as u where 1 != 1", + "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", + "Table": "`user`" } ] } @@ -504,10 +504,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a from user where 1 != 1", + "FieldQuery": "select a, weight_string(a) from `user` where 1 != 1", "OrderBy": "0 DESC", - "Query": "select a from user order by binary a desc", - "Table": "user" + "Query": "select a, weight_string(a) from `user` order by binary a desc", + "Table": "`user`" } } @@ -520,7 +520,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_music", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -529,10 +529,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.a from user as u where 1 != 1", + "FieldQuery": "select u.a, weight_string(u.a) from `user` as u where 1 != 1", "OrderBy": "0 DESC", - "Query": "select u.a from user as u order by binary a desc", - "Table": "user" + "Query": "select u.a, weight_string(u.a) from `user` as u order by binary a desc", + "Table": "`user`" }, { "OperatorType": "Route", diff --git a/go/vt/vtgate/planbuilder/testdata/migration_cases.txt b/go/vt/vtgate/planbuilder/testdata/migration_cases.txt new file mode 100644 index 00000000000..aa953f17598 --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/migration_cases.txt @@ -0,0 +1,86 @@ +# revert migration +"revert vitess_migration 'abc'" +{ + "QueryType": "REVERT", + "Original": "revert vitess_migration 'abc'", + "Instructions": { + "OperatorType": "RevertMigration", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "query": "revert vitess_migration 'abc'" + } +} + +# retry migration +"alter vitess_migration 'abc' retry" +{ + "QueryType": "UNKNOWN", + "Original": "alter vitess_migration 'abc' retry", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": 
"AllShards()", + "IsDML": false, + "Query": "alter vitess_migration 'abc' retry", + "SingleShardOnly": false + } +} + +# complete migration +"alter vitess_migration 'abc' complete" +{ + "QueryType": "UNKNOWN", + "Original": "alter vitess_migration 'abc' complete", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "alter vitess_migration 'abc' complete", + "SingleShardOnly": false + } +} + +# cancel migration +"alter vitess_migration 'abc' cancel" +{ + "QueryType": "UNKNOWN", + "Original": "alter vitess_migration 'abc' cancel", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "alter vitess_migration 'abc' cancel", + "SingleShardOnly": false + } +} + +# cancel all migrations +"alter vitess_migration cancel all" +{ + "QueryType": "UNKNOWN", + "Original": "alter vitess_migration cancel all", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "alter vitess_migration cancel all", + "SingleShardOnly": false + } +} diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt b/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt index d183afb081c..3f537118c0e 100644 --- a/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt @@ -11,7 +11,7 @@ }, "TargetDestination": "AnyShard()", "IsDML": false, - "Query": "explain select * from user", + "Query": "explain select * from `user`", "SingleShardOnly": true } } @@ -57,7 +57,7 @@ }, "TargetDestination": "AnyShard()", "IsDML": false, - "Query": "describe select * from t", + "Query": "explain select * from t", "SingleShardOnly": true } } @@ -75,7 +75,7 @@ }, "TargetDestination": "AnyShard()", "IsDML": 
false, - "Query": "desc select * from t", + "Query": "explain select * from t", "SingleShardOnly": true } } diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt index ff2628fb9b3..a8e9497f8e4 100644 --- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt @@ -10,9 +10,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 from user where 1 != 1", - "Query": "select user.col1 from user having col2 = 2", - "Table": "user" + "FieldQuery": "select `user`.col1 from `user` where 1 != 1", + "Query": "select `user`.col1 from `user` having col2 = 2", + "Table": "`user`" } } @@ -29,7 +29,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -38,9 +38,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 from user where 1 != 1", - "Query": "select user.col1 from user", - "Table": "user" + "FieldQuery": "select `user`.col1 from `user` where 1 != 1", + "Query": "select `user`.col1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -66,7 +66,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -75,9 +75,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a, user.col2 from user where 1 != 1", - "Query": "select user.col1 as a, user.col2 from user having 1 = 1 and a = 1 and a = user.col2", - "Table": "user" + "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1", + "Query": "select `user`.col1 as a, `user`.col2 from `user` having 1 = 1 and a = 1 and a = `user`.col2", + "Table": "`user`" }, { "OperatorType": "Route", @@ -110,9 +110,9 @@ "Name": "user", "Sharded": true }, - 
"FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -121,9 +121,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user having :__sq_has_values1 = 1 and id in ::__vals", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` having :__sq_has_values1 = 1 and id in ::__vals", + "Table": "`user`", "Values": [ "::__sq1" ], @@ -145,15 +145,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user where id = 5 order by aa asc", - "Table": "user", + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where id = 5 order by aa asc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above # ORDER BY uses column numbers "select col from user where id = 1 order by 1" @@ -167,15 +168,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user where id = 1 order by 1 asc", - "Table": "user", + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where id = 1 order by 1 asc", + "Table": "`user`", "Values": [ 1 ], "Vindex": "user_index" } } +Gen4 plan same as above # ORDER BY on scatter "select col from user order by col" @@ -189,10 +191,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", + "FieldQuery": "select col, weight_string(col) from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select col from user order by col asc", - "Table": "user" + "Query": "select col, weight_string(col) from `user` order by col asc", + "Table": "`user`" } } @@ -208,9 +210,9 @@ "Name": "user", "Sharded": 
true }, - "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1", + "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1", "OrderBy": "0 ASC", - "Query": "select user_id, col1, col2 from authoritative order by user_id asc", + "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc", "Table": "authoritative" } } @@ -228,7 +230,7 @@ "Sharded": true }, "FieldQuery": "select user_id, col1, col2, weight_string(col1) from authoritative where 1 != 1", - "OrderBy": "3 ASC", + "OrderBy": "1 ASC", "Query": "select user_id, col1, col2, weight_string(col1) from authoritative order by col1 asc", "Table": "authoritative" } @@ -246,10 +248,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, textcol1, b, weight_string(textcol1) from user where 1 != 1", - "OrderBy": "0 ASC, 3 ASC, 2 ASC", - "Query": "select a, textcol1, b, weight_string(textcol1) from user order by a asc, textcol1 asc, b asc", - "Table": "user" + "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` where 1 != 1", + "OrderBy": "0 ASC, 1 ASC, 2 ASC", + "Query": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc", + "Table": "`user`" } } @@ -265,10 +267,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, user.textcol1, b, weight_string(user.textcol1) from user where 1 != 1", - "OrderBy": "0 ASC, 3 ASC, 2 ASC", - "Query": "select a, user.textcol1, b, weight_string(user.textcol1) from user order by a asc, textcol1 asc, b asc", - "Table": "user" + "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` where 1 != 1", + "OrderBy": "0 ASC, 1 ASC, 2 ASC", + "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` order by 
a asc, textcol1 asc, b asc", + "Table": "`user`" } } @@ -284,10 +286,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, textcol1, b, textcol2, weight_string(textcol1), weight_string(textcol2) from user where 1 != 1", - "OrderBy": "0 ASC, 4 ASC, 2 ASC, 5 ASC", - "Query": "select a, textcol1, b, textcol2, weight_string(textcol1), weight_string(textcol2) from user order by a asc, textcol1 asc, b asc, textcol2 asc", - "Table": "user" + "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` where 1 != 1", + "OrderBy": "0 ASC, 1 ASC, 2 ASC, 3 ASC", + "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc", + "Table": "`user`" } } @@ -307,9 +309,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user order by null", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` order by null", + "Table": "`user`" } } @@ -329,9 +331,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col2 from user where 1 != 1", - "Query": "select col2 from user", - "Table": "user" + "FieldQuery": "select col2 from `user` where 1 != 1", + "Query": "select col2 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -340,10 +342,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", + "FieldQuery": "select col, weight_string(col) from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select col from user where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc", - "Table": "user" + "Query": "select col, weight_string(col) from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc", + "Table": "`user`" } ] } @@ -358,7 +360,7 @@ "OperatorType": "Join", 
"Variant": "Join", "JoinColumnIndexes": "-1,-2,1", - "TableName": "user_music", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -367,9 +369,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a, user.col2, user.id from user where 1 != 1", - "Query": "select user.col1 as a, user.col2, user.id from user where user.id = 1 order by null", - "Table": "user", + "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", + "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by null", + "Table": "`user`", "Values": [ 1 ], @@ -403,7 +405,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2,1", - "TableName": "user_music", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -412,9 +414,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a, user.col2, user.id from user where 1 != 1", - "Query": "select user.col1 as a, user.col2, user.id from user where user.id = 1 order by a asc", - "Table": "user", + "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", + "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc", + "Table": "`user`", "Values": [ 1 ], @@ -448,7 +450,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2,1", - "TableName": "user_music", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -457,9 +459,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a, user.col2, user.id from user where 1 != 1", - "Query": "select user.col1 as a, user.col2, user.id from user where user.id = 1 order by a asc", - "Table": "user", + "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", + "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc", + "Table": 
"`user`", "Values": [ 1 ], @@ -500,9 +502,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col2 from user where 1 != 1", - "Query": "select col2 from user", - "Table": "user" + "FieldQuery": "select col2 from `user` where 1 != 1", + "Query": "select col2 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -511,9 +513,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user where :__sq_has_values1 = 1 and col in ::__sq1 order by null", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by null", + "Table": "`user`" } ] } @@ -531,9 +533,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user order by RAND()", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` order by RAND()", + "Table": "`user`" } } @@ -546,7 +548,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2,1", - "TableName": "user_music", + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -555,9 +557,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col1 as a, user.col2, user.id from user where 1 != 1", - "Query": "select user.col1 as a, user.col2, user.id from user where user.id = 1 order by RAND()", - "Table": "user", + "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", + "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by RAND()", + "Table": "`user`", "Values": [ 1 ], @@ -598,9 +600,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col2 from user where 1 != 1", - "Query": "select col2 from user", - "Table": "user" + "FieldQuery": "select col2 from `user` where 1 != 1", + "Query": "select col2 from `user`", + 
"Table": "`user`" }, { "OperatorType": "Route", @@ -609,9 +611,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()", + "Table": "`user`" } ] } @@ -629,15 +631,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 5 order by col asc", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 order by col asc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above # Order by, qualified '*' expression "select user.* from user where id = 5 order by user.col" @@ -651,15 +654,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.* from user where 1 != 1", - "Query": "select user.* from user where id = 5 order by user.col asc", - "Table": "user", + "FieldQuery": "select `user`.* from `user` where 1 != 1", + "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above # Order by, '*' expression with qualified reference "select * from user where id = 5 order by user.col" @@ -673,15 +677,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 5 order by user.col asc", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 order by `user`.col asc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above # Order by, '*' expression in a subquery "select u.id, e.id from user u join user_extra e where u.col = 
e.col and u.col in (select * from user where user.id = u.id order by col)" @@ -692,7 +697,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -701,9 +706,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.id, u.col from user as u where 1 != 1", - "Query": "select u.id, u.col from user as u where u.col in (select * from user where user.id = u.id order by col asc)", - "Table": "user" + "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", + "Query": "select u.id, u.col from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)", + "Table": "`user`" }, { "OperatorType": "Route", @@ -748,15 +753,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 5 order by user.col collate utf8_general_ci asc", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above #Order by with math functions "select * from user where id = 5 order by -col1" @@ -770,15 +776,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 5 order by -col1 asc", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 order by -col1 asc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above #Order by with string operations "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc" @@ -792,15 +799,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 5 
order by concat(col, col1) collate utf8_general_ci desc", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above #Order by with math operations "select * from user where id = 5 order by id+col collate utf8_general_ci desc" @@ -814,15 +822,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 5 order by id + col collate utf8_general_ci desc", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above #Order by subquery column "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id" @@ -836,9 +845,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1", - "Query": "select * from user as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc", - "Table": "user", + "FieldQuery": "select * from `user` as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1", + "Query": "select * from `user` as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc", + "Table": "`user`", "Values": [ 5 ], @@ -858,9 +867,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user as route1 where 1 != 1", - "Query": "select col from user as route1 where id = 1 order by col asc", - "Table": "user", + "FieldQuery": "select col 
from `user` as route1 where 1 != 1", + "Query": "select col from `user` as route1 where id = 1 order by col asc", + "Table": "`user`", "Values": [ 1 ], @@ -880,15 +889,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1 from user where 1 != 1", - "Query": "select col1 from user where id = 1 limit 1", - "Table": "user", + "FieldQuery": "select col1 from `user` where 1 != 1", + "Query": "select col1 from `user` where id = 1 limit 1", + "Table": "`user`", "Values": [ 1 ], "Vindex": "user_index" } } +Gen4 plan same as above # limit for joins. Can't push down the limit because result # counts get multiplied by join operations. @@ -904,7 +914,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -913,9 +923,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -950,13 +960,14 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user limit :__upper_limit", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` limit :__upper_limit", + "Table": "`user`" } ] } } +Gen4 plan same as above # limit for scatter with bind var "select col from user limit :a" @@ -974,13 +985,14 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user limit :__upper_limit", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` limit :__upper_limit", + "Table": "`user`" } ] } } +Gen4 plan same as above # cross-shard expression in parenthesis with limit "select 
* from user where (id1 = 4 AND name1 ='abc') limit 5" @@ -998,9 +1010,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id1 = 4 and name1 = 'abc' limit :__upper_limit", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit", + "Table": "`user`" } ] } @@ -1026,9 +1038,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1 from user where 1 != 1", - "Query": "select col1 from user", - "Table": "user" + "FieldQuery": "select col1 from `user` where 1 != 1", + "Query": "select col1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1037,9 +1049,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit", + "Table": "`user`" } ] } @@ -1064,7 +1076,9 @@ "Table": "ref" } } +Gen4 plan same as above # invalid limit expression "select id from user limit 1+1" "unexpected expression in LIMIT: expression is too complex '1 + 1'" +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/schema_test.json b/go/vt/vtgate/planbuilder/testdata/schema_test.json index bc55db85684..fd56617707b 100644 --- a/go/vt/vtgate/planbuilder/testdata/schema_test.json +++ b/go/vt/vtgate/planbuilder/testdata/schema_test.json @@ -111,6 +111,10 @@ "name": "textcol1", "type": "VARCHAR" }, + { + "name": "intcol", + "type": "INT16" + }, { "name": "textcol2", "type": "VARCHAR" @@ -306,6 +310,20 @@ "type": "sequence" } } + }, + "main_2": { + "tables": { + "unsharded_tab": { + "columns": [ + { + "name": "predef1" + }, + { + "name": "predef3" + } + ] + } + } } } } diff --git 
a/go/vt/vtgate/planbuilder/testdata/select_cases.txt b/go/vt/vtgate/planbuilder/testdata/select_cases.txt index b94fd79accd..b2b7906ad3d 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/select_cases.txt @@ -10,11 +10,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user", - "Table": "user" + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user`", + "Table": "`user`" } } +Gen4 plan same as above # '*' expression for simple route "select user.* from user" @@ -28,9 +29,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.* from user where 1 != 1", - "Query": "select user.* from user", - "Table": "user" + "FieldQuery": "select `user`.* from `user` where 1 != 1", + "Query": "select `user`.* from `user`", + "Table": "`user`" } } @@ -46,9 +47,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user`", + "Table": "`user`" } } @@ -64,9 +65,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`", + "Table": "`user`" } } @@ -88,9 +89,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) from user where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user", - "Table": "user" + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`", + "Table": "`user`" } ] } @@ -112,9 +113,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select /*vt+ 
QUERY_TIMEOUT_MS=1000 */ * from user limit :__upper_limit", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit", + "Table": "`user`" } ] } @@ -132,9 +133,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user`", + "Table": "`user`" } } @@ -156,9 +157,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) from user where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "Table": "user" + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`", + "Table": "`user`" } ] } @@ -182,9 +183,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) from user where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "Table": "user" + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`", + "Table": "`user`" } ] } @@ -206,9 +207,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit :__upper_limit", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit", + "Table": "`user`" } ] } @@ -226,9 +227,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.* from user where 1 != 1", - "Query": "select user.* from user", - "Table": "user" + "FieldQuery": "select `user`.* from `user` where 1 != 1", + "Query": "select `user`.* from `user`", + 
"Table": "`user`" } } @@ -244,9 +245,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.* from user where 1 != 1", - "Query": "select user.* from user", - "Table": "user" + "FieldQuery": "select `user`.* from `user` where 1 != 1", + "Query": "select `user`.* from `user`", + "Table": "`user`" } } @@ -320,8 +321,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from authoritative join user on authoritative.user_id = user.id where 1 != 1", - "Query": "select * from authoritative join user on authoritative.user_id = user.id", + "FieldQuery": "select * from authoritative join `user` on authoritative.user_id = `user`.id where 1 != 1", + "Query": "select * from authoritative join `user` on authoritative.user_id = `user`.id", "Table": "authoritative" } } @@ -338,8 +339,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id, a.user_id, a.col1, a.col2, user.col1 from authoritative as a join user on a.user_id = user.id where 1 != 1", - "Query": "select user.id, a.user_id, a.col1, a.col2, user.col1 from authoritative as a join user on a.user_id = user.id", + "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id where 1 != 1", + "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id", "Table": "authoritative" } } @@ -356,9 +357,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user join user_extra on user.id = user_extra.user_id where 1 != 1", - "Query": "select col from user join user_extra on user.id = user_extra.user_id", - "Table": "user" + "FieldQuery": "select col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", + "Query": "select col from `user` join user_extra on `user`.id = user_extra.user_id", + "Table": "`user`" } } @@ -375,7 +376,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - 
"TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -384,9 +385,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -445,6 +446,7 @@ "Table": "unsharded" } } +Gen4 plan same as above # select from dual on unqualified keyspace "select @@session.auto_increment_increment from dual" @@ -463,6 +465,7 @@ "Table": "dual" } } +Gen4 plan same as above # select from pinned table "select * from pin_test" @@ -503,6 +506,7 @@ "Table": "dual" } } +Gen4 plan same as above # RHS route referenced "select user_extra.id from user join user_extra" @@ -513,7 +517,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -522,9 +526,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user", - "Table": "user" + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -540,6 +544,7 @@ ] } } +Gen4 plan same as above # Both routes referenced "select user.col, user_extra.id from user join user_extra" @@ -550,7 +555,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -559,9 +564,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -577,6 +582,7 
@@ ] } } +Gen4 plan same as above # Expression with single-route reference "select user.col, user_extra.id + user_extra.col from user join user_extra" @@ -587,7 +593,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -596,9 +602,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -614,6 +620,7 @@ ] } } +Gen4 plan same as above # Jumbled references "select user.col, user_extra.id, user.col2 from user join user_extra" @@ -624,7 +631,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1,-2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -633,9 +640,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col, user.col2 from user where 1 != 1", - "Query": "select user.col, user.col2 from user", - "Table": "user" + "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1", + "Query": "select `user`.col, `user`.col2 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -651,6 +658,7 @@ ] } } +Gen4 plan same as above # Comments "select /* comment */ user.col from user join user_extra" @@ -661,7 +669,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -670,9 +678,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select /* comment */ user.col from user", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select /* comment */ 
`user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -698,7 +706,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -707,9 +715,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.col from user where 1 != 1", - "Query": "select user.col from user for update", - "Table": "user" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` for update", + "Table": "`user`" }, { "OperatorType": "Route", @@ -735,7 +743,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -744,9 +752,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id from user where 1 != 1", - "Query": "select user.id from user", - "Table": "user" + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -772,7 +780,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -781,9 +789,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.Col from user where 1 != 1", - "Query": "select user.Col from user", - "Table": "user" + "FieldQuery": "select `user`.Col from `user` where 1 != 1", + "Query": "select `user`.Col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -799,10 +807,12 @@ ] } } +Gen4 plan same as above # syntax error "the quick brown fox" "syntax error at position 4 near 'the'" +Gen4 plan same as above # Hex number is not treated as a simple value "select * from user where id = 0x04" @@ -816,9 +826,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from 
user where 1 != 1", - "Query": "select * from user where id = 0x04", - "Table": "user" + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 0x04", + "Table": "`user`" } } @@ -839,9 +849,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_id from music where 1 != 1", + "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", "OrderBy": "0 ASC", - "Query": "select user_id from music order by user_id asc limit :__upper_limit", + "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", "Table": "music" } ] @@ -860,15 +870,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where `name` = 'abc' and id = 4 limit 5", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5", + "Table": "`user`", "Values": [ 4 ], "Vindex": "user_index" } } +Gen4 plan same as above # Multiple parenthesized expressions "select * from user where (id = 4) AND (name ='abc') limit 5" @@ -882,15 +893,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 4 and `name` = 'abc' limit 5", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "Table": "`user`", "Values": [ 4 ], "Vindex": "user_index" } } +Gen4 plan same as above # Multiple parenthesized expressions "select * from user where (id = 4 and name ='abc') limit 5" @@ -904,15 +916,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 4 and `name` = 'abc' limit 5", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", 
+ "Table": "`user`", "Values": [ 4 ], "Vindex": "user_index" } } +Gen4 plan same as above # Column Aliasing with Table.Column "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2" @@ -926,9 +939,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user0_.col as col0_ from user as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from user as user0_ where id = 1 order by user0_.col desc limit 2", - "Table": "user", + "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", + "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2", + "Table": "`user`", "Values": [ 1 ], @@ -948,9 +961,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user0_.col as col0_ from user as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from user as user0_ where id = 1 order by col0_ desc limit 3", - "Table": "user", + "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", + "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3", + "Table": "`user`", "Values": [ 1 ], @@ -970,15 +983,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 1 and `name` = true limit 5", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 1 and `name` = true limit 5", + "Table": "`user`", "Values": [ 1 ], "Vindex": "user_index" } } +Gen4 plan same as above # Column as boolean-ish "select * from user where (id = 1) AND name limit 5" @@ -992,15 +1006,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 1 and `name` limit 5", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 1 and `name` limit 5", + 
"Table": "`user`", "Values": [ 1 ], "Vindex": "user_index" } } +Gen4 plan same as above # PK as fake boolean, and column as boolean-ish "select * from user where (id = 5) AND name = true limit 5" @@ -1014,15 +1029,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 5 and `name` = true limit 5", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 and `name` = true limit 5", + "Table": "`user`", "Values": [ 5 ], "Vindex": "user_index" } } +Gen4 plan same as above # top level subquery in select "select a, (select col from user) from unsharded" @@ -1040,9 +1056,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1075,9 +1091,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1110,7 +1126,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1119,9 +1135,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id as id1 from user where 1 != 1", - "Query": "select user.id as id1 from user", - "Table": "user" + "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1", + "Query": "select `user`.id as id1 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1164,6 +1180,7 @@ "Query": "select * from information_schema.a union select * from information_schema.b" } } +Gen4 plan same as above 
# union with the same target shard "select * from music where user_id = 1 union select * from user where id = 1" @@ -1177,8 +1194,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from music where 1 != 1 union select * from user where 1 != 1", - "Query": "select * from music where user_id = 1 union select * from user where id = 1", + "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1", + "Query": "select * from music where user_id = 1 union select * from `user` where id = 1", "Table": "music", "Values": [ 1 @@ -1186,6 +1203,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # union with the same target shard last_insert_id "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1" @@ -1199,8 +1217,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from user where 1 != 1", - "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from user where id = 1", + "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1", + "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1", "Table": "music", "Values": [ 1 @@ -1208,6 +1226,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a" { @@ -1259,6 +1278,7 @@ "Table": "unsharded" } } +Gen4 plan same as above "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)" { @@ -1276,6 +1296,7 @@ "Table": "unsharded" } } +Gen4 plan same as above "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)" { @@ 
-1293,6 +1314,7 @@ "Table": "unsharded" } } +Gen4 plan same as above "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1" { @@ -1310,6 +1332,7 @@ "Table": "unsharded" } } +Gen4 plan same as above # routing rules: ensure directives are not lost "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2" @@ -1410,6 +1433,7 @@ "Vindex": "user_index" } } +Gen4 plan same as above # sql_calc_found_rows with limit "select sql_calc_found_rows * from music limit 100" @@ -1520,9 +1544,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_id, count(id) from music where 1 != 1 group by user_id", + "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id", "OrderBy": "0 ASC", - "Query": "select user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit", + "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit", "Table": "music" } ] @@ -1552,11 +1576,11 @@ # sql_calc_found_rows in sub queries "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)" -"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS' (errno 1234) (sqlstate 42000)" +"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" # sql_calc_found_rows in derived table "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1" -"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS' (errno 1234) (sqlstate 42000)" +"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" # select from unsharded keyspace into dumpfile "select * from main.unsharded into Dumpfile 'x.txt'" @@ -1577,10 +1601,10 @@ } # select from unsharded keyspace into outfile -"select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped 
by 'e' lines starting by 'a' terminated by '\n'" +"select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'" { "QueryType": "SELECT", - "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", + "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'", "Instructions": { "OperatorType": "Route", "Variant": "SelectUnsharded", @@ -1589,16 +1613,16 @@ "Sharded": false }, "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", + "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'", "Table": "unsharded" } } # select from unsharded keyspace into outfile s3 -"select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off" +"select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off" { "QueryType": "SELECT", - "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' 
terminated by '\n' manifest on overwrite off", + "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off", "Instructions": { "OperatorType": "Route", "Variant": "SelectUnsharded", @@ -1607,18 +1631,19 @@ "Sharded": false }, "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", + "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off", "Table": "unsharded" } } # Union after into outfile is incorrect "select id from user into outfile 'out_file_name' union all select id from music" -"Incorrect usage/placement of 'INTO' (errno 1234) (sqlstate 42000)" +"Incorrect usage/placement of 'INTO'" +Gen4 plan same as above # Into outfile s3 in sub-query is incorrect "select id from (select id from user into outfile s3 'inner_outfile') as t2" -"Incorrect usage/placement of 'INTO' (errno 1234) (sqlstate 42000)" +"Incorrect usage/placement of 'INTO'" # Distinct with cross shard query "select distinct user.a from user join user_extra" @@ -1632,7 +1657,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -1641,9 +1666,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.a from user where 1 != 1", - "Query": "select user.a from user", - "Table": "user" + "FieldQuery": "select `user`.a from `user` where 1 != 1", + 
"Query": "select `user`.a from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -1676,8 +1701,8 @@ }, "FieldQuery": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", "Query": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :__vttablename and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[VARBINARY(\"data_type_table\")]", - "SysTableTableSchema": "[VARBINARY(\"test\")]" + "SysTableTableName": "VARBINARY(\"data_type_table\")", + "SysTableTableSchema": "VARBINARY(\"test\")" } } @@ -1701,8 +1726,8 @@ }, "FieldQuery": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", "Query": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :__vttablename and KCU.TABLE_NAME = :__vttablename order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[VARBINARY(\"data_type_table\"), VARBINARY(\"data_type_table\")]", - "SysTableTableSchema": "[VARBINARY(\"test\")]" + "SysTableTableName": "VARBINARY(\"data_type_table\")", + "SysTableTableSchema": "VARBINARY(\"test\")" }, { "OperatorType": "Route", @@ -1713,13 +1738,31 @@ }, "FieldQuery": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S where 1 != 1", "Query": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S 
where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :__vttablename", - "SysTableTableName": "[VARBINARY(\"sc\")]", - "SysTableTableSchema": "[VARBINARY(\"test\")]" + "SysTableTableName": "VARBINARY(\"sc\")", + "SysTableTableSchema": "VARBINARY(\"test\")" } ] } } +#information_schema.routines +"SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'" +{ + "QueryType": "SELECT", + "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1", + "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'", + "SysTableTableSchema": ":v1" + } +} + #information_schema table sizes "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?" 
{ @@ -1734,7 +1777,7 @@ }, "FieldQuery": "select SUM(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1", "Query": "select SUM(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname", - "SysTableTableSchema": "[:v1]" + "SysTableTableSchema": ":v1" } } @@ -1744,14 +1787,34 @@ "QueryType": "SELECT", "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", "Instructions": { - "OperatorType": "Route", - "Variant": "SelectDBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1", - "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join 
information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc", - "SysTableTableSchema": "[:v1, :v2]" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "-1,-2,-3,-4,-5,-6,1,2", + "TableName": "_", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc", + "SysTableTableSchema": ":v1" + }, + { + "OperatorType": "Route", + "Variant": "SelectDBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where 1 != 1", + "Query": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where rc.constraint_name = :kcu_constraint_name and rc.constraint_schema = :__vtschemaname", + "SysTableTableSchema": ":v2" + } + ] } } + diff --git a/go/vt/vtgate/planbuilder/testdata/set_cases.txt b/go/vt/vtgate/planbuilder/testdata/set_cases.txt index 
0d2289bf207..fd4b95a5ba8 100644 --- a/go/vt/vtgate/planbuilder/testdata/set_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/set_cases.txt @@ -19,6 +19,7 @@ ] } } +Gen4 plan same as above # set multi user defined variable "set @foo = 42, @bar = @foo" @@ -46,6 +47,7 @@ ] } } +Gen4 plan same as above # set multi user defined variable with complex expression "set @foo = 42, @bar = @foo + 1" @@ -73,6 +75,7 @@ ] } } +Gen4 plan same as above # set UDV to expression that can't be evaluated at vtgate "set @foo = CONCAT('Any','Expression','Is','Valid')" @@ -103,6 +106,7 @@ ] } } +Gen4 plan same as above # single sysvar cases "SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'" @@ -129,6 +133,7 @@ ] } } +Gen4 plan same as above # multiple sysvar cases "SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0" @@ -164,6 +169,7 @@ ] } } +Gen4 plan same as above # autocommit case "SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`" @@ -211,6 +217,7 @@ ] } } +Gen4 plan same as above # set ignore plan "set @@default_storage_engine = 'DONOTCHANGEME'" @@ -233,6 +240,7 @@ ] } } +Gen4 plan same as above # set check and ignore plan "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')" @@ -259,6 +267,7 @@ ] } } +Gen4 plan same as above # set system settings "set @@sql_safe_updates = 1" @@ -285,6 +294,7 @@ ] } } +Gen4 plan same as above # set plan building with ON/OFF enum "set @@innodb_strict_mode = OFF" @@ -307,6 +317,7 @@ ] } } +Gen4 plan same as above # set plan building with string literal "set @@innodb_strict_mode = 'OFF'" @@ -329,6 +340,7 @@ ] } } +Gen4 plan same as above # set plan building with string literal "set @@innodb_tmpdir = 'OFF'" @@ -351,10 +363,12 @@ ] } } +Gen4 plan same as above # set system settings "set @@ndbinfo_max_bytes = 192" "ndbinfo_max_bytes: system setting is not supported" +Gen4 plan same as above # set 
autocommit "set autocommit = 1" @@ -377,6 +391,7 @@ ] } } +Gen4 plan same as above # set autocommit false "set autocommit = 0" @@ -399,6 +414,7 @@ ] } } +Gen4 plan same as above # set autocommit with backticks "set @@session.`autocommit` = 0" @@ -421,6 +437,7 @@ ] } } +Gen4 plan same as above # more vitess aware settings "set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20" @@ -453,6 +470,7 @@ ] } } +Gen4 plan same as above # set autocommit to default "set @@autocommit = default" @@ -475,6 +493,7 @@ ] } } +Gen4 plan same as above # set global autocommit to default "set global autocommit = off" @@ -502,3 +521,9 @@ ] } } +Gen4 plan same as above + +# change read only variable +"set socket = ''" +"Variable 'socket' is a read only variable" +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.txt b/go/vt/vtgate/planbuilder/testdata/show_cases.txt index 517d48187c3..ca2b4d8ffee 100644 --- a/go/vt/vtgate/planbuilder/testdata/show_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/show_cases.txt @@ -219,6 +219,80 @@ { "QueryType": "SHOW", "Original": "show variables", + "Instructions": { + "OperatorType": "ReplaceVariables", + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show variables", + "SingleShardOnly": true + } + ] + } +} + +# show global variables +"show global variables" +{ + "QueryType": "SHOW", + "Original": "show global variables", + "Instructions": { + "OperatorType": "ReplaceVariables", + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show global variables", + "SingleShardOnly": true + } + ] + } +} + +# show databases +"show databases" +{ + "QueryType": "SHOW", + "Original": "show databases", + "Instructions": { + "OperatorType": "Rows" + } +} + +# show create database 
+"show create database user" +{ + "QueryType": "SHOW", + "Original": "show create database user", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show create database `user`", + "SingleShardOnly": true + } +} + +# show create database system_schema +"show create database mysql" +{ + "QueryType": "SHOW", + "Original": "show create database mysql", "Instructions": { "OperatorType": "Send", "Keyspace": { @@ -227,17 +301,242 @@ }, "TargetDestination": "AnyShard()", "IsDML": false, - "Query": "show variables", + "Query": "show create database mysql", "SingleShardOnly": true } } -# show databases -"show databases" +# show create procedure +"show create procedure proc" { "QueryType": "SHOW", - "Original": "show databases", + "Original": "show create procedure proc", "Instructions": { - "OperatorType": "Rows" + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show create procedure proc", + "SingleShardOnly": true + } +} + +# show create procedure from system_schema +"show create procedure information_schema.proc" +{ + "QueryType": "SHOW", + "Original": "show create procedure information_schema.proc", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show create procedure information_schema.proc", + "SingleShardOnly": true + } +} + +# show create table on table present in sharded but as unsharded is selected it goes to unsharded keyspace +"show create table user_extra" +{ + "QueryType": "SHOW", + "Original": "show create table user_extra", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show create table user_extra", 
+ "SingleShardOnly": true + } +} + +# show create table with qualifier +"show create table user.user_extra" +{ + "QueryType": "SHOW", + "Original": "show create table user.user_extra", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show create table user_extra", + "SingleShardOnly": true + } +} + +# show create table with unsharded as default keyspace +"show create table unknown" +{ + "QueryType": "SHOW", + "Original": "show create table unknown", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show create table unknown", + "SingleShardOnly": true + } +} + +# show create table with table not present with qualifier +"show create table user.unknown" +"table unknown not found" + +# show create table from system_schema +"show create table information_schema.tables" +{ + "QueryType": "SHOW", + "Original": "show create table information_schema.tables", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show create table information_schema.`tables`", + "SingleShardOnly": true + } +} + +# show tables +"show tables" +{ + "QueryType": "SHOW", + "Original": "show tables", + "Instructions": { + "OperatorType": "RenameFields", + "Columns": [ + "Tables_in_main" + ], + "Indices": [ + 0 + ], + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show tables", + "SingleShardOnly": true + } + ] + } +} + +# show tables from db +"show tables from user" +{ + "QueryType": "SHOW", + "Original": "show tables from user", + "Instructions": { + "OperatorType": "RenameFields", + "Columns": [ + "Tables_in_user" + ], + 
"Indices": [ + 0 + ], + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show tables", + "SingleShardOnly": true + } + ] + } +} + +# show tables from system schema +"show tables from performance_schema" +{ + "QueryType": "SHOW", + "Original": "show tables from performance_schema", + "Instructions": { + "OperatorType": "RenameFields", + "Columns": [ + "Tables_in_performance_schema" + ], + "Indices": [ + 0 + ], + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show tables from performance_schema", + "SingleShardOnly": true + } + ] + } +} + +# show migrations with db and like +"show vitess_migrations from user like '%format'" +{ + "QueryType": "SHOW", + "Original": "show vitess_migrations from user like '%format'", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "SELECT * FROM _vt.schema_migrations where migration_uuid LIKE '%format' OR migration_context LIKE '%format' OR migration_status LIKE '%format'", + "SingleShardOnly": false + } +} + +# show migrations with db and where +"show vitess_migrations from user where id = 5" +{ + "QueryType": "SHOW", + "Original": "show vitess_migrations from user where id = 5", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AllShards()", + "IsDML": false, + "Query": "SELECT * FROM _vt.schema_migrations where id = 5", + "SingleShardOnly": false } } diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt index 8a837a8a889..43395759844 100644 --- 
a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt +++ b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt @@ -29,7 +29,7 @@ }, "TargetDestination": "AnyShard()", "IsDML": false, - "Query": "show full columns from user", + "Query": "show full columns from `user`", "SingleShardOnly": true } } @@ -39,6 +39,47 @@ { "QueryType": "SHOW", "Original": "show variables", + "Instructions": { + "OperatorType": "ReplaceVariables", + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show variables", + "SingleShardOnly": true + } + ] + } +} + +# show full columns from system schema +"show full columns from sys.sys_config" +{ + "QueryType": "SHOW", + "Original": "show full columns from sys.sys_config", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "IsDML": false, + "Query": "show full columns from sys.sys_config", + "SingleShardOnly": true + } +} + +# show full columns from system schema replacing qualifier +"show full columns from x.sys_config from sys" +{ + "QueryType": "SHOW", + "Original": "show full columns from x.sys_config from sys", "Instructions": { "OperatorType": "Send", "Keyspace": { @@ -47,7 +88,7 @@ }, "TargetDestination": "AnyShard()", "IsDML": false, - "Query": "show variables", + "Query": "show full columns from sys.sys_config", "SingleShardOnly": true } } diff --git a/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt b/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt index 7b07e057f64..24811cbc907 100644 --- a/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt @@ -9,7 +9,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_unsharded", + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ 
-18,9 +18,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select predef2 from user where 1 != 1", - "Query": "select predef2 from user", - "Table": "user" + "FieldQuery": "select predef2 from `user` where 1 != 1", + "Query": "select predef2 from `user`", + "Table": "`user`" }, { "OperatorType": "Route", diff --git a/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt b/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt new file mode 100644 index 00000000000..f6e58dd2c18 --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt @@ -0,0 +1,17 @@ +# max_allowed_packet +"select @@max_allowed_packet from dual" +{ + "QueryType": "SELECT", + "Original": "select @@max_allowed_packet from dual", + "Instructions": { + "OperatorType": "Route", + "Variant": "SelectReference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1", + "Query": "select @@max_allowed_packet from dual", + "Table": "dual" + } +} diff --git a/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt b/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt index b29317c7a26..68be3ff6d8e 100644 --- a/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt @@ -4,6 +4,7 @@ "QueryType": "BEGIN", "Original": "begin" } +Gen4 plan same as above # Start Transaction "start transaction" @@ -11,6 +12,7 @@ "QueryType": "BEGIN", "Original": "start transaction" } +Gen4 plan same as above # Commit "commit" @@ -18,6 +20,7 @@ "QueryType": "COMMIT", "Original": "commit" } +Gen4 plan same as above # Rollback "rollback" @@ -25,6 +28,7 @@ "QueryType": "ROLLBACK", "Original": "rollback" } +Gen4 plan same as above # Savepoint "savepoint a" @@ -32,6 +36,7 @@ "QueryType": "SAVEPOINT", "Original": "savepoint a" } +Gen4 plan same as above # Savepoint rollback "rollback work to savepoint a" @@ -39,6 +44,7 @@ "QueryType": "SAVEPOINT_ROLLBACK", "Original": 
"rollback work to savepoint a" } +Gen4 plan same as above # Savepoint release "release savepoint a" @@ -46,3 +52,4 @@ "QueryType": "RELEASE", "Original": "release savepoint a" } +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.txt b/go/vt/vtgate/planbuilder/testdata/union_cases.txt index 18ae0f63053..c7572346b1d 100644 --- a/go/vt/vtgate/planbuilder/testdata/union_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/union_cases.txt @@ -10,11 +10,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1 union all select id from music where 1 != 1", - "Query": "select id from user union all select id from music", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1", + "Query": "select id from `user` union all select id from music", + "Table": "`user`" } } +Gen4 plan same as above # union distinct between two scatter selects "select id from user union select id from music" @@ -34,9 +35,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -54,6 +55,7 @@ ] } } +Gen4 plan same as above # union all between two SelectEqualUnique "select id from user where id = 1 union all select id from user where id = 5" @@ -70,9 +72,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where id = 1", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where id = 1", + "Table": "`user`", "Values": [ 1 ], @@ -85,9 +87,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user where id = 5", - "Table": "user", + "FieldQuery": "select id from `user` 
where 1 != 1", + "Query": "select id from `user` where id = 5", + "Table": "`user`", "Values": [ 5 ], @@ -96,6 +98,7 @@ ] } } +Gen4 plan same as above #almost dereks query - two queries with order by and limit being scattered to two different sets of tablets "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)" @@ -116,10 +119,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "0 DESC", - "Query": "select id from user order by id desc limit :__upper_limit", - "Table": "user" + "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", + "Table": "`user`" } ] }, @@ -134,9 +137,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from music where 1 != 1", + "FieldQuery": "select id, weight_string(id) from music where 1 != 1", "OrderBy": "0 DESC", - "Query": "select id from music order by id desc limit :__upper_limit", + "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", "Table": "music" } ] @@ -144,6 +147,7 @@ ] } } +Gen4 plan same as above # Union all "select col1, col2 from user union all select col1, col2 from user_extra" @@ -157,11 +161,12 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2 from user where 1 != 1 union all select col1, col2 from user_extra where 1 != 1", - "Query": "select col1, col2 from user union all select col1, col2 from user_extra", - "Table": "user" + "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1", + "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra", + "Table": "`user`" } } +Gen4 plan same as above # union operations in subqueries (FROM) "select * from (select * from user union all select * from user_extra) as t" @@ -175,9 +180,9 @@ "Name": "user", 
"Sharded": true }, - "FieldQuery": "select * from (select * from user where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1", - "Query": "select * from (select * from user union all select * from user_extra) as t", - "Table": "user" + "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1", + "Query": "select * from (select * from `user` union all select * from user_extra) as t", + "Table": "`user`" } } @@ -200,10 +205,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select id from user order by id asc limit :__upper_limit", - "Table": "user" + "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Table": "`user`" } ] }, @@ -218,9 +223,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from music where 1 != 1", + "FieldQuery": "select id, weight_string(id) from music where 1 != 1", "OrderBy": "0 DESC", - "Query": "select id from music order by id desc limit :__upper_limit", + "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", "Table": "music" } ] @@ -228,6 +233,7 @@ ] } } +Gen4 plan same as above # union all on scatter and single route "select id from user where id = 1 union select id from user where id = 1 union all select id from user" @@ -244,9 +250,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1 union select id from user where 1 != 1", - "Query": "select id from user where id = 1 union select id from user where id = 1", - "Table": "user", + "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1", + "Query": "select id from `user` where id = 1 union select id from `user` where id = 1", + "Table": "`user`", "Values": [ 1 ], @@ -259,13 +265,14 @@ 
"Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" } ] } } +Gen4 plan same as above # union of information_schema with normal table "select * from information_schema.a union select * from unsharded" @@ -304,6 +311,7 @@ ] } } +Gen4 plan same as above # union of information_schema with normal table "select * from unsharded union select * from information_schema.a" @@ -342,6 +350,7 @@ ] } } +Gen4 plan same as above # multi-shard union "(select id from user union select id from music) union select 1 from dual" @@ -367,9 +376,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -402,6 +411,7 @@ ] } } +Gen4 plan same as above # multi-shard union "select 1 from music union (select id from user union all select name from unsharded)" @@ -435,9 +445,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -457,6 +467,7 @@ ] } } +Gen4 plan same as above # multi-shard union "select 1 from music union (select id from user union select name from unsharded)" @@ -493,9 +504,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from user where 1 != 1", - "Query": "select id from user", - "Table": "user" + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -517,6 +528,7 @@ ] } } +Gen4 plan same as above # union with the same target 
shard because of vindex "select * from music where id = 1 union select * from user where id = 1" @@ -551,9 +563,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from user where 1 != 1", - "Query": "select * from user where id = 1", - "Table": "user", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 1", + "Table": "`user`", "Values": [ 1 ], @@ -564,6 +576,7 @@ ] } } +Gen4 plan same as above # union with different target shards "select 1 from music where id = 1 union select 1 from music where id = 2" @@ -611,6 +624,7 @@ ] } } +Gen4 plan same as above # multiple select statement have inner order by with union - TODO (systay) no need to send down ORDER BY if we are going to loose it with UNION DISTINCT "(select 1 from user order by 1 desc) union (select 1 from user order by 1 asc)" @@ -630,10 +644,10 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", + "FieldQuery": "select 1, weight_string(1) from `user` where 1 != 1", "OrderBy": "0 DESC", - "Query": "select 1 from user order by 1 desc", - "Table": "user" + "Query": "select 1, weight_string(1) from `user` order by 1 desc", + "Table": "`user`" }, { "OperatorType": "Route", @@ -642,16 +656,17 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", + "FieldQuery": "select 1, weight_string(1) from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select 1 from user order by 1 asc", - "Table": "user" + "Query": "select 1, weight_string(1) from `user` order by 1 asc", + "Table": "`user`" } ] } ] } } +Gen4 plan same as above # multiple unions "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user" @@ -682,15 +697,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 2.0 from user where 1 != 1", - "Query": "select 2.0 from user", - "Table": "user" + "FieldQuery": "select 2.0 from `user` where 1 != 1", + "Query": "select 2.0 from 
`user`", + "Table": "`user`" } ] } ] } } +Gen4 plan same as above # union distinct between a scatter query and a join (other side) "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user" @@ -707,7 +723,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -716,9 +732,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id, user.`name` from user where 1 != 1", - "Query": "select user.id, user.`name` from user", - "Table": "user" + "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", + "Query": "select `user`.id, `user`.`name` from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -740,15 +756,16 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 'b', 'c' from user where 1 != 1", - "Query": "select 'b', 'c' from user", - "Table": "user" + "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", + "Query": "select 'b', 'c' from `user`", + "Table": "`user`" } ] } ] } } +Gen4 plan same as above # union distinct between a scatter query and a join (other side) "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')" @@ -768,15 +785,15 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 'b', 'c' from user where 1 != 1", - "Query": "select 'b', 'c' from user", - "Table": "user" + "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", + "Query": "select 'b', 'c' from `user`", + "Table": "`user`" }, { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -785,9 +802,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user.id, user.`name` from user where 1 != 1", - "Query": "select user.id, user.`name` from 
user", - "Table": "user" + "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", + "Query": "select `user`.id, `user`.`name` from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -807,11 +824,14 @@ ] } } +Gen4 plan same as above # ambiguous LIMIT "select id from user limit 1 union all select id from music limit 1" "Incorrect usage of UNION and LIMIT - add parens to disambiguate your query (errno 1221) (sqlstate 21000)" +Gen4 plan same as above # different number of columns "select id, 42 from user where id = 1 union all select id from user where id = 5" -"The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from user where id = 1 union all select id from user where id = 5" +"The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from `user` where id = 1 union all select id from `user` where id = 5" +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt index 99970fe9d1a..ef39f949223 100644 --- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt @@ -1,7 +1,3 @@ -# SHOW -"show create database" -"plan building not supported" - # union operations in subqueries (expressions) "select * from user where id in (select * from user union select * from user_extra)" "unsupported: '*' expression in cross-shard query" @@ -13,14 +9,17 @@ # Unsupported INSERT statement with a target destination "insert into `user[-]`.user_metadata (a, b) values (1,2)" "unsupported: INSERT with a target destination" +Gen4 plan same as above # Unsupported delete statement with a replica target destination "DELETE FROM `user[-]@replica`.user_metadata limit 1" "unsupported: delete statement with a replica target" +Gen4 plan same as above # Unsupported update statement with a 
replica target destination "update `user[-]@replica`.user_metadata set id=2" "unsupported: update statement with a replica target" +Gen4 plan same as above # scatter order by with * expression "select * from user order by id" @@ -169,146 +168,182 @@ # subqueries in update "update user set col = (select id from unsharded)" "unsupported: subqueries in sharded DML" +Gen4 plan same as above # sharded subqueries in unsharded update "update unsharded set col = (select id from user)" "unsupported: sharded subqueries in DML" +Gen4 plan same as above # sharded join unsharded subqueries in unsharded update "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)" "unsupported: sharded subqueries in DML" +Gen4 plan same as above # subqueries in delete "delete from user where col = (select id from unsharded)" "unsupported: subqueries in sharded DML" +Gen4 plan same as above # sharded subqueries in unsharded delete "delete from unsharded where col = (select id from user)" "unsupported: sharded subqueries in DML" +Gen4 plan same as above # sharded delete with limit clasue "delete from user_extra limit 10" -"unsupported: multi shard delete with limit" +"multi shard delete with limit is not supported" +Gen4 plan same as above # sharded subquery in unsharded subquery in unsharded delete "delete from unsharded where col = (select id from unsharded where id = (select id from user))" "unsupported: sharded subqueries in DML" +Gen4 plan same as above # sharded join unsharded subqueries in unsharded delete "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)" "unsupported: sharded subqueries in DML" +Gen4 plan same as above # scatter update with limit clause "update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1" -"unsupported: multi shard update with limit" +"multi shard update with limit is not supported" +Gen4 plan same as above # multi delete multi table "delete user from user join 
user_extra on user.id = user_extra.id where user.name = 'foo'" "unsupported: multi-shard or vindex write statement" +Gen4 plan same as above # update changes primary vindex column "update user set id = 1 where id = 1" "unsupported: You can't update primary vindex columns. Invalid update on vindex: user_index" +Gen4 plan same as above # update changes non owned vindex column "update music_extra set music_id = 1 where user_id = 1" "unsupported: You can only update owned vindexes. Invalid update on vindex: music_user_map" +Gen4 plan same as above # update changes non lookup vindex column "update user_metadata set md5 = 1 where user_id = 1" "unsupported: You can only update lookup vindexes. Invalid update on vindex: user_md5_index" +Gen4 plan same as above # update with complex set clause "update music set id = id + 1 where id = 1" "unsupported: Only values are supported. Invalid update on column: id" +Gen4 plan same as above # update by primary keyspace id, changing one vindex column, limit without order clause "update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10" "unsupported: Need to provide order by clause when using limit. 
Invalid update on vindex: email_user_map" +Gen4 plan same as above # cross-shard update tables "update (select id from user) as u set id = 4" "unsupported: subqueries in sharded DML" +Gen4 plan same as above # join in update tables "update user join user_extra on user.id = user_extra.id set user.name = 'foo'" "unsupported: multi-shard or vindex write statement" +Gen4 plan same as above # multiple tables in update "update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id" "unsupported: multi-shard or vindex write statement" +Gen4 plan same as above # unsharded insert with cross-shard join" "insert into unsharded select u.col from user u join user u1" "unsupported: sharded subquery in insert values" +Gen4 plan same as above # unsharded insert with mismatched keyspaces" "insert into unsharded select col from user where id=1" "unsupported: sharded subquery in insert values" +Gen4 plan same as above # unsharded insert, unqualified names and auto-inc combined "insert into unsharded_auto select col from unsharded" "unsupported: auto-inc and select in insert" +Gen4 plan same as above # unsharded insert, with sharded subquery in insert value "insert into unsharded values((select 1 from user), 1)" "unsupported: sharded subquery in insert values" +Gen4 plan same as above # unsharded insert, no col list with auto-inc "insert into unsharded_auto values(1,1)" "column list required for tables with auto-inc columns" +Gen4 plan same as above # unsharded insert, col list does not match values "insert into unsharded_auto(id, val) values(1)" "column list doesn't match values" +Gen4 plan same as above # sharded upsert can't change vindex "insert into user(id) values(1) on duplicate key update id = 3" "unsupported: DML cannot change vindex column" +Gen4 plan same as above # sharded upsert can't change vindex using values function "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(id)" "unsupported: DML cannot change vindex column" +Gen4 
plan same as above # sharded insert from select "insert into user(id) select 1 from dual" "unsupported: insert into select" +Gen4 plan same as above # sharded replace no vindex "replace into user(val) values(1, 'foo')" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # sharded replace with vindex "replace into user(id, name) values(1, 'foo')" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # replace no column list "replace into user values(1, 2, 3)" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # replace with mimatched column list "replace into user(id) values (1, 2)" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # replace with one vindex "replace into user(id) values (1)" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # replace with non vindex on vindex-enabled table "replace into user(nonid) values (2)" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # replace with all vindexes supplied "replace into user(nonid, name, id) values (2, 'foo', 1)" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # replace for non-vindex autoinc "replace into user_extra(nonid) values (2)" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above # replace with multiple rows "replace into user(id) values (1), (2)" "unsupported: REPLACE INTO with sharded schema" +Gen4 plan same as above "select keyspace_id from user_index where id = 1 and id = 2" "unsupported: where clause for vindex function must be of the form id = (multiple filters)" @@ -337,18 +372,22 @@ # delete with unknown reference "delete music from user where id = 1" "Unknown table 'music' in MULTI DELETE" +Gen4 plan same as above # delete with multi-table targets "delete music,user from music inner join user where music.id = user.id" "unsupported: multi-shard or vindex write statement" +Gen4 plan same as above # order by inside and 
outside parenthesis select "(select 1 from user order by 1 desc) order by 1 asc limit 2" "can't do ORDER BY on top of ORDER BY" +Gen4 plan same as above # ambiguous ORDER BY "select id from user order by id union all select id from music order by id desc" "Incorrect usage of UNION and ORDER BY - add parens to disambiguate your query (errno 1221) (sqlstate 21000)" +Gen4 plan same as above # select get_lock with non-dual table "select get_lock('xyz', 10) from user" @@ -361,41 +400,30 @@ # insert using select get_lock from table "insert into user(pattern) SELECT GET_LOCK('xyz1', 10)" "unsupported: insert into select" +Gen4 plan same as above # union with SQL_CALC_FOUND_ROWS "(select sql_calc_found_rows id from user where id = 1 limit 1) union select id from user where id = 1" "SQL_CALC_FOUND_ROWS not supported with union" +Gen4 plan same as above # set with DEFAULT - vitess aware "set workload = default" "DEFAULT not supported for @@workload" +Gen4 plan same as above # set with DEFAULT - reserved connection "set sql_mode = default" "DEFAULT not supported for @@sql_mode" +Gen4 plan same as above # Multi shard query using into outfile s3 "select * from user into outfile s3 'out_file_name'" -"unsupported: this construct is not supported on sharded keyspace" +"INTO is not supported on sharded keyspace" # unsupported two predicates specifying the database for the same table if they are different -# will fail on run time but will pass on the planbuilder "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'constraint_schema' AND cc.table_schema = 'a'" -{ - "QueryType": "SELECT", - "Original": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'constraint_schema' AND cc.table_schema = 'a'", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectDBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 
cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1", - "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.table_schema = :__vtschemaname", - "SysTableTableSchema": "[VARBINARY(\"constraint_schema\"), VARBINARY(\"a\")]" - } -} +"two predicates for specifying the database are not supported" # create view with Cannot auto-resolve for cross-shard joins "create view user.view_a as select col from user join user_extra" @@ -404,10 +432,12 @@ # create view with join that cannot be served in each shard separately "create view user.view_a as select user_extra.id from user join user_extra" "Complex select queries are not supported in create or alter view statements" +Gen4 plan same as above # create view with sharded limit "create view user.view_a as select id from user order by id limit 10" "Complex select queries are not supported in create or alter view statements" +Gen4 plan same as above # create view with top level subquery in select "create view user.view_a as select a, (select col from user) from unsharded" diff --git a/go/vt/vtgate/planbuilder/testdata/use_cases.txt b/go/vt/vtgate/planbuilder/testdata/use_cases.txt index 0e8d1f6449d..bc7ce1e9687 100644 --- a/go/vt/vtgate/planbuilder/testdata/use_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/use_cases.txt @@ -8,6 +8,7 @@ "target": "ks" } } +Gen4 plan same as above # use db tablet "use ks@replica" @@ -19,6 +20,7 @@ "target": "ks@replica" } } +Gen4 plan same as above # use db target with : "use `ks:-80@replica`" @@ -30,6 +32,7 @@ "target": "ks:-80@replica" } } +Gen4 plan same as above # use db target with / "use `ks/80-@replica`" @@ -41,6 +44,7 @@ "target": "ks/80-@replica" } } +Gen4 plan same as above # reset db "use" @@ -51,3 +55,4 @@ "OperatorType": "UpdateTarget" } } +Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt 
b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt index c492ad196a7..d2ae819cd37 100644 --- a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt @@ -81,6 +81,7 @@ # ambiguous vindex reference "select id, keyspace_id, id from hash_dup where id = :id" "ambiguous vindex reference: hash_dup" +Gen4 plan same as above # disambiguated vindex reference "select id, keyspace_id, id from second_user.hash_dup where id = :id" diff --git a/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt b/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt index 187b59e158d..4fc36b8dd1e 100644 --- a/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt @@ -7,7 +7,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1,-1,2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -16,9 +16,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.id as uid from user as u where 1 != 1", - "Query": "select u.id as uid from user as u", - "Table": "user" + "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", + "Query": "select u.id as uid from `user` as u", + "Table": "`user`" }, { "OperatorType": "Route", @@ -44,7 +44,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1,-1,2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -53,9 +53,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.id as uid from user as u where 1 != 1", - "Query": "select u.id as uid from user as u", - "Table": "user" + "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", + "Query": "select u.id as uid from `user` as u", + "Table": "`user`" }, { "OperatorType": "Route", @@ -81,13 +81,13 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_user", + 
"TableName": "`user`_`user`_`user`", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -96,9 +96,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.id, u1.col from user as u1 where 1 != 1", - "Query": "select u1.id, u1.col from user as u1", - "Table": "user" + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1", + "Table": "`user`" }, { "OperatorType": "Route", @@ -107,9 +107,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u2 where 1 != 1", - "Query": "select 1 from user as u2", - "Table": "user" + "FieldQuery": "select 1 from `user` as u2 where 1 != 1", + "Query": "select 1 from `user` as u2", + "Table": "`user`" } ] }, @@ -120,9 +120,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u3 where 1 != 1", - "Query": "select 1 from user as u3 where u3.col = :u1_col", - "Table": "user" + "FieldQuery": "select 1 from `user` as u3 where 1 != 1", + "Query": "select 1 from `user` as u3 where u3.col = :u1_col", + "Table": "`user`" } ] } @@ -137,13 +137,13 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_user", + "TableName": "`user`_`user`_`user`", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -152,9 +152,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.id from user as u1 where 1 != 1", - "Query": "select u1.id from user as u1", - "Table": "user" + "FieldQuery": "select u1.id from `user` as u1 where 1 != 1", + "Query": "select u1.id from `user` as u1", + "Table": "`user`" }, { "OperatorType": "Route", @@ -163,9 +163,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u2.col from 
user as u2 where 1 != 1", - "Query": "select u2.col from user as u2", - "Table": "user" + "FieldQuery": "select u2.col from `user` as u2 where 1 != 1", + "Query": "select u2.col from `user` as u2", + "Table": "`user`" } ] }, @@ -176,9 +176,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u3 where 1 != 1", - "Query": "select 1 from user as u3 where u3.col = :u2_col", - "Table": "user" + "FieldQuery": "select 1 from `user` as u3 where 1 != 1", + "Query": "select 1 from `user` as u3 where u3.col = :u2_col", + "Table": "`user`" } ] } @@ -193,13 +193,13 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_user", + "TableName": "`user`_`user`_`user`", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -208,9 +208,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.id, u1.col from user as u1 where 1 != 1", - "Query": "select u1.id, u1.col from user as u1", - "Table": "user" + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1", + "Table": "`user`" }, { "OperatorType": "Route", @@ -219,9 +219,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u2 where 1 != 1", - "Query": "select 1 from user as u2 where u2.col = :u1_col", - "Table": "user" + "FieldQuery": "select 1 from `user` as u2 where 1 != 1", + "Query": "select 1 from `user` as u2 where u2.col = :u1_col", + "Table": "`user`" } ] }, @@ -232,9 +232,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u3 where 1 != 1", - "Query": "select 1 from user as u3 where u3.col = :u1_col", - "Table": "user" + "FieldQuery": "select 1 from `user` as u3 where 1 != 1", + "Query": "select 1 from `user` as u3 where u3.col = :u1_col", + "Table": "`user`" } ] } @@ -252,19 +252,19 @@ 
"OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_user_user", + "TableName": "`user`_`user`_`user`_`user`", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user_user", + "TableName": "`user`_`user`_`user`", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,-2", - "TableName": "user_user", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -273,9 +273,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.id, u1.col from user as u1 where 1 != 1", - "Query": "select u1.id, u1.col from user as u1", - "Table": "user" + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1", + "Table": "`user`" }, { "OperatorType": "Route", @@ -284,9 +284,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u2 where 1 != 1", - "Query": "select 1 from user as u2", - "Table": "user" + "FieldQuery": "select 1 from `user` as u2 where 1 != 1", + "Query": "select 1 from `user` as u2", + "Table": "`user`" } ] }, @@ -297,9 +297,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u3 where 1 != 1", - "Query": "select 1 from user as u3 where u3.id = :u1_col", - "Table": "user", + "FieldQuery": "select 1 from `user` as u3 where 1 != 1", + "Query": "select 1 from `user` as u3 where u3.id = :u1_col", + "Table": "`user`", "Values": [ ":u1_col" ], @@ -314,9 +314,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u4 where 1 != 1", - "Query": "select 1 from user as u4 where u4.col = :u1_col", - "Table": "user" + "FieldQuery": "select 1 from `user` as u4 where 1 != 1", + "Query": "select 1 from `user` as u4 where u4.col = :u1_col", + "Table": "`user`" } ] } @@ -331,7 +331,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1", - "TableName": "user_user_user", + 
"TableName": "`user`_`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -340,14 +340,14 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.id, u1.col from user as u1 where 1 != 1", - "Query": "select u1.id, u1.col from user as u1", - "Table": "user" + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1", + "Table": "`user`" }, { "OperatorType": "Join", "Variant": "Join", - "TableName": "user_user", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -356,9 +356,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u2 where 1 != 1", - "Query": "select 1 from user as u2 where u2.id = :u1_col", - "Table": "user", + "FieldQuery": "select 1 from `user` as u2 where 1 != 1", + "Query": "select 1 from `user` as u2 where u2.id = :u1_col", + "Table": "`user`", "Values": [ ":u1_col" ], @@ -371,9 +371,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user as u3 where 1 != 1", - "Query": "select 1 from user as u3 where u3.id = :u1_col", - "Table": "user", + "FieldQuery": "select 1 from `user` as u3 where 1 != 1", + "Query": "select 1 from `user` as u3 where u3.id = :u1_col", + "Table": "`user`", "Values": [ ":u1_col" ], @@ -394,7 +394,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "weird`name_unsharded", + "TableName": "`weird``name`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -405,7 +405,7 @@ }, "FieldQuery": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name` where 1 != 1", "Query": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name`", - "Table": "weird`name" + "Table": "`weird``name`" }, { "OperatorType": "Route", @@ -431,7 +431,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "1", - "TableName": "weird`name_unsharded", + "TableName": "`weird``name`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -442,7 +442,7 
@@ }, "FieldQuery": "select `weird``name`.`a``b*c` from `weird``name` where 1 != 1", "Query": "select `weird``name`.`a``b*c` from `weird``name`", - "Table": "weird`name" + "Table": "`weird``name`" }, { "OperatorType": "Route", @@ -472,7 +472,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -481,9 +481,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.id, u.col from user as u where 1 != 1", - "Query": "select u.id, u.col from user as u", - "Table": "user" + "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", + "Query": "select u.id, u.col from `user` as u", + "Table": "`user`" }, { "OperatorType": "Route", @@ -519,7 +519,7 @@ "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -528,9 +528,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.id, u.col from user as u where 1 != 1", - "Query": "select u.id, u.col from user as u", - "Table": "user" + "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", + "Query": "select u.id, u.col from `user` as u", + "Table": "`user`" }, { "OperatorType": "Route", @@ -554,9 +554,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user where 1 != 1", - "Query": "select 1 from user where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "user", + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals", + "Table": "`user`", "Values": [ "::__sq1" ], @@ -586,15 +586,15 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from user where 1 != 1", - "Query": "select col from user", - "Table": "user" + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" 
}, { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "-1,1,-2", - "TableName": "user_user_extra", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -603,9 +603,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.id, :__sq1, u.col from user as u where 1 != 1", - "Query": "select u.id, :__sq1, u.col from user as u", - "Table": "user" + "FieldQuery": "select u.id, :__sq1, u.col from `user` as u where 1 != 1", + "Query": "select u.id, :__sq1, u.col from `user` as u", + "Table": "`user`" }, { "OperatorType": "Route", diff --git a/go/vt/vtgate/planbuilder/union.go b/go/vt/vtgate/planbuilder/union.go index fe561b63f3f..7583934b6ef 100644 --- a/go/vt/vtgate/planbuilder/union.go +++ b/go/vt/vtgate/planbuilder/union.go @@ -29,11 +29,11 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" ) -func buildUnionPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildUnionPlan(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { union := stmt.(*sqlparser.Union) // For unions, create a pb with anonymous scope. 
- pb := newPrimitiveBuilder(vschema, newJointab(sqlparser.GetBindvars(union))) - if err := pb.processUnion(union, nil); err != nil { + pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) + if err := pb.processUnion(union, reservedVars, nil); err != nil { return nil, err } if err := pb.plan.Wireup(pb.plan, pb.jt); err != nil { @@ -42,13 +42,13 @@ func buildUnionPlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Pr return pb.plan.Primitive(), nil } -func (pb *primitiveBuilder) processUnion(union *sqlparser.Union, outer *symtab) error { - if err := pb.processPart(union.FirstStatement, outer, false); err != nil { +func (pb *primitiveBuilder) processUnion(union *sqlparser.Union, reservedVars sqlparser.BindVars, outer *symtab) error { + if err := pb.processPart(union.FirstStatement, reservedVars, outer, false); err != nil { return err } for _, us := range union.UnionSelects { rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processPart(us.Statement, outer, false); err != nil { + if err := rpb.processPart(us.Statement, reservedVars, outer, false); err != nil { return err } err := unionRouteMerge(pb.plan, rpb.plan, us) @@ -87,10 +87,10 @@ func (pb *primitiveBuilder) processUnion(union *sqlparser.Union, outer *symtab) return pb.pushLimit(union.Limit) } -func (pb *primitiveBuilder) processPart(part sqlparser.SelectStatement, outer *symtab, hasParens bool) error { +func (pb *primitiveBuilder) processPart(part sqlparser.SelectStatement, reservedVars sqlparser.BindVars, outer *symtab, hasParens bool) error { switch part := part.(type) { case *sqlparser.Union: - return pb.processUnion(part, outer) + return pb.processUnion(part, reservedVars, outer) case *sqlparser.Select: if part.SQLCalcFoundRows { return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "SQL_CALC_FOUND_ROWS not supported with union") @@ -101,9 +101,9 @@ func (pb *primitiveBuilder) processPart(part sqlparser.SelectStatement, outer *s return err } } - return pb.processSelect(part, 
outer, "") + return pb.processSelect(part, reservedVars, outer, "") case *sqlparser.ParenSelect: - err := pb.processPart(part.Select, outer, true) + err := pb.processPart(part.Select, reservedVars, outer, true) if err != nil { return err } @@ -168,7 +168,7 @@ func setLock(in logicalPlan, lock sqlparser.Lock) error { node.Select.SetLock(lock) return false, node, nil case *sqlCalcFoundRows, *vindexFunc: - return false, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%T.locking: unreachable", in) + return false, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable %T.locking", in) } return true, plan, nil }) diff --git a/go/vt/vtgate/planbuilder/update.go b/go/vt/vtgate/planbuilder/update.go index 79e4060dc8b..1d16b756a6a 100644 --- a/go/vt/vtgate/planbuilder/update.go +++ b/go/vt/vtgate/planbuilder/update.go @@ -27,9 +27,9 @@ import ( ) // buildUpdatePlan builds the instructions for an UPDATE statement. -func buildUpdatePlan(stmt sqlparser.Statement, vschema ContextVSchema) (engine.Primitive, error) { +func buildUpdatePlan(stmt sqlparser.Statement, reservedVars sqlparser.BindVars, vschema ContextVSchema) (engine.Primitive, error) { upd := stmt.(*sqlparser.Update) - dml, ksidVindex, ksidCol, err := buildDMLPlan(vschema, "update", upd, upd.TableExprs, upd.Where, upd.OrderBy, upd.Limit, upd.Comments, upd.Exprs) + dml, ksidVindex, ksidCol, err := buildDMLPlan(vschema, "update", stmt, reservedVars, upd.TableExprs, upd.Where, upd.OrderBy, upd.Limit, upd.Comments, upd.Exprs) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/vindex_func.go b/go/vt/vtgate/planbuilder/vindex_func.go index e29eb7a2308..8d9a6d0a74a 100644 --- a/go/vt/vtgate/planbuilder/vindex_func.go +++ b/go/vt/vtgate/planbuilder/vindex_func.go @@ -17,7 +17,9 @@ limitations under the License. 
package planbuilder import ( - "errors" + "fmt" + + "vitess.io/vitess/go/vt/vtgate/semantics" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -92,7 +94,12 @@ func (vf *vindexFunc) ResultColumns() []*resultColumn { } // Wireup implements the logicalPlan interface -func (vf *vindexFunc) Wireup(plan logicalPlan, jt *jointab) error { +func (vf *vindexFunc) Wireup(logicalPlan, *jointab) error { + return nil +} + +// Wireup2 implements the logicalPlan interface +func (vf *vindexFunc) WireupV4(*semantics.SemTable) error { return nil } @@ -124,9 +131,19 @@ func (vf *vindexFunc) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNu return rc, len(vf.resultColumns) - 1 } +// UnsupportedSupplyWeightString represents the error where the supplying a weight string is not supported +type UnsupportedSupplyWeightString struct { + Type string +} + +// Error function implements the error interface +func (err UnsupportedSupplyWeightString) Error() string { + return fmt.Sprintf("cannot do collation on %s", err.Type) +} + // SupplyWeightString implements the logicalPlan interface func (vf *vindexFunc) SupplyWeightString(colNumber int) (weightcolNumber int, err error) { - return 0, errors.New("cannot do collation on vindex function") + return 0, UnsupportedSupplyWeightString{Type: "vindex function"} } // Rewrite implements the logicalPlan interface @@ -137,6 +154,11 @@ func (vf *vindexFunc) Rewrite(inputs ...logicalPlan) error { return nil } +// ContainsTables implements the logicalPlan interface +func (vf *vindexFunc) ContainsTables() semantics.TableSet { + return 0 +} + // Inputs implements the logicalPlan interface func (vf *vindexFunc) Inputs() []logicalPlan { return []logicalPlan{} diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 5ee65dd5c45..7bd91787635 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -56,7 +56,6 @@ var ( mysqlTCPVersion = 
flag.String("mysql_tcp_version", "tcp", "Select tcp, tcp4, or tcp6 to control the socket type.") mysqlAuthServerImpl = flag.String("mysql_auth_server_impl", "static", "Which auth server implementation to use. Options: none, ldap, clientcert, static, vault.") mysqlAllowClearTextWithoutTLS = flag.Bool("mysql_allow_clear_text_without_tls", false, "If set, the server will allow the use of a clear text password over non-SSL connections.") - mysqlServerVersion = flag.String("mysql_server_version", mysql.DefaultServerVersion, "MySQL server version to advertise.") mysqlProxyProtocol = flag.Bool("proxy_protocol", false, "Enable HAProxy PROXY protocol on MySQL listener socket") mysqlServerRequireSecureTransport = flag.Bool("mysql_server_require_secure_transport", false, "Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided") @@ -65,13 +64,15 @@ var ( mysqlSslKey = flag.String("mysql_server_ssl_key", "", "Path to ssl key for mysql server plugin SSL") mysqlSslCa = flag.String("mysql_server_ssl_ca", "", "Path to ssl CA for mysql server plugin SSL. 
If specified, server will require and validate client certs.") + mysqlSslServerCA = flag.String("mysql_server_ssl_server_ca", "", "path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients") + mysqlSlowConnectWarnThreshold = flag.Duration("mysql_slow_connect_warn_threshold", 0, "Warn if it takes more than the given threshold for a mysql connection to establish") mysqlConnReadTimeout = flag.Duration("mysql_server_read_timeout", 0, "connection read timeout") mysqlConnWriteTimeout = flag.Duration("mysql_server_write_timeout", 0, "connection write timeout") mysqlQueryTimeout = flag.Duration("mysql_server_query_timeout", 0, "mysql query timeout") - mysqlDefaultWorkloadName = flag.String("mysql_default_workload", "UNSPECIFIED", "Default session workload (OLTP, OLAP, DBA)") + mysqlDefaultWorkloadName = flag.String("mysql_default_workload", "OLTP", "Default session workload (OLTP, OLAP, DBA)") mysqlDefaultWorkload int32 busyConnections int32 @@ -342,9 +343,10 @@ func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { IncludedFields: querypb.ExecuteOptions_ALL, Workload: querypb.ExecuteOptions_Workload(mysqlDefaultWorkload), }, - Autocommit: true, - DDLStrategy: *defaultDDLStrategy, - SessionUUID: u.String(), + Autocommit: true, + DDLStrategy: *defaultDDLStrategy, + SessionUUID: u.String(), + EnableSystemSettings: *sysVarSetEnabled, } if c.Capabilities&mysql.CapabilityClientFoundRows != 0 { session.Options.ClientFoundRows = true @@ -360,8 +362,8 @@ var sigChan chan os.Signal var vtgateHandle *vtgateHandler // initTLSConfig inits tls config for the given mysql listener -func initTLSConfig(mysqlListener *mysql.Listener, mysqlSslCert, mysqlSslKey, mysqlSslCa string, mysqlServerRequireSecureTransport bool) error { - serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa) +func initTLSConfig(mysqlListener *mysql.Listener, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslServerCA string, 
mysqlServerRequireSecureTransport bool) error { + serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslServerCA) if err != nil { log.Exitf("grpcutils.TLSServerConfig failed: %v", err) return err @@ -372,7 +374,7 @@ func initTLSConfig(mysqlListener *mysql.Listener, mysqlSslCert, mysqlSslKey, mys signal.Notify(sigChan, syscall.SIGHUP) go func() { for range sigChan { - serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa) + serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslServerCA) if err != nil { log.Errorf("grpcutils.TLSServerConfig failed: %v", err) } else { @@ -424,11 +426,11 @@ func initMySQLProtocol() { if err != nil { log.Exitf("mysql.NewListener failed: %v", err) } - if *mysqlServerVersion != "" { - mysqlListener.ServerVersion = *mysqlServerVersion + if *servenv.MySQLServerVersion != "" { + mysqlListener.ServerVersion = *servenv.MySQLServerVersion } if *mysqlSslCert != "" && *mysqlSslKey != "" { - initTLSConfig(mysqlListener, *mysqlSslCert, *mysqlSslKey, *mysqlSslCa, *mysqlServerRequireSecureTransport) + initTLSConfig(mysqlListener, *mysqlSslCert, *mysqlSslKey, *mysqlSslCa, *mysqlSslServerCA, *mysqlServerRequireSecureTransport) } mysqlListener.AllowClearTextWithoutTLS.Set(*mysqlAllowClearTextWithoutTLS) // Check for the connection threshold @@ -540,6 +542,13 @@ func rollbackAtShutdown() { log.Errorf("All connections did not go idle. 
Shutting down anyway.") } +func mysqlSocketPath() string { + if mysqlServerSocketPath == nil { + return "" + } + return *mysqlServerSocketPath +} + func init() { servenv.OnRun(initMySQLProtocol) servenv.OnTermSync(shutdownMysqlProtocolAndDrain) diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index 842e1909959..0fa7ff36810 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -234,8 +234,8 @@ func newTestAuthServerStatic() *mysql.AuthServerStatic { func TestDefaultWorkloadEmpty(t *testing.T) { vh := &vtgateHandler{} sess := vh.session(&mysql.Conn{}) - if sess.Options.Workload != querypb.ExecuteOptions_UNSPECIFIED { - t.Fatalf("Expected default workload UNSPECIFIED") + if sess.Options.Workload != querypb.ExecuteOptions_OLTP { + t.Fatalf("Expected default workload OLTP") } } @@ -248,7 +248,15 @@ func TestDefaultWorkloadOLAP(t *testing.T) { } } -func TestInitTLSConfig(t *testing.T) { +func TestInitTLSConfigWithoutServerCA(t *testing.T) { + testInitTLSConfig(t, false) +} + +func TestInitTLSConfigWithServerCA(t *testing.T) { + testInitTLSConfig(t, true) +} + +func testInitTLSConfig(t *testing.T, serverCA bool) { // Create the certs. 
root, err := ioutil.TempDir("", "TestInitTLSConfig") if err != nil { @@ -258,8 +266,13 @@ func TestInitTLSConfig(t *testing.T) { tlstest.CreateCA(root) tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", "server.example.com") + serverCACert := "" + if serverCA { + serverCACert = path.Join(root, "ca-cert.pem") + } + listener := &mysql.Listener{} - if err := initTLSConfig(listener, path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem"), true); err != nil { + if err := initTLSConfig(listener, path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem"), serverCACert, true); err != nil { t.Fatalf("init tls config failure due to: +%v", err) } diff --git a/go/vt/vtgate/queryz.go b/go/vt/vtgate/queryz.go index 19298574988..de4ca65dc58 100644 --- a/go/vt/vtgate/queryz.go +++ b/go/vt/vtgate/queryz.go @@ -37,11 +37,13 @@ var ( Count Time Shard Queries - Rows + RowsAffected + RowsReturned Errors Time per query Shard queries per query - Rows per query + RowsAffected per query + RowsReturned per query Errors per query @@ -52,11 +54,13 @@ var ( {{.Count}} {{.Time}} {{.ShardQueries}} - {{.Rows}} + {{.RowsAffected}} + {{.RowsReturned}} {{.Errors}} {{.TimePQ}} {{.ShardQueriesPQ}} - {{.RowsPQ}} + {{.RowsAffectedPQ}} + {{.RowsReturnedPQ}} {{.ErrorsPQ}} `)) @@ -70,7 +74,8 @@ type queryzRow struct { Count uint64 tm time.Duration ShardQueries uint64 - Rows uint64 + RowsAffected uint64 + RowsReturned uint64 Errors uint64 Color string } @@ -95,9 +100,15 @@ func (qzs *queryzRow) ShardQueriesPQ() string { return fmt.Sprintf("%.6f", val) } -// RowsPQ returns the row count per query as a string. -func (qzs *queryzRow) RowsPQ() string { - val := float64(qzs.Rows) / float64(qzs.Count) +// RowsAffectedPQ returns the row affected per query as a string. 
+func (qzs *queryzRow) RowsAffectedPQ() string { + val := float64(qzs.RowsAffected) / float64(qzs.Count) + return fmt.Sprintf("%.6f", val) +} + +// RowsReturnedPQ returns the row returned per query as a string. +func (qzs *queryzRow) RowsReturnedPQ() string { + val := float64(qzs.RowsReturned) / float64(qzs.Count) return fmt.Sprintf("%.6f", val) } @@ -124,23 +135,19 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { defer logz.EndHTMLTable(w) w.Write(queryzHeader) - keys := e.plans.Keys() sorter := queryzSorter{ - rows: make([]*queryzRow, 0, len(keys)), + rows: nil, less: func(row1, row2 *queryzRow) bool { return row1.timePQ() > row2.timePQ() }, } - for _, v := range e.plans.Keys() { - result, ok := e.plans.Get(v) - if !ok { - continue - } - plan := result.(*engine.Plan) + + e.plans.ForEach(func(value interface{}) bool { + plan := value.(*engine.Plan) Value := &queryzRow{ Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), } - Value.Count, Value.tm, Value.ShardQueries, Value.Rows, Value.Errors = plan.Stats() + Value.Count, Value.tm, Value.ShardQueries, Value.RowsAffected, Value.RowsReturned, Value.Errors = plan.Stats() var timepq time.Duration if Value.Count != 0 { timepq = time.Duration(uint64(Value.tm) / Value.Count) @@ -153,10 +160,12 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { Value.Color = "high" } sorter.rows = append(sorter.rows, Value) - } + return true + }) + sort.Sort(&sorter) - for _, Value := range sorter.rows { - if err := queryzTmpl.Execute(w, Value); err != nil { + for _, row := range sorter.rows { + if err := queryzTmpl.Execute(w, row); err != nil { log.Errorf("queryz: couldn't execute template: %v", err) } } diff --git a/go/vt/vtgate/queryz_test.go b/go/vt/vtgate/queryz_test.go index c82dd3f9b76..67147c4e678 100644 --- a/go/vt/vtgate/queryz_test.go +++ b/go/vt/vtgate/queryz_test.go @@ -43,23 +43,25 @@ func TestQueryzHandler(t *testing.T) { sql := "select id from user where id = 
1" _, err := executorExec(executor, sql, nil) require.NoError(t, err) + executor.plans.Wait() result, ok := executor.plans.Get("@master:" + sql) if !ok { t.Fatalf("couldn't get plan from cache") } plan1 := result.(*engine.Plan) - plan1.ExecTime = time.Duration(1 * time.Millisecond) + plan1.ExecTime = uint64(1 * time.Millisecond) // scatter sql = "select id from user" _, err = executorExec(executor, sql, nil) require.NoError(t, err) + executor.plans.Wait() result, ok = executor.plans.Get("@master:" + sql) if !ok { t.Fatalf("couldn't get plan from cache") } plan2 := result.(*engine.Plan) - plan2.ExecTime = time.Duration(1 * time.Second) + plan2.ExecTime = uint64(1 * time.Second) sql = "insert into user (id, name) values (:id, :name)" _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ @@ -67,6 +69,7 @@ func TestQueryzHandler(t *testing.T) { "name": sqltypes.BytesBindVariable([]byte("myname")), }) require.NoError(t, err) + executor.plans.Wait() result, ok = executor.plans.Get("@master:" + sql) if !ok { t.Fatalf("couldn't get plan from cache") @@ -88,8 +91,8 @@ func TestQueryzHandler(t *testing.T) { require.NoError(t, err) - plan3.ExecTime = time.Duration(100 * time.Millisecond) - plan4.ExecTime = time.Duration(200 * time.Millisecond) + plan3.ExecTime = uint64(100 * time.Millisecond) + plan4.ExecTime = uint64(200 * time.Millisecond) queryzHandler(executor, resp, req) body, _ := ioutil.ReadAll(resp.Body) @@ -99,10 +102,12 @@ func TestQueryzHandler(t *testing.T) { `1`, `0.001000`, `1`, + `0`, `1`, `0`, `0.001000`, `1.000000`, + `0.000000`, `1.000000`, `0.000000`, ``, @@ -114,10 +119,12 @@ func TestQueryzHandler(t *testing.T) { `1`, `1.000000`, `8`, + `0`, `8`, `0`, `1.000000`, `8.000000`, + `0.000000`, `8.000000`, `0.000000`, ``, @@ -131,10 +138,12 @@ func TestQueryzHandler(t *testing.T) { `2`, `2`, `0`, + `0`, `0.050000`, `1.000000`, `1.000000`, `0.000000`, + `0.000000`, ``, } checkQueryzHasPlan(t, planPattern3, plan3, body) @@ -146,10 +155,12 @@ func 
TestQueryzHandler(t *testing.T) { `2`, `2`, `0`, + `0`, `0.100000`, `1.000000`, `1.000000`, `0.000000`, + `0.000000`, ``, } checkQueryzHasPlan(t, planPattern4, plan4, body) diff --git a/go/vt/vtgate/resolver.go b/go/vt/vtgate/resolver.go index 25d466f1c82..a06a1949eab 100644 --- a/go/vt/vtgate/resolver.go +++ b/go/vt/vtgate/resolver.go @@ -74,7 +74,7 @@ func (res *Resolver) Execute( return nil, err } if logStats != nil { - logStats.ShardQueries = uint32(len(rss)) + logStats.ShardQueries = uint64(len(rss)) } autocommit := len(rss) == 1 && canAutocommit && session.AutocommitApproval() diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index 5a83abe1fce..dd63c2cd301 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -228,11 +228,11 @@ func (session *SafeSession) AppendOrUpdate(shardSession *vtgatepb.Session_ShardS // that needs to be stored as shard session. if session.autocommitState == autocommitted && shardSession.TransactionId != 0 { // Should be unreachable - return vterrors.New(vtrpcpb.Code_INTERNAL, "BUG: SafeSession.AppendOrUpdate: unexpected autocommit state") + return vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] unexpected 'autocommitted' state in transaction") } if !(session.Session.InTransaction || session.Session.InReservedConn) { // Should be unreachable - return vterrors.New(vtrpcpb.Code_INTERNAL, "BUG: SafeSession.AppendOrUpdate: not in transaction and not in reserved connection") + return vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] current session neither in transaction nor in reserved connection") } session.autocommitState = notAutocommittable @@ -247,7 +247,7 @@ func (session *SafeSession) AppendOrUpdate(shardSession *vtgatepb.Session_ShardS // isSingle is enforced only for normmal commit order operations. 
if session.isSingleDB(txMode) && len(session.ShardSessions) > 1 { session.mustRollback = true - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multi-db transaction attempted: %v", session.ShardSessions) + return vterrors.Errorf(vtrpcpb.Code_ABORTED, "multi-db transaction attempted: %v", session.ShardSessions) } case vtgatepb.CommitOrder_PRE: newSessions, err := addOrUpdate(shardSession, session.PreSessions) @@ -263,7 +263,7 @@ func (session *SafeSession) AppendOrUpdate(shardSession *vtgatepb.Session_ShardS session.PostSessions = newSessions default: // Should be unreachable - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: SafeSession.AppendOrUpdate: unexpected commitOrder") + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] SafeSession.AppendOrUpdate: unexpected commitOrder") } return nil @@ -452,7 +452,7 @@ func (session *SafeSession) ResetShard(tabletAlias *topodatapb.TabletAlias) erro session.PostSessions = newSessions default: // Should be unreachable - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: SafeSession.ResetShard: unexpected commitOrder") + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] SafeSession.ResetShard: unexpected commitOrder") } return nil } @@ -478,6 +478,20 @@ func (session *SafeSession) GetSessionUUID() string { return session.SessionUUID } +// SetSessionEnableSystemSettings set the SessionEnableSystemSettings setting. +func (session *SafeSession) SetSessionEnableSystemSettings(allow bool) { + session.mu.Lock() + defer session.mu.Unlock() + session.EnableSystemSettings = allow +} + +// GetSessionEnableSystemSettings returns the SessionEnableSystemSettings value. +func (session *SafeSession) GetSessionEnableSystemSettings() bool { + session.mu.Lock() + defer session.mu.Unlock() + return session.EnableSystemSettings +} + // SetReadAfterWriteGTID set the ReadAfterWriteGtid setting. 
func (session *SafeSession) SetReadAfterWriteGTID(vtgtid string) { session.mu.Lock() @@ -513,7 +527,7 @@ func removeShard(tabletAlias *topodatapb.TabletAlias, sessions []*vtgatepb.Sessi for i, session := range sessions { if proto.Equal(session.TabletAlias, tabletAlias) { if session.TransactionId != 0 { - return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "BUG: SafeSession.ResetShard: in transaction") + return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] removing shard session when in transaction") } idx = i } diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index beea97c3d84..1e4b0600241 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -169,7 +169,7 @@ func (stc *ScatterConn) ExecuteMultiShard( ) (qr *sqltypes.Result, errs []error) { if len(rss) != len(queries) { - return nil, []error{vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: got mismatched number of queries and shards")} + return nil, []error{vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] got mismatched number of queries and shards")} } // mu protects qr @@ -267,7 +267,7 @@ func (stc *ScatterConn) ExecuteMultiShard( case reserveBegin: innerqr, transactionID, reservedID, alias, err = qs.ReserveBeginExecute(ctx, rs.Target, session.SetPreQueries(), queries[i].Sql, queries[i].BindVariables, opts) default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected actionNeeded on ScatterConn#ExecuteMultiShard %v", info.actionNeeded) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected actionNeeded on query execution: %v", info.actionNeeded) } // We need to new shard info irrespective of the error. 
newInfo := info.updateTransactionAndReservedID(transactionID, reservedID, alias) @@ -286,7 +286,7 @@ func (stc *ScatterConn) ExecuteMultiShard( ) if !ignoreMaxMemoryRows && len(qr.Rows) > *maxMemoryRows { - return nil, []error{mysql.NewSQLError(mysql.ERNetPacketTooLarge, "", "in-memory row count exceeded allowed limit of %d", *maxMemoryRows)} + return nil, []error{vterrors.NewErrorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.NetPacketTooLarge, "in-memory row count exceeded allowed limit of %d", *maxMemoryRows)} } return qr, allErrors.GetErrors() @@ -312,7 +312,7 @@ func getQueryService(rs *srvtopo.ResolvedShard, info *shardActionInfo) (queryser _, usingLegacyGw := rs.Gateway.(*DiscoveryGateway) if usingLegacyGw && (info.actionNeeded == reserve || info.actionNeeded == reserveBegin) { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "reserved connections are not supported on old gen gateway") + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "reserved connections are not supported on old gen gateway") } if usingLegacyGw || info.alias == nil { return rs.Gateway, nil @@ -331,7 +331,7 @@ func (stc *ScatterConn) processOneStreamingResult(mu *sync.Mutex, fieldSent *boo } else { if len(qr.Fields) == 0 { // Unreachable: this can happen only if vttablet misbehaves. 
- return vterrors.New(vtrpcpb.Code_INTERNAL, "received rows before fields for shard") + return vterrors.New(vtrpcpb.Code_INTERNAL, "received rows before fields") } *fieldSent = true } @@ -662,7 +662,7 @@ func (stc *ScatterConn) ExecuteLock( switch info.actionNeeded { case nothing: if reservedID == 0 { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: reservedID zero not expected %v", reservedID) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] reserved id zero not expected %v", reservedID) } qr, err = qs.Execute(ctx, rs.Target, query.Sql, query.BindVariables, 0 /* transactionID */, reservedID, opts) if err != nil && wasConnectionClosed(err) { @@ -684,7 +684,7 @@ func (stc *ScatterConn) ExecuteLock( }) } default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected actionNeeded on ScatterConn#ExecuteLock %v", info.actionNeeded) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected actionNeeded on lock execution: %v", info.actionNeeded) } if err != nil { @@ -749,7 +749,7 @@ func lockInfo(target *querypb.Target, session *SafeSession) (*shardActionInfo, e } if !proto.Equal(target, session.LockSession.Target) { - return nil, vterrors.Errorf(vtrpcpb.Code_ALREADY_EXISTS, "target does match the existing lock session target: (%v, %v)", target, session.LockSession.Target) + return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "target does match the existing lock session target: (%v, %v)", target, session.LockSession.Target) } return &shardActionInfo{ diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 2e820ed2a09..ff1c4af62bc 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -402,11 +402,11 @@ func TestIsConnClosed(t *testing.T) { conClosed bool }{{ "server gone", - mysql.NewSQLError(mysql.CRServerGone, mysql.SSServerShutdown, ""), + mysql.NewSQLError(mysql.CRServerGone, mysql.SSNetError, ""), true, }, { "connection lost", - 
mysql.NewSQLError(mysql.CRServerLost, mysql.SSServerShutdown, ""), + mysql.NewSQLError(mysql.CRServerLost, mysql.SSNetError, ""), true, }, { "tx ended", diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go new file mode 100644 index 00000000000..fe7c65a86d7 --- /dev/null +++ b/go/vt/vtgate/semantics/analyzer.go @@ -0,0 +1,213 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" +) + +type ( + // analyzer is a struct to work with analyzing the query. 
+ analyzer struct { + Tables []table + + scopes []*scope + exprDeps map[sqlparser.Expr]TableSet + err error + } +) + +// newAnalyzer create the semantic analyzer +func newAnalyzer() *analyzer { + return &analyzer{ + exprDeps: map[sqlparser.Expr]TableSet{}, + } +} + +// analyzeDown pushes new scopes when we encounter sub queries, +// and resolves the table a column is using +func (a *analyzer) analyzeDown(cursor *sqlparser.Cursor) bool { + current := a.currentScope() + n := cursor.Node() + switch node := n.(type) { + case *sqlparser.Select: + a.push(newScope(current)) + if err := a.analyzeTableExprs(node.From); err != nil { + a.err = err + return false + } + case *sqlparser.DerivedTable: + a.err = Gen4NotSupportedF("derived tables") + case *sqlparser.TableExprs: + // this has already been visited when we encountered the SELECT struct + return false + + // we don't need to push new scope for sub queries since we do that for SELECT and UNION + + case *sqlparser.Union: + a.push(newScope(current)) + case *sqlparser.ColName: + t, err := a.resolveColumn(node, current) + if err != nil { + a.err = err + } + a.exprDeps[node] = t + } + return a.shouldContinue() +} + +func (a *analyzer) resolveColumn(colName *sqlparser.ColName, current *scope) (TableSet, error) { + var t table + var err error + if colName.Qualifier.IsEmpty() { + t, err = a.resolveUnQualifiedColumn(current, colName) + } else { + t, err = a.resolveQualifiedColumn(current, colName) + } + if err != nil { + return 0, err + } + return a.tableSetFor(t), nil +} + +func (a *analyzer) analyzeTableExprs(tablExprs sqlparser.TableExprs) error { + for _, tableExpr := range tablExprs { + if err := a.analyzeTableExpr(tableExpr); err != nil { + return err + } + } + return nil +} + +func (a *analyzer) analyzeTableExpr(tableExpr sqlparser.TableExpr) error { + switch table := tableExpr.(type) { + case *sqlparser.AliasedTableExpr: + if !table.As.IsEmpty() { + return Gen4NotSupportedF("table aliases") + } + return 
a.bindTable(table, table.Expr) + case *sqlparser.JoinTableExpr: + if table.Join != sqlparser.NormalJoinType { + return Gen4NotSupportedF("join type %s", table.Join.ToString()) + } + if err := a.analyzeTableExpr(table.LeftExpr); err != nil { + return err + } + if err := a.analyzeTableExpr(table.RightExpr); err != nil { + return err + } + case *sqlparser.ParenTableExpr: + return a.analyzeTableExprs(table.Exprs) + } + return nil +} + +// resolveQualifiedColumn handles `tabl.col` expressions +func (a *analyzer) resolveQualifiedColumn(current *scope, expr *sqlparser.ColName) (table, error) { + qualifier := expr.Qualifier.Name.String() + + for current != nil { + tableExpr, found := current.tables[qualifier] + if found { + return tableExpr, nil + } + current = current.parent + } + + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "Unknown table referenced by '%s'", sqlparser.String(expr)) +} + +// resolveUnQualifiedColumn +func (a *analyzer) resolveUnQualifiedColumn(current *scope, expr *sqlparser.ColName) (table, error) { + if len(current.tables) == 1 { + for _, tableExpr := range current.tables { + return tableExpr, nil + } + } + return nil, Gen4NotSupportedF("unable to map column to a table: %s", sqlparser.String(expr)) +} + +func (a *analyzer) tableSetFor(t table) TableSet { + for i, t2 := range a.Tables { + if t == t2 { + return TableSet(1 << i) + } + } + panic("unknown table") +} + +func (a *analyzer) bindTable(alias *sqlparser.AliasedTableExpr, expr sqlparser.SimpleTableExpr) error { + switch t := expr.(type) { + case *sqlparser.DerivedTable: + a.push(newScope(nil)) + if err := a.analyze(t.Select); err != nil { + return err + } + a.popScope() + scope := a.currentScope() + return scope.addTable(alias.As.String(), alias) + case sqlparser.TableName: + scope := a.currentScope() + a.Tables = append(a.Tables, alias) + if alias.As.IsEmpty() { + return scope.addTable(t.Name.String(), alias) + } + return 
scope.addTable(alias.As.String(), alias) + } + return nil +} + +func (a *analyzer) analyze(statement sqlparser.Statement) error { + _ = sqlparser.Rewrite(statement, a.analyzeDown, a.analyzeUp) + return a.err +} + +func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool { + switch cursor.Node().(type) { + case *sqlparser.Union, *sqlparser.Select: + a.popScope() + } + return true +} + +func (a *analyzer) shouldContinue() bool { + return a.err == nil +} + +func (a *analyzer) push(s *scope) { + a.scopes = append(a.scopes, s) +} + +func (a *analyzer) popScope() { + l := len(a.scopes) - 1 + a.scopes = a.scopes[:l] +} + +func (a *analyzer) currentScope() *scope { + size := len(a.scopes) + if size == 0 { + return nil + } + return a.scopes[size-1] +} + +// Gen4NotSupportedF returns a common error for shortcomings in the gen4 planner +func Gen4NotSupportedF(format string, args ...interface{}) error { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "gen4 does not yet support: "+format, args...) +} diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go new file mode 100644 index 00000000000..dc30488cac2 --- /dev/null +++ b/go/vt/vtgate/semantics/analyzer_test.go @@ -0,0 +1,194 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package semantics + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +const ( + // Just here to make outputs more readable + T0 TableSet = 1 << iota + T1 + T2 + _ // T3 is not used in the tests + T4 +) + +func extract(in *sqlparser.Select, idx int) sqlparser.Expr { + return in.SelectExprs[idx].(*sqlparser.AliasedExpr).Expr +} + +func TestScopeForSubqueries(t *testing.T) { + t.Skip("subqueries not yet supported") + query := ` +select t.col1, ( + select t.col2 from z as t) +from x as t` + stmt, semTable := parseAndAnalyze(t, query) + + sel, _ := stmt.(*sqlparser.Select) + + // extract the `t.col2` expression from the subquery + sel2 := sel.SelectExprs[1].(*sqlparser.AliasedExpr).Expr.(*sqlparser.Subquery).Select.(*sqlparser.Select) + s1 := semTable.Dependencies(extract(sel2, 0)) + + // if scoping works as expected, we should be able to see the inner table being used by the inner expression + assert.Equal(t, T1, s1) +} + +func TestBindingSingleTable(t *testing.T) { + queries := []string{ + "select col from tabl", + "select tabl.col from tabl", + "select d.tabl.col from tabl", + "select col from d.tabl", + "select tabl.col from d.tabl", + "select d.tabl.col from d.tabl", + } + for _, query := range queries { + t.Run(query, func(t *testing.T) { + stmt, semTable := parseAndAnalyze(t, query) + sel, _ := stmt.(*sqlparser.Select) + t1 := sel.From[0].(*sqlparser.AliasedTableExpr) + ts := semTable.TableSetFor(t1) + assert.EqualValues(t, 1, ts) + + d := semTable.Dependencies(extract(sel, 0)) + require.Equal(t, T0, d, query) + }) + } +} + +func TestUnion(t *testing.T) { + query := "select col1 from tabl1 union select col2 from tabl2" + + stmt, semTable := parseAndAnalyze(t, query) + union, _ := stmt.(*sqlparser.Union) + sel1 := union.FirstStatement.(*sqlparser.Select) + sel2 := union.UnionSelects[0].Statement.(*sqlparser.Select) + + t1 := 
sel1.From[0].(*sqlparser.AliasedTableExpr) + t2 := sel2.From[0].(*sqlparser.AliasedTableExpr) + ts1 := semTable.TableSetFor(t1) + ts2 := semTable.TableSetFor(t2) + assert.EqualValues(t, 1, ts1) + assert.EqualValues(t, 2, ts2) + + d1 := semTable.Dependencies(extract(sel1, 0)) + d2 := semTable.Dependencies(extract(sel2, 0)) + assert.Equal(t, T0, d1) + assert.Equal(t, T1, d2) +} + +func TestBindingMultiTable(t *testing.T) { + type testCase struct { + query string + deps TableSet + } + queries := []testCase{{ + query: "select t.col from t, s", + deps: T0, + }, { + query: "select t.col from t join s", + deps: T0, + }, { + query: "select max(t.col+s.col) from t, s", + deps: T0 | T1, + }, { + query: "select max(t.col+s.col) from t join s", + deps: T0 | T1, + }, { + query: "select case t.col when s.col then r.col else u.col end from t, s, r, w, u", + deps: T0 | T1 | T2 | T4, + //}, { + // // make sure that we don't let sub-query Dependencies leak out by mistake + // query: "select t.col + (select 42 from s) from t", + // deps: T0, + //}, { + // query: "select (select 42 from s where r.id = s.id) from r", + // deps: T0 | T1, + }} + for _, query := range queries { + t.Run(query.query, func(t *testing.T) { + stmt, semTable := parseAndAnalyze(t, query.query) + sel, _ := stmt.(*sqlparser.Select) + assert.Equal(t, query.deps, semTable.Dependencies(extract(sel, 0)), query.query) + }) + } +} + +func TestBindingSingleDepPerTable(t *testing.T) { + query := "select t.col + t.col2 from t" + stmt, semTable := parseAndAnalyze(t, query) + sel, _ := stmt.(*sqlparser.Select) + + d := semTable.Dependencies(extract(sel, 0)) + assert.Equal(t, 1, d.NumberOfTables(), "size wrong") + assert.Equal(t, T0, d) +} + +func TestNotUniqueTableName(t *testing.T) { + queries := []string{ + "select * from t, t", + "select * from t, (select 1 from x) as t", + "select * from t join t", + "select * from t join (select 1 from x) as t", + } + + for _, query := range queries { + t.Run(query, func(t *testing.T) { 
+ if strings.Contains(query, "as") { + t.Skip("table alias not implemented") + } + parse, _ := sqlparser.Parse(query) + _, err := Analyse(parse) + require.Error(t, err) + require.Contains(t, err.Error(), "Not unique table/alias") + }) + } +} + +func TestMissingTable(t *testing.T) { + queries := []string{ + "select t.col from a", + } + + for _, query := range queries { + t.Run(query, func(t *testing.T) { + parse, _ := sqlparser.Parse(query) + _, err := Analyse(parse) + require.Error(t, err) + require.Contains(t, err.Error(), "Unknown table") + }) + } +} + +func parseAndAnalyze(t *testing.T, query string) (sqlparser.Statement, *SemTable) { + parse, err := sqlparser.Parse(query) + require.NoError(t, err) + semTable, err := Analyse(parse) + require.NoError(t, err) + return parse, semTable +} diff --git a/go/vt/vtgate/semantics/semantic_state.go b/go/vt/vtgate/semantics/semantic_state.go new file mode 100644 index 00000000000..674b6c1b6d0 --- /dev/null +++ b/go/vt/vtgate/semantics/semantic_state.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + + "vitess.io/vitess/go/vt/sqlparser" +) + +type ( + table = *sqlparser.AliasedTableExpr + + // TableSet is how a set of tables is expressed. 
+ // Tables get unique bits assigned in the order that they are encountered during semantic analysis + TableSet uint64 // we can only join 64 tables with this underlying data type + // TODO : change uint64 to struct to support arbitrary number of tables. + + // SemTable contains semantic analysis information about the query. + SemTable struct { + Tables []table + exprDependencies map[sqlparser.Expr]TableSet + } + + scope struct { + parent *scope + tables map[string]*sqlparser.AliasedTableExpr + } +) + +// NewSemTable creates a new empty SemTable +func NewSemTable() *SemTable { + return &SemTable{exprDependencies: map[sqlparser.Expr]TableSet{}} +} + +// TableSetFor returns the bitmask for this particular tableshoe +func (st *SemTable) TableSetFor(t table) TableSet { + for idx, t2 := range st.Tables { + if t == t2 { + return 1 << idx + } + } + return 0 +} + +// Dependencies return the table dependencies of the expression. +func (st *SemTable) Dependencies(expr sqlparser.Expr) TableSet { + deps, found := st.exprDependencies[expr] + if found { + return deps + } + + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + colName, ok := node.(*sqlparser.ColName) + if ok { + set := st.exprDependencies[colName] + deps |= set + } + return true, nil + }, expr) + + st.exprDependencies[expr] = deps + + return deps +} + +func newScope(parent *scope) *scope { + return &scope{tables: map[string]*sqlparser.AliasedTableExpr{}, parent: parent} +} + +func (s *scope) addTable(name string, table *sqlparser.AliasedTableExpr) error { + _, found := s.tables[name] + if found { + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.NonUniqTable, "Not unique table/alias: '%s'", name) + } + s.tables[name] = table + return nil +} + +// Analyse analyzes the parsed query. 
+func Analyse(statement sqlparser.Statement) (*SemTable, error) { + analyzer := newAnalyzer() + // Initial scope + err := analyzer.analyze(statement) + if err != nil { + return nil, err + } + return &SemTable{exprDependencies: analyzer.exprDeps, Tables: analyzer.Tables}, nil +} + +// IsOverlapping returns true if at least one table exists in both sets +func (ts TableSet) IsOverlapping(b TableSet) bool { return ts&b != 0 } + +// IsSolvedBy returns true if all of `ts` is contained in `b` +func (ts TableSet) IsSolvedBy(b TableSet) bool { return ts&b == ts } + +// NumberOfTables returns the number of bits set +func (ts TableSet) NumberOfTables() int { + // Brian Kernighan’s Algorithm + count := 0 + for ts > 0 { + ts &= ts - 1 + count++ + } + return count +} + +// Constituents returns an slice with all the +// individual tables in their own TableSet identifier +func (ts TableSet) Constituents() (result []TableSet) { + mask := ts + + for mask > 0 { + maskLeft := mask & (mask - 1) + constituent := mask ^ maskLeft + mask = maskLeft + result = append(result, constituent) + } + return +} + +// Merge creates a TableSet that contains both inputs +func (ts TableSet) Merge(other TableSet) TableSet { + return ts | other +} diff --git a/go/vt/vtgate/semantics/tabletset_test.go b/go/vt/vtgate/semantics/tabletset_test.go new file mode 100644 index 00000000000..8ff41c57ecb --- /dev/null +++ b/go/vt/vtgate/semantics/tabletset_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + _ TableSet = 1 << iota + F1 + F2 + F3 +) + +func TestTableSet_IsOverlapping(t *testing.T) { + assert.True(t, (F1 | F2).IsOverlapping(F1|F2)) + assert.True(t, F1.IsOverlapping(F1|F2)) + assert.True(t, (F1 | F2).IsOverlapping(F1)) + assert.False(t, F3.IsOverlapping(F1|F2)) + assert.False(t, (F1 | F2).IsOverlapping(F3)) +} + +func TestTableSet_IsSolvedBy(t *testing.T) { + assert.True(t, F1.IsSolvedBy(F1|F2)) + assert.False(t, (F1 | F2).IsSolvedBy(F1)) + assert.False(t, F3.IsSolvedBy(F1|F2)) + assert.False(t, (F1 | F2).IsSolvedBy(F3)) +} + +func TestTableSet_Constituents(t *testing.T) { + assert.Equal(t, []TableSet{F1, F2, F3}, (F1 | F2 | F3).Constituents()) + assert.Equal(t, []TableSet{F1, F2}, (F1 | F2).Constituents()) + assert.Equal(t, []TableSet{F1, F3}, (F1 | F3).Constituents()) + assert.Equal(t, []TableSet{F2, F3}, (F2 | F3).Constituents()) + assert.Empty(t, TableSet(0).Constituents()) +} diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index 6e70058d241..504945bd70a 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -25,8 +25,6 @@ import ( "sync" "time" - "vitess.io/vitess/go/vt/topotools" - "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/discovery" @@ -219,8 +217,7 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, retryDone, bufferErr := gw.buffer.WaitForFailoverEnd(ctx, target.Keyspace, target.Shard, err) if bufferErr != nil { // Buffering failed e.g. buffer is already full. Do not retry. 
- err = vterrors.Errorf( - vterrors.Code(bufferErr), + err = vterrors.Errorf(vterrors.Code(bufferErr), "failed to automatically buffer and retry failed request during failover: %v original err (type=%T): %v", bufferErr, err, err) break @@ -238,7 +235,7 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, tablets := gw.hc.GetHealthyTabletStats(target) if len(tablets) == 0 { // fail fast if there is no tablet - err = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "no valid tablet") + err = vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no healthy tablet available for '%s'", target.String()) break } gw.shuffleTablets(gw.localCell, tablets) @@ -277,7 +274,7 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, } break } - return NewShardError(err, target, tabletLastUsed) + return NewShardError(err, target) } func (gw *TabletGateway) updateStats(target *querypb.Target, startTime time.Time, err error) { @@ -355,13 +352,10 @@ func (gw *TabletGateway) TabletsCacheStatus() discovery.TabletsCacheStatusList { } // NewShardError returns a new error with the shard info amended. 
-func NewShardError(in error, target *querypb.Target, tablet *topodatapb.Tablet) error { +func NewShardError(in error, target *querypb.Target) error { if in == nil { return nil } - if tablet != nil { - return vterrors.Wrapf(in, "target: %s.%s.%s, used tablet: %s", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), topotools.TabletIdent(tablet)) - } if target != nil { return vterrors.Wrapf(in, "target: %s.%s.%s", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType)) } diff --git a/go/vt/vtgate/tabletgateway_test.go b/go/vt/vtgate/tabletgateway_test.go index acb788cfe97..09ffbc41b3d 100644 --- a/go/vt/vtgate/tabletgateway_test.go +++ b/go/vt/vtgate/tabletgateway_test.go @@ -31,12 +31,10 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topotools" - querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/topo" ) func TestTabletGatewayExecute(t *testing.T) { @@ -202,7 +200,7 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu tg := NewTabletGateway(context.Background(), hc, nil, "cell") // no tablet - want := []string{"target: ks.0.replica", "no valid tablet"} + want := []string{"target: ks.0.replica", `no healthy tablet available for 'keyspace:"ks" shard:"0" tablet_type:REPLICA`} err := f(tg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) @@ -224,14 +222,9 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu sc2 := hc.AddTestTablet("cell", host, port+1, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - ep1 := sc1.Tablet() - ep2 := sc2.Tablet() err = f(tg, target) verifyContainsError(t, err, "target: ks.0.replica", 
vtrpcpb.Code_FAILED_PRECONDITION) - verifyShardErrorEither(t, err, - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep1)), - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep2))) // fatal error hc.Reset() @@ -239,21 +232,15 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu sc2 = hc.AddTestTablet("cell", host, port+1, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - ep1 = sc1.Tablet() - ep2 = sc2.Tablet() err = f(tg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) - verifyShardErrorEither(t, err, - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep1)), - fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep2))) // server error - no retry hc.Reset() sc1 = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - ep1 = sc1.Tablet() err = f(tg, target) - verifyContainsError(t, err, fmt.Sprintf(`used tablet: %s`, topotools.TabletIdent(ep1)), vtrpcpb.Code_INVALID_ARGUMENT) + assert.Equal(t, vtrpcpb.Code_INVALID_ARGUMENT, vterrors.Code(err)) // no failure hc.Reset() @@ -284,24 +271,16 @@ func testTabletGatewayTransact(t *testing.T, f func(tg *TabletGateway, target *q sc2 := hc.AddTestTablet("cell", host, port+1, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - ep1 := sc1.Tablet() - ep2 := sc2.Tablet() err := f(tg, target) verifyContainsError(t, err, "target: ks.0.master", vtrpcpb.Code_FAILED_PRECONDITION) - format := `used tablet: %s` - verifyShardErrorEither(t, err, - fmt.Sprintf(format, topotools.TabletIdent(ep1)), - fmt.Sprintf(format, topotools.TabletIdent(ep2))) // server error - no retry hc.Reset() sc1 = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, 
nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - ep1 = sc1.Tablet() err = f(tg, target) verifyContainsError(t, err, "target: ks.0.master", vtrpcpb.Code_INVALID_ARGUMENT) - verifyContainsError(t, err, fmt.Sprintf(format, topotools.TabletIdent(ep1)), vtrpcpb.Code_INVALID_ARGUMENT) } func verifyContainsError(t *testing.T, err error, wantErr string, wantCode vtrpcpb.Code) { @@ -314,13 +293,6 @@ func verifyContainsError(t *testing.T, err error, wantErr string, wantCode vtrpc } } -func verifyShardErrorEither(t *testing.T, err error, a, b string) { - require.Error(t, err) - if !strings.Contains(err.Error(), a) && !strings.Contains(err.Error(), b) { - assert.Failf(t, "", "wanted error to contain: %v or %v\n, got error: [[%v]]", a, b, err) - } -} - func verifyShardErrors(t *testing.T, err error, wantErrors []string, wantCode vtrpcpb.Code) { require.Error(t, err) for _, wantErr := range wantErrors { diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index dea545b5bf5..a71feb9cbfc 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -469,7 +469,7 @@ func TestTxConnCommitOrderFailure3(t *testing.T) { // The last failed commit must generate a warning. 
wantSession := vtgatepb.Session{ Warnings: []*querypb.QueryWarning{{ - Message: "post-operation transaction had an error: Code: INVALID_ARGUMENT\nINVALID_ARGUMENT error\n\ntarget: TestTxConn.1.master, used tablet: aa-0 (1)", + Message: "post-operation transaction had an error: Code: INVALID_ARGUMENT\nINVALID_ARGUMENT error\n\ntarget: TestTxConn.1.master", }}, } utils.MustMatch(t, &wantSession, session.Session, "Session") diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index 54e258bff97..9477be91939 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -23,10 +23,14 @@ import ( "sync/atomic" "time" - "golang.org/x/sync/errgroup" - "vitess.io/vitess/go/mysql" + "github.com/prometheus/common/log" + + "vitess.io/vitess/go/vt/vtgate/semantics" + + "golang.org/x/sync/errgroup" + "vitess.io/vitess/go/vt/callerid" vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/schema" @@ -69,6 +73,7 @@ type iExecute interface { // TODO: remove when resolver is gone ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) + VSchema() *vindexes.VSchema } //VSchemaOperator is an interface to Vschema Operations @@ -98,6 +103,10 @@ type vcursorImpl struct { ignoreMaxMemoryRows bool vschema *vindexes.VSchema vm VSchemaOperator + semTable *semantics.SemTable + warnShardedOnly bool // when using sharded only features, a warning will be warnings field + + warnings []*querypb.QueryWarning // any warnings that are accumulated during the planning phase are stored here } func (vc *vcursorImpl) GetKeyspace() string { @@ -110,10 +119,10 @@ func (vc *vcursorImpl) ExecuteVSchema(keyspace string, vschemaDDL *sqlparser.Alt return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema not loaded") } - allowed := vschemaacl.Authorized(callerid.ImmediateCallerIDFromContext(vc.ctx)) + user := callerid.ImmediateCallerIDFromContext(vc.ctx) + allowed := vschemaacl.Authorized(user) if !allowed { - 
return vterrors.Errorf(vtrpcpb.Code_PERMISSION_DENIED, "not authorized to perform vschema operations") - + return vterrors.NewErrorf(vtrpcpb.Code_PERMISSION_DENIED, vterrors.AccessDeniedError, "User '%s' is not allowed to perform vschema operations", user.GetUsername()) } // Resolve the keyspace either from the table qualifier or the target keyspace @@ -155,6 +164,7 @@ func newVCursorImpl( vschema *vindexes.VSchema, resolver *srvtopo.Resolver, serv srvtopo.Server, + warnShardedOnly bool, ) (*vcursorImpl, error) { keyspace, tabletType, destination, err := parseDestinationTarget(safeSession.TargetString, vschema) if err != nil { @@ -163,7 +173,7 @@ func newVCursorImpl( // With DiscoveryGateway transactions are only allowed on master. if UsingLegacyGateway() && safeSession.InTransaction() && tabletType != topodatapb.TabletType_MASTER { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "newVCursorImpl: transactions are supported only for master tablet types, current type: %v", tabletType) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "transaction is supported only for master tablet type, current type: %v", tabletType) } var ts *topo.Server // We don't have access to the underlying TopoServer if this vtgate is @@ -176,18 +186,19 @@ func newVCursorImpl( } return &vcursorImpl{ - ctx: ctx, - safeSession: safeSession, - keyspace: keyspace, - tabletType: tabletType, - destination: destination, - marginComments: marginComments, - executor: executor, - logStats: logStats, - resolver: resolver, - vschema: vschema, - vm: vm, - topoServer: ts, + ctx: ctx, + safeSession: safeSession, + keyspace: keyspace, + tabletType: tabletType, + destination: destination, + marginComments: marginComments, + executor: executor, + logStats: logStats, + resolver: resolver, + vschema: vschema, + vm: vm, + topoServer: ts, + warnShardedOnly: warnShardedOnly, }, nil } @@ -275,7 +286,7 @@ func (vc *vcursorImpl) FindTableOrVindex(name sqlparser.TableName) (*vindexes.Ta return nil, 
nil, "", destTabletType, nil, err } if destKeyspace == "" { - destKeyspace = vc.keyspace + destKeyspace = vc.getActualKeyspace() } table, vindex, err := vc.vschema.FindTableOrVindex(destKeyspace, name.Name.String(), vc.tabletType) if err != nil { @@ -284,6 +295,17 @@ func (vc *vcursorImpl) FindTableOrVindex(name sqlparser.TableName) (*vindexes.Ta return table, vindex, destKeyspace, destTabletType, dest, nil } +func (vc *vcursorImpl) getActualKeyspace() string { + if !sqlparser.SystemSchema(vc.keyspace) { + return vc.keyspace + } + ks, err := vc.AnyKeyspace() + if err != nil { + return "" + } + return ks.Name +} + // DefaultKeyspace returns the default keyspace of the current request // if there is one. If the keyspace specified in the target cannot be // identified, it returns an error. @@ -293,11 +315,13 @@ func (vc *vcursorImpl) DefaultKeyspace() (*vindexes.Keyspace, error) { } ks, ok := vc.vschema.Keyspaces[vc.keyspace] if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyspace %s not found in vschema", vc.keyspace) + return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Unknown database '%s' in vschema", vc.keyspace) } return ks.Keyspace, nil } +var errNoDbAvailable = vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "no database available") + func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) { keyspace, err := vc.DefaultKeyspace() if err == nil { @@ -308,7 +332,7 @@ func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) { } if len(vc.vschema.Keyspaces) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "no keyspaces available") + return nil, errNoDbAvailable } // Looks for any sharded keyspace if present, otherwise take any keyspace. 
@@ -323,7 +347,7 @@ func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) { func (vc *vcursorImpl) FirstSortedKeyspace() (*vindexes.Keyspace, error) { if len(vc.vschema.Keyspaces) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "no keyspaces available") + return nil, errNoDbAvailable } kss := vc.vschema.Keyspaces keys := make([]string, 0, len(kss)) @@ -337,7 +361,7 @@ func (vc *vcursorImpl) FirstSortedKeyspace() (*vindexes.Keyspace, error) { // SysVarSetEnabled implements the ContextVSchema interface func (vc *vcursorImpl) SysVarSetEnabled() bool { - return *sysVarSetEnabled + return vc.GetSessionEnableSystemSettings() } // KeyspaceExists provides whether the keyspace exists or not. @@ -345,9 +369,10 @@ func (vc *vcursorImpl) KeyspaceExists(ks string) bool { return vc.vschema.Keyspaces[ks] != nil } +// AllKeyspace implements the ContextVSchema interface func (vc *vcursorImpl) AllKeyspace() ([]*vindexes.Keyspace, error) { if len(vc.vschema.Keyspaces) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "no keyspaces available") + return nil, errNoDbAvailable } var kss []*vindexes.Keyspace for _, ks := range vc.vschema.Keyspaces { @@ -356,6 +381,34 @@ func (vc *vcursorImpl) AllKeyspace() ([]*vindexes.Keyspace, error) { return kss, nil } +// Planner implements the ContextVSchema interface +func (vc *vcursorImpl) Planner() planbuilder.PlannerVersion { + if vc.safeSession.Options != nil && + vc.safeSession.Options.PlannerVersion != querypb.ExecuteOptions_DEFAULT_PLANNER { + return vc.safeSession.Options.PlannerVersion + } + switch strings.ToLower(*plannerVersion) { + case "v3": + return planbuilder.V3 + case "gen4": + return planbuilder.Gen4 + case "gen4greedy", "greedy": + return planbuilder.Gen4GreedyOnly + case "left2right": + return planbuilder.Gen4Left2Right + case "gen4fallback": + return planbuilder.Gen4WithFallback + } + + log.Warn("unknown planner version configured. 
using the default") + return planbuilder.V3 +} + +// GetSemTable implements the ContextVSchema interface +func (vc *vcursorImpl) GetSemTable() *semantics.SemTable { + return vc.semTable +} + // TargetString returns the current TargetString of the session. func (vc *vcursorImpl) TargetString() string { return vc.safeSession.TargetString @@ -381,7 +434,7 @@ func (vc *vcursorImpl) Execute(method string, query string, bindVars map[string] // ExecuteMultiShard is part of the engine.VCursor interface. func (vc *vcursorImpl) ExecuteMultiShard(rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, autocommit bool) (*sqltypes.Result, []error) { - atomic.AddUint32(&vc.logStats.ShardQueries, uint32(len(queries))) + atomic.AddUint64(&vc.logStats.ShardQueries, uint64(len(queries))) qr, errs := vc.executor.ExecuteMultiShard(vc.ctx, rss, commentedShardQueries(queries, vc.marginComments), vc.safeSession, autocommit, vc.ignoreMaxMemoryRows) if errs == nil && rollbackOnError { @@ -436,13 +489,13 @@ func (vc *vcursorImpl) ExecuteStandalone(query string, bindVars map[string]*quer // StreamExeculteMulti is the streaming version of ExecuteMultiShard. func (vc *vcursorImpl) StreamExecuteMulti(query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, callback func(reply *sqltypes.Result) error) error { - atomic.AddUint32(&vc.logStats.ShardQueries, uint32(len(rss))) + atomic.AddUint64(&vc.logStats.ShardQueries, uint64(len(rss))) return vc.executor.StreamExecuteMulti(vc.ctx, vc.marginComments.Leading+query+vc.marginComments.Trailing, rss, bindVars, vc.safeSession.Options, callback) } // ExecuteKeyspaceID is part of the engine.VCursor interface. 
func (vc *vcursorImpl) ExecuteKeyspaceID(keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) { - atomic.AddUint32(&vc.logStats.ShardQueries, 1) + atomic.AddUint64(&vc.logStats.ShardQueries, 1) rss, _, err := vc.ResolveDestinations(keyspace, nil, []key.Destination{key.DestinationKeyspaceID(ksid)}) if err != nil { return nil, err @@ -476,11 +529,11 @@ func (vc *vcursorImpl) SetTarget(target string) error { return err } if _, ok := vc.vschema.Keyspaces[keyspace]; !ignoreKeyspace(keyspace) && !ok { - return mysql.NewSQLError(mysql.ERBadDb, mysql.SSSyntaxErrorOrAccessViolation, "Unknown database '%s'", keyspace) + return vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Unknown database '%s'", keyspace) } if vc.safeSession.InTransaction() && tabletType != topodatapb.TabletType_MASTER { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cannot change to a non-master type in the middle of a transaction: %v", tabletType) + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.LockOrActiveTransaction, "Can't execute the given command because you have an active transaction") } vc.safeSession.SetTargetString(target) return nil @@ -540,7 +593,7 @@ func (vc *vcursorImpl) TabletType() topodatapb.TabletType { // SubmitOnlineDDL implements the VCursor interface func (vc *vcursorImpl) SubmitOnlineDDL(onlineDDl *schema.OnlineDDL) error { if vc.topoServer == nil { - return vterrors.New(vtrpcpb.Code_INTERNAL, "Unable to apply DDL toposerver unavailable, ensure this vtgate is not using filtered keyspaces") + return vterrors.New(vtrpcpb.Code_INTERNAL, "Unable to apply DDL because toposerver is unavailable, ensure this vtgate is not using filtered keyspaces") } conn, err := vc.topoServer.ConnForCell(vc.ctx, topo.GlobalCell) if err != nil { @@ -571,11 +624,11 @@ func (vc *vcursorImpl) TargetDestination(qualifier string) (key.Destination, *vi keyspaceName = qualifier } 
if keyspaceName == "" { - return nil, nil, 0, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified") + return nil, nil, 0, errNoKeyspace } keyspace := vc.vschema.Keyspaces[keyspaceName] if keyspace == nil { - return nil, nil, 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no keyspace with name [%s] found", keyspaceName) + return nil, nil, 0, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Unknown database '%s' in vschema", keyspaceName) } return vc.destination, keyspace.Keyspace, vc.tabletType, nil } @@ -619,6 +672,11 @@ func (vc *vcursorImpl) SetWorkload(workload querypb.ExecuteOptions_Workload) { vc.safeSession.GetOrCreateOptions().Workload = workload } +// SetPlannerVersion implements the SessionActions interface +func (vc *vcursorImpl) SetPlannerVersion(v planbuilder.PlannerVersion) { + vc.safeSession.GetOrCreateOptions().PlannerVersion = v +} + // SetFoundRows implements the SessionActions interface func (vc *vcursorImpl) SetFoundRows(foundRows uint64) { vc.safeSession.FoundRows = foundRows @@ -640,21 +698,76 @@ func (vc *vcursorImpl) GetSessionUUID() string { return vc.safeSession.GetSessionUUID() } +// SetSessionEnableSystemSettings implements the SessionActions interface +func (vc *vcursorImpl) SetSessionEnableSystemSettings(allow bool) error { + vc.safeSession.SetSessionEnableSystemSettings(allow) + return nil +} + +// GetSessionEnableSystemSettings implements the SessionActions interface +func (vc *vcursorImpl) GetSessionEnableSystemSettings() bool { + return vc.safeSession.GetSessionEnableSystemSettings() +} + // SetReadAfterWriteGTID implements the SessionActions interface func (vc *vcursorImpl) SetReadAfterWriteGTID(vtgtid string) { vc.safeSession.SetReadAfterWriteGTID(vtgtid) } -//SetReadAfterWriteTimeout implements the SessionActions interface +// SetReadAfterWriteTimeout implements the SessionActions interface func (vc *vcursorImpl) SetReadAfterWriteTimeout(timeout float64) { 
vc.safeSession.SetReadAfterWriteTimeout(timeout) } -//SetSessionTrackGTIDs implements the SessionActions interface +// SetSessionTrackGTIDs implements the SessionActions interface func (vc *vcursorImpl) SetSessionTrackGTIDs(enable bool) { vc.safeSession.SetSessionTrackGtids(enable) } +// HasCreatedTempTable implements the SessionActions interface +func (vc *vcursorImpl) HasCreatedTempTable() { + vc.safeSession.GetOrCreateOptions().HasCreatedTempTables = true +} + +// GetDBDDLPluginName implements the VCursor interface +func (vc *vcursorImpl) GetDBDDLPluginName() string { + return *dbDDLPlugin +} + +// KeyspaceAvailable implements the VCursor interface +func (vc *vcursorImpl) KeyspaceAvailable(ks string) bool { + _, exists := vc.executor.VSchema().Keyspaces[ks] + return exists +} + +// ErrorIfShardedF implements the VCursor interface +func (vc *vcursorImpl) ErrorIfShardedF(ks *vindexes.Keyspace, warn, errFormat string, params ...interface{}) error { + if ks.Sharded { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, errFormat, params...) + } + vc.WarnUnshardedOnly("'%s' not supported in sharded mode", warn) + + return nil +} + +// WarnUnshardedOnly implements the VCursor interface +func (vc *vcursorImpl) WarnUnshardedOnly(format string, params ...interface{}) { + if vc.warnShardedOnly { + vc.warnings = append(vc.warnings, &querypb.QueryWarning{ + Code: mysql.ERNotSupportedYet, + Message: fmt.Sprintf(format, params...), + }) + } +} + +// ForeignKey implements the VCursor interface +func (vc *vcursorImpl) ForeignKeyMode() string { + if foreignKeyMode == nil { + return "" + } + return strings.ToLower(*foreignKeyMode) +} + // ParseDestinationTarget parses destination target string and sets default keyspace if possible. 
func parseDestinationTarget(targetString string, vschema *vindexes.VSchema) (string, topodatapb.TabletType, key.Destination, error) { destKeyspace, destTabletType, dest, err := topoprotopb.ParseDestination(targetString, defaultTabletType) diff --git a/go/vt/vtgate/vcursor_impl_test.go b/go/vt/vtgate/vcursor_impl_test.go index fba03cc17fa..418c53d00ee 100644 --- a/go/vt/vtgate/vcursor_impl_test.go +++ b/go/vt/vtgate/vcursor_impl_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/hex" "fmt" + "strconv" "testing" "vitess.io/vitess/go/vt/proto/vschema" @@ -165,35 +166,37 @@ func TestDestinationKeyspace(t *testing.T) { vschema: vschemaWith1KS, targetString: "ks2", qualifier: "", - expectedError: "no keyspace with name [ks2] found", + expectedError: "Unknown database 'ks2' in vschema", }, { vschema: vschemaWith1KS, targetString: "ks2:-80", qualifier: "", - expectedError: "no keyspace with name [ks2] found", + expectedError: "Unknown database 'ks2' in vschema", }, { vschema: vschemaWith1KS, targetString: "", qualifier: "ks2", - expectedError: "no keyspace with name [ks2] found", + expectedError: "Unknown database 'ks2' in vschema", }, { vschema: vschemaWith2KS, targetString: "", - expectedError: "keyspace not specified", + expectedError: errNoKeyspace.Error(), }} - for _, tc := range tests { - impl, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil) - impl.vschema = tc.vschema - dest, keyspace, tabletType, err := impl.TargetDestination(tc.qualifier) - if tc.expectedError == "" { - require.NoError(t, err) - require.Equal(t, tc.expectedDest, dest) - require.Equal(t, tc.expectedKeyspace, keyspace.Name) - require.Equal(t, tc.expectedTabletType, tabletType) - } else { - require.EqualError(t, err, tc.expectedError) - } + for i, tc := range tests { + t.Run(strconv.Itoa(i)+tc.targetString, func(t *testing.T) { + impl, _ 
:= newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false) + impl.vschema = tc.vschema + dest, keyspace, tabletType, err := impl.TargetDestination(tc.qualifier) + if tc.expectedError == "" { + require.NoError(t, err) + require.Equal(t, tc.expectedDest, dest) + require.Equal(t, tc.expectedKeyspace, keyspace.Name) + require.Equal(t, tc.expectedTabletType, tabletType) + } else { + require.EqualError(t, err, tc.expectedError) + } + }) } } @@ -231,16 +234,16 @@ func TestSetTarget(t *testing.T) { }, { vschema: vschemaWith2KS, targetString: "ks3", - expectedError: "Unknown database 'ks3' (errno 1049) (sqlstate 42000)", + expectedError: "Unknown database 'ks3'", }, { vschema: vschemaWith2KS, targetString: "ks2@replica", - expectedError: "cannot change to a non-master type in the middle of a transaction: REPLICA", + expectedError: "Can't execute the given command because you have an active transaction", }} for i, tc := range tests { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) { - vc, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil) + vc, _ := newVCursorImpl(context.Background(), NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false) vc.vschema = tc.vschema err := vc.SetTarget(tc.targetString) if tc.expectedError == "" { @@ -282,7 +285,7 @@ func TestPlanPrefixKey(t *testing.T) { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) { ss := NewSafeSession(&vtgatepb.Session{InTransaction: false}) ss.SetTargetString(tc.targetString) - vc, err := newVCursorImpl(context.Background(), ss, sqlparser.MarginComments{}, nil, nil, 
&fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil) + vc, err := newVCursorImpl(context.Background(), ss, sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false) require.NoError(t, err) vc.vschema = tc.vschema require.Equal(t, tc.expectedPlanPrefixKey, vc.planPrefixKey()) @@ -301,7 +304,7 @@ func TestFirstSortedKeyspace(t *testing.T) { ks3Schema.Keyspace.Name: ks3Schema, }} - vc, err := newVCursorImpl(context.Background(), NewSafeSession(nil), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil) + vc, err := newVCursorImpl(context.Background(), NewSafeSession(nil), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false) require.NoError(t, err) ks, err := vc.FirstSortedKeyspace() require.NoError(t, err) diff --git a/go/vt/vtgate/vindexes/binary_test.go b/go/vt/vtgate/vindexes/binary_test.go index 838686f41cb..167a05e8ef2 100644 --- a/go/vt/vtgate/vindexes/binary_test.go +++ b/go/vt/vtgate/vindexes/binary_test.go @@ -49,6 +49,9 @@ func TestBinaryMap(t *testing.T) { }{{ in: sqltypes.NewVarChar("test1"), out: []byte("test1"), + }, { + in: sqltypes.NULL, + out: []byte(nil), }, { in: sqltypes.NewVarChar("test2"), out: []byte("test2"), diff --git a/go/vt/vtgate/vindexes/binarymd5_test.go b/go/vt/vtgate/vindexes/binarymd5_test.go index 175c44d4517..5dfd3ae770f 100644 --- a/go/vt/vtgate/vindexes/binarymd5_test.go +++ b/go/vt/vtgate/vindexes/binarymd5_test.go @@ -44,19 +44,23 @@ func TestBinaryMD5Info(t *testing.T) { func TestBinaryMD5Map(t *testing.T) { tcases := []struct { - in, out string + in sqltypes.Value + out string }{{ - in: "Test", - out: "\f\xbcf\x11\xf5T\vЀ\x9a8\x8d\xc9Za[", + in: 
sqltypes.NewVarBinary("test1"), + out: "Z\x10^\x8b\x9d@\xe12\x97\x80\xd6.\xa2&]\x8a", }, { - in: "TEST", + in: sqltypes.NewVarBinary("TEST"), out: "\x03;\xd9K\x11h\xd7\xe4\xf0\xd6D\xc3\xc9^5\xbf", }, { - in: "Test", + in: sqltypes.NULL, + out: "\xd4\x1d\x8cُ\x00\xb2\x04\xe9\x80\t\x98\xec\xf8B~", + }, { + in: sqltypes.NewVarBinary("Test"), out: "\f\xbcf\x11\xf5T\vЀ\x9a8\x8d\xc9Za[", }} for _, tcase := range tcases { - got, err := binVindex.Map(nil, []sqltypes.Value{sqltypes.NewVarBinary(tcase.in)}) + got, err := binVindex.Map(nil, []sqltypes.Value{tcase.in}) if err != nil { t.Error(err) } diff --git a/go/vt/vtgate/vindexes/cached_size.go b/go/vt/vtgate/vindexes/cached_size.go new file mode 100644 index 00000000000..70c7fd0ede6 --- /dev/null +++ b/go/vt/vtgate/vindexes/cached_size.go @@ -0,0 +1,481 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. 
+ +package vindexes + +import ( + "math" + "reflect" + "unsafe" +) + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *AutoIncrement) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Column vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Column.CachedSize(false) + // field Sequence *vitess.io/vitess/go/vt/vtgate/vindexes.Table + size += cached.Sequence.CachedSize(true) + return size +} +func (cached *Binary) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *BinaryMD5) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *Column) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(44) + } + // field Name vitess.io/vitess/go/vt/sqlparser.ColIdent + size += cached.Name.CachedSize(false) + return size +} +func (cached *ColumnVindex) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field Columns []vitess.io/vitess/go/vt/sqlparser.ColIdent + { + size += int64(cap(cached.Columns)) * int64(40) + for _, elem := range cached.Columns { + size += elem.CachedSize(false) + } + } + // field Type string + size += int64(len(cached.Type)) + // field Name string + size += int64(len(cached.Name)) + // field Vindex vitess.io/vitess/go/vt/vtgate/vindexes.Vindex + if cc, ok := cached.Vindex.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *ConsistentLookup) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size 
:= int64(0) + if alloc { + size += int64(8) + } + // field clCommon *vitess.io/vitess/go/vt/vtgate/vindexes.clCommon + size += cached.clCommon.CachedSize(true) + return size +} +func (cached *ConsistentLookupUnique) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field clCommon *vitess.io/vitess/go/vt/vtgate/vindexes.clCommon + size += cached.clCommon.CachedSize(true) + return size +} +func (cached *Hash) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *Keyspace) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(17) + } + // field Name string + size += int64(len(cached.Name)) + return size +} +func (cached *LookupHash) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field name string + size += int64(len(cached.name)) + // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal + size += cached.lkp.CachedSize(false) + return size +} +func (cached *LookupHashUnique) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field name string + size += int64(len(cached.name)) + // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal + size += cached.lkp.CachedSize(false) + return size +} +func (cached *LookupNonUnique) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field name string + size += int64(len(cached.name)) + // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal + size += cached.lkp.CachedSize(false) + return size +} +func (cached 
*LookupUnicodeLooseMD5Hash) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field name string + size += int64(len(cached.name)) + // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal + size += cached.lkp.CachedSize(false) + return size +} +func (cached *LookupUnicodeLooseMD5HashUnique) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field name string + size += int64(len(cached.name)) + // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal + size += cached.lkp.CachedSize(false) + return size +} +func (cached *LookupUnique) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(136) + } + // field name string + size += int64(len(cached.name)) + // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal + size += cached.lkp.CachedSize(false) + return size +} +func (cached *Null) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *Numeric) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} + +//go:nocheckptr +func (cached *NumericStaticMap) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field name string + size += int64(len(cached.name)) + // field lookup vitess.io/vitess/go/vt/vtgate/vindexes.NumericLookupTable + if cached.lookup != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.lookup) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + 
uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 144) + if len(cached.lookup) > 0 || numBuckets > 1 { + size += int64(numBuckets * 144) + } + } + return size +} +func (cached *RegionExperimental) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field name string + size += int64(len(cached.name)) + return size +} + +//go:nocheckptr +func (cached *RegionJSON) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field name string + size += int64(len(cached.name)) + // field regionMap vitess.io/vitess/go/vt/vtgate/vindexes.RegionMap + if cached.regionMap != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.regionMap) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += int64(numOldBuckets * 208) + if len(cached.regionMap) > 0 || numBuckets > 1 { + size += int64(numBuckets * 208) + } + for k := range cached.regionMap { + size += int64(len(k)) + } + } + return size +} +func (cached *ReverseBits) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *Table) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(169) + } + // field Type string + size += int64(len(cached.Type)) + // field Name vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.Name.CachedSize(false) + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field ColumnVindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex 
+ { + size += int64(cap(cached.ColumnVindexes)) * int64(8) + for _, elem := range cached.ColumnVindexes { + size += elem.CachedSize(true) + } + } + // field Ordered []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex + { + size += int64(cap(cached.Ordered)) * int64(8) + for _, elem := range cached.Ordered { + size += elem.CachedSize(true) + } + } + // field Owned []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex + { + size += int64(cap(cached.Owned)) * int64(8) + for _, elem := range cached.Owned { + size += elem.CachedSize(true) + } + } + // field AutoIncrement *vitess.io/vitess/go/vt/vtgate/vindexes.AutoIncrement + size += cached.AutoIncrement.CachedSize(true) + // field Columns []vitess.io/vitess/go/vt/vtgate/vindexes.Column + { + size += int64(cap(cached.Columns)) * int64(44) + for _, elem := range cached.Columns { + size += elem.CachedSize(false) + } + } + // field Pinned []byte + size += int64(cap(cached.Pinned)) + return size +} +func (cached *UnicodeLooseMD5) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *UnicodeLooseXXHash) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *XXHash) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field name string + size += int64(len(cached.name)) + return size +} +func (cached *clCommon) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(256) + } + // field name string + size += int64(len(cached.name)) + // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal + size += cached.lkp.CachedSize(false) + // field 
keyspace string + size += int64(len(cached.keyspace)) + // field ownerTable string + size += int64(len(cached.ownerTable)) + // field ownerColumns []string + { + size += int64(cap(cached.ownerColumns)) * int64(16) + for _, elem := range cached.ownerColumns { + size += int64(len(elem)) + } + } + // field lockLookupQuery string + size += int64(len(cached.lockLookupQuery)) + // field lockOwnerQuery string + size += int64(len(cached.lockOwnerQuery)) + // field insertLookupQuery string + size += int64(len(cached.insertLookupQuery)) + // field updateLookupQuery string + size += int64(len(cached.updateLookupQuery)) + return size +} +func (cached *lookupInternal) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(112) + } + // field Table string + size += int64(len(cached.Table)) + // field FromColumns []string + { + size += int64(cap(cached.FromColumns)) * int64(16) + for _, elem := range cached.FromColumns { + size += int64(len(elem)) + } + } + // field To string + size += int64(len(cached.To)) + // field sel string + size += int64(len(cached.sel)) + // field ver string + size += int64(len(cached.ver)) + // field del string + size += int64(len(cached.del)) + return size +} diff --git a/go/vt/vtgate/vindexes/hash.go b/go/vt/vtgate/vindexes/hash.go index 9d68b167b56..781571b07f9 100644 --- a/go/vt/vtgate/vindexes/hash.go +++ b/go/vt/vtgate/vindexes/hash.go @@ -29,7 +29,6 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/vterrors" ) var ( @@ -103,7 +102,7 @@ func (vind *Hash) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]boo for i := range ids { num, err := evalengine.ToUint64(ids[i]) if err != nil { - return nil, vterrors.Wrap(err, "hash.Verify") + return nil, err } out[i] = bytes.Equal(vhash(num), ksids[i]) } diff --git a/go/vt/vtgate/vindexes/hash_test.go b/go/vt/vtgate/vindexes/hash_test.go index 021db8f070a..d2473f7aabb 100644 --- 
a/go/vt/vtgate/vindexes/hash_test.go +++ b/go/vt/vtgate/vindexes/hash_test.go @@ -99,10 +99,7 @@ func TestHashVerify(t *testing.T) { // Failure test _, err = hash.Verify(nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - wantErr := "hash.Verify: could not parse value: 'aa'" - if err == nil || err.Error() != wantErr { - t.Errorf("hash.Verify err: %v, want %s", err, wantErr) - } + require.EqualError(t, err, "could not parse value: 'aa'") } func TestHashReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup_internal.go b/go/vt/vtgate/vindexes/lookup_internal.go index e14162e8efd..ccecaaae16a 100644 --- a/go/vt/vtgate/vindexes/lookup_internal.go +++ b/go/vt/vtgate/vindexes/lookup_internal.go @@ -81,7 +81,7 @@ func (lkp *lookupInternal) Lookup(vcursor VCursor, ids []sqltypes.Value, co vtga sel = sel + " for update" } if ids[0].IsIntegral() { - // for integral or binary type, batch query all ids and then map them back to the input order + // for integral types, batch query all ids and then map them back to the input order vars, err := sqltypes.BuildBindVariable(ids) if err != nil { return nil, fmt.Errorf("lookup.Map: %v", err) diff --git a/go/vt/vtgate/vindexes/null_test.go b/go/vt/vtgate/vindexes/null_test.go index 2863e1abe30..a71fcb83a22 100644 --- a/go/vt/vtgate/vindexes/null_test.go +++ b/go/vt/vtgate/vindexes/null_test.go @@ -53,6 +53,7 @@ func TestNullMap(t *testing.T) { sqltypes.NewInt64(5), sqltypes.NewInt64(6), sqltypes.NewVarChar("1234567890123"), + sqltypes.NULL, }) require.NoError(t, err) want := []key.Destination{ @@ -63,6 +64,7 @@ func TestNullMap(t *testing.T) { key.DestinationKeyspaceID([]byte{0}), key.DestinationKeyspaceID([]byte{0}), key.DestinationKeyspaceID([]byte{0}), + key.DestinationKeyspaceID([]byte{0}), } if !reflect.DeepEqual(got, want) { t.Errorf("Map(): %#v, want %+v", got, want) diff --git a/go/vt/vtgate/vindexes/numeric.go b/go/vt/vtgate/vindexes/numeric.go index e031031048d..2671abe6314 100644 --- 
a/go/vt/vtgate/vindexes/numeric.go +++ b/go/vt/vtgate/vindexes/numeric.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/vterrors" ) var ( @@ -71,7 +70,7 @@ func (*Numeric) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, var keybytes [8]byte num, err := evalengine.ToUint64(ids[i]) if err != nil { - return nil, vterrors.Wrap(err, "Numeric.Verify") + return nil, err } binary.BigEndian.PutUint64(keybytes[:], num) out[i] = bytes.Equal(keybytes[:], ksids[i]) diff --git a/go/vt/vtgate/vindexes/numeric_static_map.go b/go/vt/vtgate/vindexes/numeric_static_map.go index e4501a2f923..5bf0c6f042f 100644 --- a/go/vt/vtgate/vindexes/numeric_static_map.go +++ b/go/vt/vtgate/vindexes/numeric_static_map.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/vterrors" ) var ( @@ -94,7 +93,7 @@ func (vind *NumericStaticMap) Verify(_ VCursor, ids []sqltypes.Value, ksids [][] var keybytes [8]byte num, err := evalengine.ToUint64(ids[i]) if err != nil { - return nil, vterrors.Wrap(err, "NumericStaticMap.Verify") + return nil, err } lookupNum, ok := vind.lookup[num] if ok { diff --git a/go/vt/vtgate/vindexes/numeric_static_map_test.go b/go/vt/vtgate/vindexes/numeric_static_map_test.go index e8cf8303f22..0e2beba05ee 100644 --- a/go/vt/vtgate/vindexes/numeric_static_map_test.go +++ b/go/vt/vtgate/vindexes/numeric_static_map_test.go @@ -63,6 +63,7 @@ func TestNumericStaticMapMap(t *testing.T) { sqltypes.NewInt64(6), sqltypes.NewInt64(7), sqltypes.NewInt64(8), + sqltypes.NULL, }) require.NoError(t, err) @@ -78,6 +79,7 @@ func TestNumericStaticMapMap(t *testing.T) { key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x06")), key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x07")), key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x08")), + key.DestinationNone{}, } if !reflect.DeepEqual(got, want) { t.Errorf("Map(): 
%+v, want %+v", got, want) @@ -100,8 +102,5 @@ func TestNumericStaticMapVerify(t *testing.T) { // Failure test _, err = numericStaticMap.Verify(nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - wantErr := "NumericStaticMap.Verify: could not parse value: 'aa'" - if err == nil || err.Error() != wantErr { - t.Errorf("hash.Verify err: %v, want %s", err, wantErr) - } + require.EqualError(t, err, "could not parse value: 'aa'") } diff --git a/go/vt/vtgate/vindexes/numeric_test.go b/go/vt/vtgate/vindexes/numeric_test.go index ac71bfcc499..b853c89736c 100644 --- a/go/vt/vtgate/vindexes/numeric_test.go +++ b/go/vt/vtgate/vindexes/numeric_test.go @@ -53,6 +53,7 @@ func TestNumericMap(t *testing.T) { sqltypes.NewInt64(7), sqltypes.NewInt64(8), sqltypes.NewInt32(8), + sqltypes.NULL, }) require.NoError(t, err) want := []key.Destination{ @@ -66,6 +67,7 @@ func TestNumericMap(t *testing.T) { key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x07")), key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x08")), key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x08")), + key.DestinationNone{}, } if !reflect.DeepEqual(got, want) { t.Errorf("Map(): %+v, want %+v", got, want) @@ -84,10 +86,7 @@ func TestNumericVerify(t *testing.T) { // Failure test _, err = numeric.Verify(nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - wantErr := "Numeric.Verify: could not parse value: 'aa'" - if err == nil || err.Error() != wantErr { - t.Errorf("hash.Verify err: %v, want %s", err, wantErr) - } + require.EqualError(t, err, "could not parse value: 'aa'") } func TestNumericReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/reverse_bits.go b/go/vt/vtgate/vindexes/reverse_bits.go index 9052a4c94ee..1d74e0932ef 100644 --- a/go/vt/vtgate/vindexes/reverse_bits.go +++ b/go/vt/vtgate/vindexes/reverse_bits.go @@ -27,7 +27,6 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" - 
"vitess.io/vitess/go/vt/vterrors" ) var ( @@ -86,7 +85,7 @@ func (vind *ReverseBits) Verify(_ VCursor, ids []sqltypes.Value, ksids [][]byte) for i := range ids { num, err := evalengine.ToUint64(ids[i]) if err != nil { - return nil, vterrors.Wrap(err, "reverseBits.Verify") + return nil, err } out[i] = bytes.Equal(reverse(num), ksids[i]) } diff --git a/go/vt/vtgate/vindexes/reverse_bits_test.go b/go/vt/vtgate/vindexes/reverse_bits_test.go index d79364bc3d9..d1d30cd2f9b 100644 --- a/go/vt/vtgate/vindexes/reverse_bits_test.go +++ b/go/vt/vtgate/vindexes/reverse_bits_test.go @@ -83,10 +83,7 @@ func TestReverseBitsVerify(t *testing.T) { // Failure test _, err = reverseBits.Verify(nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - wantErr := "reverseBits.Verify: could not parse value: 'aa'" - if err == nil || err.Error() != wantErr { - t.Errorf("reverseBits.Verify err: %v, want %s", err, wantErr) - } + require.EqualError(t, err, "could not parse value: 'aa'") } func TestReverseBitsReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/unicodeloosemd5_test.go b/go/vt/vtgate/vindexes/unicodeloosemd5_test.go index 1f6264cafd6..383e41ff3f0 100644 --- a/go/vt/vtgate/vindexes/unicodeloosemd5_test.go +++ b/go/vt/vtgate/vindexes/unicodeloosemd5_test.go @@ -42,40 +42,44 @@ func TestUnicodeLooseMD5Info(t *testing.T) { func TestUnicodeLooseMD5Map(t *testing.T) { tcases := []struct { - in, out string + in sqltypes.Value + out string }{{ - in: "Test", + in: sqltypes.NewVarBinary("Test"), out: "\v^۴\x01\xfdu$96\x90I\x1dd\xf1\xf5", }, { - in: "TEST", + in: sqltypes.NewVarBinary("TEST"), out: "\v^۴\x01\xfdu$96\x90I\x1dd\xf1\xf5", }, { - in: "Te\u0301st", + in: sqltypes.NewVarBinary("Te\u0301st"), out: "\v^۴\x01\xfdu$96\x90I\x1dd\xf1\xf5", }, { - in: "Tést", + in: sqltypes.NewVarBinary("Tést"), out: "\v^۴\x01\xfdu$96\x90I\x1dd\xf1\xf5", }, { - in: "Bést", + in: sqltypes.NewVarBinary("Bést"), out: "²3.Os\xd0\aA\x02bIpo/\xb6", }, { - in: "Test ", + in: 
sqltypes.NewVarBinary("Test "), out: "\v^۴\x01\xfdu$96\x90I\x1dd\xf1\xf5", }, { - in: " Test", + in: sqltypes.NewVarBinary(" Test"), out: "\xa2\xe3Q\\~\x8d\xf1\xff\xd2\xcc\xfc\x11Ʊ\x9d\xd1", }, { - in: "Test\t", + in: sqltypes.NewVarBinary("Test\t"), out: "\x82Em\xd8z\x9cz\x02\xb1\xc2\x05kZ\xba\xa2r", }, { - in: "TéstLooong", + in: sqltypes.NewVarBinary("TéstLooong"), out: "\x96\x83\xe1+\x80C\f\xd4S\xf5\xdfߺ\x81ɥ", }, { - in: "T", + in: sqltypes.NewVarBinary("T"), out: "\xac\x0f\x91y\xf5\x1d\xb8\u007f\xe8\xec\xc0\xcf@ʹz", + }, { + in: sqltypes.NULL, + out: "\xd4\x1d\x8cُ\x00\xb2\x04\xe9\x80\t\x98\xec\xf8B~", }} for _, tcase := range tcases { - got, err := charVindexMD5.Map(nil, []sqltypes.Value{sqltypes.NewVarBinary(tcase.in)}) + got, err := charVindexMD5.Map(nil, []sqltypes.Value{tcase.in}) if err != nil { t.Error(err) } diff --git a/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go b/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go index 72733c7dfbe..9a82a2a7056 100644 --- a/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go +++ b/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go @@ -42,40 +42,44 @@ func TestUnicodeLooseXXHashInfo(t *testing.T) { func TestUnicodeLooseXXHashMap(t *testing.T) { tcases := []struct { - in, out string + in sqltypes.Value + out string }{{ - in: "Test", + in: sqltypes.NewVarBinary("Test"), out: "B\xd2\x13a\bzL\a", }, { - in: "TEst", + in: sqltypes.NewVarBinary("TEst"), out: "B\xd2\x13a\bzL\a", }, { - in: "Te\u0301st", + in: sqltypes.NewVarBinary("Te\u0301st"), out: "B\xd2\x13a\bzL\a", }, { - in: "Tést", + in: sqltypes.NewVarBinary("Tést"), out: "B\xd2\x13a\bzL\a", }, { - in: "Bést", + in: sqltypes.NewVarBinary("Bést"), out: "\x92iu\xb9\xce.\xc3\x16", }, { - in: "Test ", + in: sqltypes.NewVarBinary("Test "), out: "B\xd2\x13a\bzL\a", }, { - in: " Test", + in: sqltypes.NewVarBinary(" Test"), out: "Oˋ\xe3N\xc0Wu", }, { - in: "Test\t", + in: sqltypes.NewVarBinary("Test\t"), out: " \xaf\x87\xfc6\xe3\xfdQ", }, { - in: "TéstLooong", + in: 
sqltypes.NewVarBinary("TéstLooong"), out: "\xd3\xea\x879B\xb4\x84\xa7", }, { - in: "T", + in: sqltypes.NewVarBinary("T"), out: "\xf8\x1c;\xe2\xd5\x01\xfe\x18", + }, { + in: sqltypes.NULL, + out: "\x99\xe9\xd8Q7\xdbF\xef", }} for _, tcase := range tcases { - got, err := charVindexXXHash.Map(nil, []sqltypes.Value{sqltypes.NewVarBinary(tcase.in)}) + got, err := charVindexXXHash.Map(nil, []sqltypes.Value{tcase.in}) if err != nil { t.Error(err) } diff --git a/go/vt/vtgate/vindexes/vindex.go b/go/vt/vtgate/vindexes/vindex.go index 06141b8a52c..47f3dc0eb2d 100644 --- a/go/vt/vtgate/vindexes/vindex.go +++ b/go/vt/vtgate/vindexes/vindex.go @@ -163,7 +163,7 @@ func Map(vindex Vindex, vcursor VCursor, rowsColValues [][]sqltypes.Value) ([]ke case SingleColumn: return vindex.Map(vcursor, firstColsOnly(rowsColValues)) } - return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "vindex does not have Map functions") + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex '%T' does not have Map function", vindex) } // Verify invokes the Verify implementation supplied by the vindex. @@ -174,7 +174,7 @@ func Verify(vindex Vindex, vcursor VCursor, rowsColValues [][]sqltypes.Value, ks case SingleColumn: return vindex.Verify(vcursor, firstColsOnly(rowsColValues), ksids) } - return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "vindex does not have Map functions") + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex '%T' does not have Verify function", vindex) } func firstColsOnly(rowsColValues [][]sqltypes.Value) []sqltypes.Value { diff --git a/go/vt/vtgate/vstream_manager.go b/go/vt/vtgate/vstream_manager.go index 892ddf5a037..e75c1f7ba50 100644 --- a/go/vt/vtgate/vstream_manager.go +++ b/go/vt/vtgate/vstream_manager.go @@ -17,11 +17,13 @@ limitations under the License. 
package vtgate import ( + "context" "fmt" "io" "sync" + "time" - "context" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" "github.com/golang/protobuf/proto" @@ -65,6 +67,26 @@ type vstream struct { cancel context.CancelFunc wg sync.WaitGroup + + // this flag is set by the client, default false + // if true skew detection is enabled and we align the streams so that they receive events from + // about the same time as each other. Note that there is no exact ordering of events across shards + minimizeSkew bool + + // mutex used to synchronize access to skew detection parameters + skewMu sync.Mutex + // channel is created whenever there is a skew detected. closing it implies the current skew has been fixed + skewCh chan bool + // if a skew lasts for this long, we timeout the vstream call. currently hardcoded + skewTimeoutSeconds int64 + // the slow streamId which is causing the skew. streamId is of the form . + laggard string + // transaction timestamp of the slowest stream + lowestTS int64 + // the timestamp of the most recent event, keyed by streamId. streamId is of the form . 
+ timestamps map[string]int64 + + vsm *vstreamManager } type journalEvent struct { @@ -81,8 +103,9 @@ func newVStreamManager(resolver *srvtopo.Resolver, serv srvtopo.Server, cell str } } -func (vsm *vstreamManager) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func(events []*binlogdatapb.VEvent) error) error { - vgtid, filter, err := vsm.resolveParams(ctx, tabletType, vgtid, filter) +func (vsm *vstreamManager) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, + filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags, send func(events []*binlogdatapb.VEvent) error) error { + vgtid, filter, flags, err := vsm.resolveParams(ctx, tabletType, vgtid, filter, flags) if err != nil { return err } @@ -93,12 +116,19 @@ func (vsm *vstreamManager) VStream(ctx context.Context, tabletType topodatapb.Ta send: send, resolver: vsm.resolver, journaler: make(map[int64]*journalEvent), + + minimizeSkew: flags.MinimizeSkew, + skewTimeoutSeconds: 10 * 60, + timestamps: make(map[string]int64), + vsm: vsm, } return vs.stream(ctx) } // resolveParams provides defaults for the inputs if they're not specified. 
-func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter) (*binlogdatapb.VGtid, *binlogdatapb.Filter, error) { +func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, + filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags) (*binlogdatapb.VGtid, *binlogdatapb.Filter, *vtgatepb.VStreamFlags, error) { + if filter == nil { filter = &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -106,8 +136,12 @@ func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodat }}, } } + + if flags == nil { + flags = &vtgatepb.VStreamFlags{} + } if vgtid == nil || len(vgtid.ShardGtids) == 0 { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vgtid must have at least one value with a starting position") + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vgtid must have at least one value with a starting position") } // To fetch from all keyspaces, the input must contain a single ShardGtid // that has an empty keyspace, and the Gtid must be "current". In the @@ -115,11 +149,11 @@ func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodat // copying of existing data. 
if len(vgtid.ShardGtids) == 1 && vgtid.ShardGtids[0].Keyspace == "" { if vgtid.ShardGtids[0].Gtid != "current" { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "for an empty keyspace, the Gtid value must be 'current': %v", vgtid) + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "for an empty keyspace, the Gtid value must be 'current': %v", vgtid) } keyspaces, err := vsm.toposerv.GetSrvKeyspaceNames(ctx, vsm.cell, false) if err != nil { - return nil, nil, err + return nil, nil, nil, err } newvgtid := &binlogdatapb.VGtid{} for _, keyspace := range keyspaces { @@ -134,12 +168,12 @@ func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodat for _, sgtid := range vgtid.ShardGtids { if sgtid.Shard == "" { if sgtid.Gtid != "current" { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "if shards are unspecified, the Gtid value must be 'current': %v", vgtid) + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "if shards are unspecified, the Gtid value must be 'current': %v", vgtid) } // TODO(sougou): this should work with the new Migrate workflow _, _, allShards, err := vsm.resolver.GetKeyspaceShards(ctx, sgtid.Keyspace, tabletType) if err != nil { - return nil, nil, err + return nil, nil, nil, err } for _, shard := range allShards { newvgtid.ShardGtids = append(newvgtid.ShardGtids, &binlogdatapb.ShardGtid{ @@ -152,11 +186,19 @@ func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodat newvgtid.ShardGtids = append(newvgtid.ShardGtids, sgtid) } } + //TODO add tablepk validations - return newvgtid, filter, nil + return newvgtid, filter, flags, nil +} + +func (vsm *vstreamManager) RecordStreamDelay() { + vstreamSkewDelayCount.Add(1) } +func (vsm *vstreamManager) GetTotalStreamDelay() int64 { + return vstreamSkewDelayCount.Get() +} func (vs *vstream) stream(ctx context.Context) error { ctx, vs.cancel = context.WithCancel(ctx) defer vs.cancel() @@ -187,6 
+229,95 @@ func (vs *vstream) startOneStream(ctx context.Context, sgtid *binlogdatapb.Shard }() } +// MaxSkew is the threshold for a skew to be detected. Since MySQL timestamps are in seconds we account for +// two round-offs: one for the actual event and another while accounting for the clock skew +const MaxSkew = int64(2) + +// computeSkew sets the timestamp of the current event for the calling stream, accounts for a clock skew +// and declares that a skew has arisen if the streams are too far apart +func (vs *vstream) computeSkew(streamID string, event *binlogdatapb.VEvent) bool { + vs.skewMu.Lock() + defer vs.skewMu.Unlock() + // account for skew between this vtgate and the source mysql server + secondsInThePast := event.CurrentTime/1e9 - event.Timestamp + vs.timestamps[streamID] = time.Now().Unix() - secondsInThePast + + var minTs, maxTs int64 + var laggardStream string + + if len(vs.timestamps) <= 1 { + return false + } + for k, ts := range vs.timestamps { + if ts < minTs || minTs == 0 { + minTs = ts + laggardStream = k + } + if ts > maxTs { + maxTs = ts + } + } + if vs.laggard != "" { // we are skewed, check if this event has fixed the skew + if (maxTs - minTs) <= MaxSkew { + vs.laggard = "" + close(vs.skewCh) + } + } else { + if (maxTs - minTs) > MaxSkew { // check if we are skewed due to this event + log.Infof("Skew found, laggard is %s, %+v", laggardStream, vs.timestamps) + vs.laggard = laggardStream + vs.skewCh = make(chan bool) + } + } + return vs.mustPause(streamID) +} + +// mustPause returns true if a skew exists and the stream calling this is not the slowest one +func (vs *vstream) mustPause(streamID string) bool { + switch vs.laggard { + case "": + return false + case streamID: + // current stream is the laggard, not pausing + return false + } + + if (vs.timestamps[streamID] - vs.lowestTS) <= MaxSkew { + // current stream is not the laggard, but the skew is still within the limit + return false + } + vs.vsm.RecordStreamDelay() + return true +} + +// 
alignStreams is called by each individual shard's stream before an event is sent to the client or after each heartbeat. +// It checks for skew (if the minimizeSkew option is set). If skew is present this stream is delayed until the skew is fixed +// The faster stream detects the skew and waits. The slower stream resets the skew when it catches up. +func (vs *vstream) alignStreams(ctx context.Context, event *binlogdatapb.VEvent, keyspace, shard string) error { + if !vs.minimizeSkew || event.Timestamp == 0 { + return nil + } + streamID := fmt.Sprintf("%s/%s", keyspace, shard) + for { + mustPause := vs.computeSkew(streamID, event) + if event.Type == binlogdatapb.VEventType_HEARTBEAT { + return nil + } + if !mustPause { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Duration(vs.skewTimeoutSeconds) * time.Second): + log.Errorf("timed out while waiting for skew to reduce: %s", streamID) + return fmt.Errorf("timed out while waiting for skew to reduce: %s", streamID) + case <-vs.skewCh: + // once skew is fixed the channel is closed and all waiting streams "wake up" + } + } +} + // streamFromTablet streams from one shard. If transactions come in separate chunks, they are grouped and sent. func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.ShardGtid) error { // journalDone is assigned a channel when a journal event is encountered. 
@@ -249,6 +380,11 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha case binlogdatapb.VEventType_COMMIT, binlogdatapb.VEventType_DDL, binlogdatapb.VEventType_OTHER: sendevents = append(sendevents, event) eventss = append(eventss, sendevents) + + if err := vs.alignStreams(ctx, event, sgtid.Keyspace, sgtid.Shard); err != nil { + return err + } + if err := vs.sendAll(sgtid, eventss); err != nil { return err } @@ -258,6 +394,10 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // Remove all heartbeat events for now. // Otherwise they can accumulate indefinitely if there are no real events. // TODO(sougou): figure out a model for this. + if err := vs.alignStreams(ctx, event, sgtid.Keyspace, sgtid.Shard); err != nil { + return err + } + case binlogdatapb.VEventType_JOURNAL: journal := event.Journal // Journal events are not sent to clients. diff --git a/go/vt/vtgate/vstream_manager_test.go b/go/vt/vtgate/vstream_manager_test.go index 0515b99732e..addbe49269b 100644 --- a/go/vt/vtgate/vstream_manager_test.go +++ b/go/vt/vtgate/vstream_manager_test.go @@ -19,7 +19,14 @@ package vtgate import ( "fmt" "strings" + "sync" "testing" + "time" + + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/vttablet/sandboxconn" "context" @@ -36,6 +43,102 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) +var mu sync.Mutex + +func getVEvents(shard string, count, idx int64) []*binlogdatapb.VEvent { + mu.Lock() + defer mu.Unlock() + var vevents []*binlogdatapb.VEvent + var i int64 + currentTime := time.Now().Unix() + for i = count; i > 0; i-- { + j := i + idx + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_GTID, Gtid: fmt.Sprintf("gtid-%s-%d", shard, j), + Timestamp: currentTime - j, + CurrentTime: currentTime * 1e9, + }) + + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_COMMIT, + Timestamp: currentTime - 
j, + CurrentTime: currentTime * 1e9, + }) + } + return vevents +} + +func TestVStreamSkew(t *testing.T) { + stream := func(conn *sandboxconn.SandboxConn, shard string, count, idx int64) { + vevents := getVEvents(shard, count, idx) + for _, ev := range vevents { + conn.VStreamCh <- ev + time.Sleep(time.Duration(idx*100) * time.Millisecond) + } + } + type skewTestCase struct { + numEventsPerShard int64 + shard0idx, shard1idx int64 + expectedDelays int64 + } + tcases := []*skewTestCase{ + // shard0 events are all attempted to be sent first along with the first event of shard1 due to the increased sleep + // for shard1 in stream(). Third event and fourth events of shard0 need to wait for shard1 to catch up + {numEventsPerShard: 4, shard0idx: 1, shard1idx: 2, expectedDelays: 2}, + + // no delays if streams are aligned or if only one stream is present + {numEventsPerShard: 4, shard0idx: 1, shard1idx: 1, expectedDelays: 0}, + {numEventsPerShard: 4, shard0idx: 0, shard1idx: 1, expectedDelays: 0}, + {numEventsPerShard: 4, shard0idx: 1, shard1idx: 0, expectedDelays: 0}, + } + previousDelays := int64(0) + vstreamSkewDelayCount = stats.NewCounter("VStreamEventsDelayedBySkewAlignment", + "Number of events that had to wait because the skew across shards was too high") + for idx, tcase := range tcases { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + name := fmt.Sprintf("TestVStreamSkew-%d", idx) + _ = createSandbox(name) + hc := discovery.NewFakeHealthCheck() + vsm := newTestVStreamManager(hc, new(sandboxTopo), "aa") + shard0 := "-20" + shard1 := "20-40" + vgtid := &binlogdatapb.VGtid{ShardGtids: []*binlogdatapb.ShardGtid{}} + want := int64(0) + var sbc0, sbc1 *sandboxconn.SandboxConn + if tcase.shard0idx != 0 { + sbc0 = hc.AddTestTablet("aa", "1.1.1.1", 1001, name, shard0, topodatapb.TabletType_MASTER, true, 1, nil) + sbc0.VStreamCh = make(chan *binlogdatapb.VEvent) + want += 2 * tcase.numEventsPerShard + 
vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{Keyspace: name, Gtid: "pos", Shard: "-20"}) + go stream(sbc0, shard0, tcase.numEventsPerShard, tcase.shard0idx) + } + if tcase.shard1idx != 0 { + sbc1 = hc.AddTestTablet("aa", "1.1.1.1", 1002, name, shard1, topodatapb.TabletType_MASTER, true, 1, nil) + sbc1.VStreamCh = make(chan *binlogdatapb.VEvent) + want += 2 * tcase.numEventsPerShard + vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{Keyspace: name, Gtid: "pos", Shard: "20-40"}) + go stream(sbc1, shard1, tcase.numEventsPerShard, tcase.shard1idx) + } + ch := startVStream(ctx, t, vsm, vgtid, true) + var receivedEvents []*binlogdatapb.VEvent + for len(receivedEvents) < int(want) { + select { + case <-time.After(1 * time.Minute): + require.FailNow(t, "test timed out") + case response := <-ch: + receivedEvents = append(receivedEvents, response.Events...) + } + } + require.Equal(t, int(want), int(len(receivedEvents))) + require.Equal(t, tcase.expectedDelays, vsm.GetTotalStreamDelay()-previousDelays) + previousDelays = vsm.GetTotalStreamDelay() + }) + } +} + func TestVStreamEvents(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -91,7 +194,7 @@ func TestVStreamEvents(t *testing.T) { } ch := make(chan *binlogdatapb.VStreamResponse) go func() { - err := vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, func(events []*binlogdatapb.VEvent) error { + err := vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error { ch <- &binlogdatapb.VStreamResponse{Events: events} return nil }) @@ -142,7 +245,7 @@ func TestVStreamChunks(t *testing.T) { Gtid: "pos", }}, } - _ = vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, func(events []*binlogdatapb.VEvent) error { + _ = vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error { switch events[0].Type { 
case binlogdatapb.VEventType_ROW: if doneCounting { @@ -214,7 +317,7 @@ func TestVStreamMulti(t *testing.T) { Gtid: "pos", }}, } - ch := startVStream(ctx, t, vsm, vgtid) + ch := startVStream(ctx, t, vsm, vgtid, false) <-ch response := <-ch var got *binlogdatapb.VGtid @@ -267,7 +370,7 @@ func TestVStreamRetry(t *testing.T) { Gtid: "pos", }}, } - err := vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, func(events []*binlogdatapb.VEvent) error { + err := vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error { count++ return nil }) @@ -322,7 +425,7 @@ func TestVStreamHeartbeat(t *testing.T) { Gtid: "pos", }}, } - ch := startVStream(ctx, t, vsm, vgtid) + ch := startVStream(ctx, t, vsm, vgtid, false) verifyEvents(t, ch, want) } @@ -404,7 +507,7 @@ func TestVStreamJournalOneToMany(t *testing.T) { Gtid: "pos", }}, } - ch := startVStream(ctx, t, vsm, vgtid) + ch := startVStream(ctx, t, vsm, vgtid, false) verifyEvents(t, ch, want1) // The following two events from the different shards can come in any order. @@ -515,7 +618,7 @@ func TestVStreamJournalManyToOne(t *testing.T) { Gtid: "pos1020", }}, } - ch := startVStream(ctx, t, vsm, vgtid) + ch := startVStream(ctx, t, vsm, vgtid, false) // The following two events from the different shards can come in any order. // But the resulting VGTID should be the same after both are received. 
<-ch @@ -661,7 +764,7 @@ func TestVStreamJournalNoMatch(t *testing.T) { Gtid: "pos", }}, } - ch := startVStream(ctx, t, vsm, vgtid) + ch := startVStream(ctx, t, vsm, vgtid, false) verifyEvents(t, ch, want1, wantjn1, want2, wantjn2, want3) } @@ -708,7 +811,7 @@ func TestVStreamJournalPartialMatch(t *testing.T) { Gtid: "pos1020", }}, } - err := vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, func(events []*binlogdatapb.VEvent) error { + err := vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error { t.Errorf("unexpected events: %v", events) return nil }) @@ -737,7 +840,7 @@ func TestVStreamJournalPartialMatch(t *testing.T) { }}, } sbc2.AddVStreamEvents(send, nil) - err = vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, func(events []*binlogdatapb.VEvent) error { + err = vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error { t.Errorf("unexpected events: %v", events) return nil }) @@ -850,7 +953,7 @@ func TestResolveVStreamParams(t *testing.T) { }}, } for _, tcase := range testcases { - vgtid, filter, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, tcase.input, nil) + vgtid, filter, flags, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, tcase.input, nil, nil) if tcase.err != "" { if err == nil || !strings.Contains(err.Error(), tcase.err) { t.Errorf("resolve(%v) err: %v, must contain %v", tcase.input, err, tcase.err) @@ -860,6 +963,7 @@ func TestResolveVStreamParams(t *testing.T) { require.NoError(t, err, tcase.input) assert.Equal(t, tcase.output, vgtid, tcase.input) assert.Equal(t, wantFilter, filter, tcase.input) + require.False(t, flags.MinimizeSkew) } // Special-case: empty keyspace because output is too big. 
input := &binlogdatapb.VGtid{ @@ -867,11 +971,27 @@ func TestResolveVStreamParams(t *testing.T) { Gtid: "current", }}, } - vgtid, _, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, input, nil) + vgtid, _, _, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, input, nil, nil) require.NoError(t, err, input) if got, want := len(vgtid.ShardGtids), 8; want >= got { t.Errorf("len(vgtid.ShardGtids): %v, must be >%d", got, want) } + for _, minimizeSkew := range []bool{true, false} { + t.Run(fmt.Sprintf("resolveParams MinimizeSkew %t", minimizeSkew), func(t *testing.T) { + flags := &vtgatepb.VStreamFlags{MinimizeSkew: minimizeSkew} + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: "TestVStream", + Shard: "-20", + Gtid: "current", + }}, + } + _, _, flags2, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, vgtid, nil, flags) + require.NoError(t, err) + require.Equal(t, minimizeSkew, flags2.MinimizeSkew) + }) + } + } func newTestVStreamManager(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *vstreamManager { @@ -880,10 +1000,10 @@ func newTestVStreamManager(hc discovery.HealthCheck, serv srvtopo.Server, cell s return newVStreamManager(srvResolver, serv, cell) } -func startVStream(ctx context.Context, t *testing.T, vsm *vstreamManager, vgtid *binlogdatapb.VGtid) <-chan *binlogdatapb.VStreamResponse { +func startVStream(ctx context.Context, t *testing.T, vsm *vstreamManager, vgtid *binlogdatapb.VGtid, minimizeSkew bool) <-chan *binlogdatapb.VStreamResponse { ch := make(chan *binlogdatapb.VStreamResponse) go func() { - _ = vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, func(events []*binlogdatapb.VEvent) error { + _ = vsm.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, nil, &vtgatepb.VStreamFlags{MinimizeSkew: true}, func(events []*binlogdatapb.VEvent) error { ch <- &binlogdatapb.VStreamResponse{Events: events} return nil }) @@ -895,6 
+1015,9 @@ func verifyEvents(t *testing.T, ch <-chan *binlogdatapb.VStreamResponse, wants . t.Helper() for i, want := range wants { got := <-ch + for _, event := range got.Events { + event.Timestamp = 0 + } if !proto.Equal(got, want) { t.Errorf("vstream(%d):\n%v, want\n%v", i, got, want) } diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 288d7a7c5ae..fc8f10e8e6d 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -29,11 +29,11 @@ import ( "context" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/schema" @@ -53,15 +53,18 @@ import ( ) var ( - transactionMode = flag.String("transaction_mode", "MULTI", "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit") - normalizeQueries = flag.Bool("normalize_queries", true, "Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars.") - terseErrors = flag.Bool("vtgate-config-terse-errors", false, "prevent bind vars from escaping in returned errors") - streamBufferSize = flag.Int("stream_buffer_size", 32*1024, "the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size.") - queryPlanCacheSize = flag.Int64("gate_query_cache_size", 10000, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. 
This config controls the capacity of the lru cache.") - _ = flag.Bool("disable_local_gateway", false, "deprecated: if specified, this process will not route any queries to local tablets in the local cell") - maxMemoryRows = flag.Int("max_memory_rows", 300000, "Maximum number of rows that will be held in memory for intermediate results as well as the final result.") - warnMemoryRows = flag.Int("warn_memory_rows", 30000, "Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented.") - defaultDDLStrategy = flag.String("ddl_strategy", string(schema.DDLStrategyDirect), "Set default strategy for DDL statements. Override with @@ddl_strategy session variable") + transactionMode = flag.String("transaction_mode", "MULTI", "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit") + normalizeQueries = flag.Bool("normalize_queries", true, "Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars.") + terseErrors = flag.Bool("vtgate-config-terse-errors", false, "prevent bind vars from escaping in returned errors") + streamBufferSize = flag.Int("stream_buffer_size", 32*1024, "the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size.") + queryPlanCacheSize = flag.Int64("gate_query_cache_size", cache.DefaultConfig.MaxEntries, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache.") + queryPlanCacheMemory = flag.Int64("gate_query_cache_memory", cache.DefaultConfig.MaxMemoryUsage, "gate server query cache size in bytes, maximum amount of memory to be cached. 
vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") + queryPlanCacheLFU = flag.Bool("gate_query_cache_lfu", cache.DefaultConfig.LFU, "gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") + _ = flag.Bool("disable_local_gateway", false, "deprecated: if specified, this process will not route any queries to local tablets in the local cell") + maxMemoryRows = flag.Int("max_memory_rows", 300000, "Maximum number of rows that will be held in memory for intermediate results as well as the final result.") + warnMemoryRows = flag.Int("warn_memory_rows", 30000, "Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented.") + defaultDDLStrategy = flag.String("ddl_strategy", string(schema.DDLStrategyDirect), "Set default strategy for DDL statements. Override with @@ddl_strategy session variable") + dbDDLPlugin = flag.String("dbddl_plugin", "fail", "controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service") // TODO(deepthi): change these two vars to unexported and move to healthcheck.go when LegacyHealthcheck is removed @@ -74,8 +77,13 @@ var ( // Put set-passthrough under a flag. sysVarSetEnabled = flag.Bool("enable_system_settings", false, "This will enable the system settings to be changed per session at the database connection level") + plannerVersion = flag.String("planner_version", "v3", "Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails. 
All Gen4 versions should be considered experimental!") + // lockHeartbeatTime is used to set the next heartbeat time. lockHeartbeatTime = flag.Duration("lock_heartbeat_time", 5*time.Second, "If there is lock function used. This will keep the lock connection active by using this heartbeat") + warnShardedOnly = flag.Bool("warn_sharded_only", false, "If any features that are only available in unsharded mode are used, query execution warnings will be added to the session") + + foreignKeyMode = flag.String("foreign_key_mode", "allow", "This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow") ) func getTxMode() vtgatepb.TransactionMode { @@ -106,6 +114,8 @@ var ( errorCounts *stats.CountersWithMultiLabels warnings *stats.CountersWithSingleLabel + + vstreamSkewDelayCount *stats.Counter ) // VTGate is the rpc interface to vtgate. Only one instance @@ -124,6 +134,7 @@ type VTGate struct { // are global vars that depend on this member var. timings *stats.MultiTimings rowsReturned *stats.CountersWithMultiLabels + rowsAffected *stats.CountersWithMultiLabels // the throttled loggers for all errors, one per API entry logExecute *logutil.ThrottledLogger @@ -146,6 +157,9 @@ func Init(ctx context.Context, serv srvtopo.Server, cell string, tabletTypesToWa // catch the initial load stats. vschemaCounters = stats.NewCountersWithSingleLabel("VtgateVSchemaCounts", "Vtgate vschema counts", "changes") + vstreamSkewDelayCount = stats.NewCounter("VStreamEventsDelayedBySkewAlignment", + "Number of events that had to wait because the skew across shards was too high") + // Build objects from low to high level. // Start with the gateway. If we can't reach the topology service, // we can't go on much further, so we log.Fatal out. 
@@ -176,9 +190,14 @@ func Init(ctx context.Context, serv srvtopo.Server, cell string, tabletTypesToWa srvResolver := srvtopo.NewResolver(serv, gw, cell) resolver := NewResolver(srvResolver, serv, cell, sc) vsm := newVStreamManager(srvResolver, serv, cell) + cacheCfg := &cache.Config{ + MaxEntries: *queryPlanCacheSize, + MaxMemoryUsage: *queryPlanCacheMemory, + LFU: *queryPlanCacheLFU, + } rpcVTGate = &VTGate{ - executor: NewExecutor(ctx, serv, cell, resolver, *normalizeQueries, *streamBufferSize, *queryPlanCacheSize), + executor: NewExecutor(ctx, serv, cell, resolver, *normalizeQueries, *warnShardedOnly, *streamBufferSize, cacheCfg), resolver: resolver, vsm: vsm, txConn: tc, @@ -191,6 +210,10 @@ func Init(ctx context.Context, serv srvtopo.Server, cell string, tabletTypesToWa "VtgateApiRowsReturned", "Rows returned through the VTgate API", []string{"Operation", "Keyspace", "DbType"}), + rowsAffected: stats.NewCountersWithMultiLabels( + "VtgateApiRowsAffected", + "Rows affected by a write (DML) operation through the VTgate API", + []string{"Operation", "Keyspace", "DbType"}), logExecute: logutil.NewThrottledLogger("Execute", 5*time.Second), logStreamExecute: logutil.NewThrottledLogger("StreamExecute", 5*time.Second), @@ -266,6 +289,7 @@ func (vtg *VTGate) Execute(ctx context.Context, session *vtgatepb.Session, sql s qr, err = vtg.executor.Execute(ctx, "Execute", NewSafeSession(session), sql, bindVariables) if err == nil { vtg.rowsReturned.Add(statsKey, int64(len(qr.Rows))) + vtg.rowsAffected.Add(statsKey, int64(qr.RowsAffected)) return session, qr, nil } @@ -301,6 +325,7 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, session, qrl[i].QueryResult, qrl[i].QueryError = vtg.Execute(ctx, session, sql, bv) if qr := qrl[i].QueryResult; qr != nil { vtg.rowsReturned.Add(statsKey, int64(len(qr.Rows))) + vtg.rowsAffected.Add(statsKey, int64(qr.RowsAffected)) } } return session, qrl, nil @@ -311,7 +336,7 @@ func (vtg *VTGate) ExecuteBatch(ctx 
context.Context, session *vtgatepb.Session, // by multiple go routines. func (vtg *VTGate) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { // In this context, we don't care if we can't fully parse destination - destKeyspace, destTabletType, dest, _ := vtg.executor.ParseDestinationTarget(session.TargetString) + destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) statsKey := []string{"StreamExecute", destKeyspace, topoproto.TabletTypeLString(destTabletType)} defer vtg.timings.Record(statsKey, time.Now()) @@ -319,26 +344,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, session *vtgatepb.Session, var err error if bvErr := sqltypes.ValidateBindVariables(bindVariables); bvErr != nil { err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", bvErr) - goto handleError - } - - // TODO: This could be simplified to have a StreamExecute that takes - // a destTarget without explicit destination. 
- switch dest.(type) { - case key.DestinationShard: - err = vtg.resolver.StreamExecute( - ctx, - sql, - bindVariables, - destKeyspace, - destTabletType, - dest, - session.Options, - func(reply *sqltypes.Result) error { - vtg.rowsReturned.Add(statsKey, int64(len(reply.Rows))) - return callback(reply) - }) - default: + } else { err = vtg.executor.StreamExecute( ctx, "StreamExecute", @@ -351,10 +357,10 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, session *vtgatepb.Session, }, func(reply *sqltypes.Result) error { vtg.rowsReturned.Add(statsKey, int64(len(reply.Rows))) + vtg.rowsAffected.Add(statsKey, int64(reply.RowsAffected)) return callback(reply) }) } -handleError: if err != nil { query := map[string]interface{}{ "Sql": sql, @@ -392,7 +398,6 @@ func (vtg *VTGate) Prepare(ctx context.Context, session *vtgatepb.Session, sql s fld, err = vtg.executor.Prepare(ctx, "Prepare", NewSafeSession(session), sql, bindVariables) if err == nil { - vtg.rowsReturned.Add(statsKey, int64(len(fld))) return session, fld, nil } @@ -407,8 +412,8 @@ handleError: } // VStream streams binlog events. -func (vtg *VTGate) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { - return vtg.vsm.VStream(ctx, tabletType, vgtid, filter, send) +func (vtg *VTGate) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags, send func([]*binlogdatapb.VEvent) error) error { + return vtg.vsm.VStream(ctx, tabletType, vgtid, filter, flags, send) } // GetGatewayCacheStatus returns a displayable version of the Gateway cache. 
@@ -460,14 +465,14 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]int case vtrpcpb.Code_UNAVAILABLE: logger.Infof("%v, request: %+v", err, request) } - return vterrors.Wrapf(err, "vtgate: %s", servenv.ListeningURL.String()) + return err } func formatError(err error) error { if err == nil { return nil } - return vterrors.Wrapf(err, "vtgate: %s", servenv.ListeningURL.String()) + return err } // HandlePanic recovers from panics, and logs / increment counters @@ -515,9 +520,14 @@ func LegacyInit(ctx context.Context, hc discovery.LegacyHealthCheck, serv srvtop srvResolver := srvtopo.NewResolver(serv, gw, cell) resolver := NewResolver(srvResolver, serv, cell, sc) vsm := newVStreamManager(srvResolver, serv, cell) + cacheCfg := &cache.Config{ + MaxEntries: *queryPlanCacheSize, + MaxMemoryUsage: *queryPlanCacheMemory, + LFU: *queryPlanCacheLFU, + } rpcVTGate = &VTGate{ - executor: NewExecutor(ctx, serv, cell, resolver, *normalizeQueries, *streamBufferSize, *queryPlanCacheSize), + executor: NewExecutor(ctx, serv, cell, resolver, *normalizeQueries, *warnShardedOnly, *streamBufferSize, cacheCfg), resolver: resolver, vsm: vsm, txConn: tc, @@ -530,6 +540,10 @@ func LegacyInit(ctx context.Context, hc discovery.LegacyHealthCheck, serv srvtop "VtgateApiRowsReturned", "Rows returned through the VTgate API", []string{"Operation", "Keyspace", "DbType"}), + rowsAffected: stats.NewCountersWithMultiLabels( + "VtgateApiRowsAffected", + "Rows affected by a write (DML) operation through the VTgate API", + []string{"Operation", "Keyspace", "DbType"}), logExecute: logutil.NewThrottledLogger("Execute", 5*time.Second), logStreamExecute: logutil.NewThrottledLogger("StreamExecute", 5*time.Second), diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index 01ddb499c0a..35b0f668cfc 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -23,6 +23,10 @@ import ( "github.com/stretchr/testify/assert" + 
"vitess.io/vitess/go/test/utils" + + "github.com/stretchr/testify/require" + "context" "github.com/golang/protobuf/proto" @@ -103,7 +107,9 @@ func TestVTGateExecute(t *testing.T) { if err != nil { t.Errorf("want nil, got %v", err) } - if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) { + want := *sandboxconn.SingleRowResult + want.StatusFlags = 0 // VTGate result set does not contain status flags in sqltypes.Result + if !reflect.DeepEqual(&want, qr) { t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr) } if !proto.Equal(sbc.Options[0], executeOptions) { @@ -128,7 +134,9 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { if err != nil { t.Errorf("want nil, got %v", err) } - if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) { + wantQr := *sandboxconn.SingleRowResult + wantQr.StatusFlags = 0 // VTGate result set does not contain status flags in sqltypes.Result + if !reflect.DeepEqual(&wantQr, qr) { t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr) } @@ -141,7 +149,7 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { "select id from none", nil, ) - want := "vtgate: : keyspace invalid_keyspace not found in vschema" + want := "keyspace invalid_keyspace not found in vschema" assert.EqualError(t, err, want) // Valid keyspace/shard. 
@@ -156,7 +164,7 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { if err != nil { t.Errorf("want nil, got %v", err) } - if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) { + if !reflect.DeepEqual(&wantQr, qr) { t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr) } @@ -169,10 +177,8 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { "select id from none", nil, ) - want = "TestUnsharded.noshard.master: no valid tablet" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("Execute: %v, want %s", err, want) - } + require.Error(t, err) + require.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestUnsharded" shard:"noshard" tablet_type:MASTER`) } func TestVTGateStreamExecute(t *testing.T) { @@ -203,9 +209,7 @@ func TestVTGateStreamExecute(t *testing.T) { }, { Rows: sandboxconn.StreamRowResult.Rows, }} - if !reflect.DeepEqual(want, qrs) { - t.Errorf("want \n%+v, got \n%+v", want, qrs) - } + utils.MustMatch(t, want, qrs) if !proto.Equal(sbc.Options[0], executeOptions) { t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions) } diff --git a/go/vt/vtgate/vtgateconn/vtgateconn.go b/go/vt/vtgate/vtgateconn/vtgateconn.go index 5df12f69309..17fe91fef4d 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn.go @@ -74,8 +74,9 @@ type VStreamReader interface { } // VStream streams binlog events. -func (conn *VTGateConn) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter) (VStreamReader, error) { - return conn.impl.VStream(ctx, tabletType, vgtid, filter) +func (conn *VTGateConn) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, + filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags) (VStreamReader, error) { + return conn.impl.VStream(ctx, tabletType, vgtid, filter, flags) } // VTGateSession exposes the V3 API to the clients. 
@@ -134,7 +135,7 @@ type Impl interface { ResolveTransaction(ctx context.Context, dtid string) error // VStream streams binlogevents - VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter) (VStreamReader, error) + VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags) (VStreamReader, error) // Close must be called for releasing resources. Close() diff --git a/go/vt/vtgate/vtgateservice/interface.go b/go/vt/vtgate/vtgateservice/interface.go index 94f28205776..9daf7169a25 100644 --- a/go/vt/vtgate/vtgateservice/interface.go +++ b/go/vt/vtgate/vtgateservice/interface.go @@ -41,7 +41,7 @@ type VTGateService interface { ResolveTransaction(ctx context.Context, dtid string) error // Update Stream methods - VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error + VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, flags *vtgatepb.VStreamFlags, send func([]*binlogdatapb.VEvent) error) error // HandlePanic should be called with defer at the beginning of each // RPC implementation method, before calling any of the previous methods diff --git a/go/vt/vttablet/endtoend/batch_test.go b/go/vt/vttablet/endtoend/batch_test.go index 8831c073f3f..fc0ed8f3a26 100644 --- a/go/vt/vttablet/endtoend/batch_test.go +++ b/go/vt/vttablet/endtoend/batch_test.go @@ -20,6 +20,8 @@ import ( "reflect" "testing" + "vitess.io/vitess/go/test/utils" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -27,6 +29,13 @@ import ( "vitess.io/vitess/go/vt/vttablet/endtoend/framework" ) +var mustMatch = utils.MustMatchFn( + []interface{}{ // types with unexported fields + sqltypes.Value{}, + }, + []string{}, // ignored fields +) + func 
TestBatchRead(t *testing.T) { client := framework.NewClient() queries := []*querypb.BoundQuery{{ @@ -77,7 +86,6 @@ func TestBatchRead(t *testing.T) { Charset: 63, Flags: 128, }}, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.NewInt64(1), @@ -86,6 +94,7 @@ func TestBatchRead(t *testing.T) { sqltypes.NewVarBinary("fghi"), }, }, + StatusFlags: sqltypes.ServerStatusNoIndexUsed | sqltypes.ServerStatusAutocommit, } qr2 := sqltypes.Result{ Fields: []*querypb.Field{{ @@ -109,21 +118,19 @@ func TestBatchRead(t *testing.T) { Charset: 63, Flags: 49155, }}, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.NewInt64(1), sqltypes.NewInt32(2), }, }, + StatusFlags: sqltypes.ServerStatusAutocommit, } want := []sqltypes.Result{qr1, qr2} qrl, err := client.ExecuteBatch(queries, false) require.NoError(t, err) - if !reflect.DeepEqual(qrl, want) { - t.Errorf("ExecueBatch: \n%#v, want \n%#v", prettyPrintArr(qrl), prettyPrintArr(want)) - } + mustMatch(t, want, qrl) } func TestBatchTransaction(t *testing.T) { diff --git a/go/vt/vttablet/endtoend/call_test.go b/go/vt/vttablet/endtoend/call_test.go new file mode 100644 index 00000000000..b20754fb803 --- /dev/null +++ b/go/vt/vttablet/endtoend/call_test.go @@ -0,0 +1,155 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package endtoend + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/vttablet/endtoend/framework" +) + +var procSQL = []string{ + `create procedure proc_select1() + BEGIN + select intval from vitess_test; + END;`, + `create procedure proc_select4() + BEGIN + select intval from vitess_test; + select intval from vitess_test; + select intval from vitess_test; + select intval from vitess_test; + END;`, + `create procedure proc_dml() + BEGIN + start transaction; + insert into vitess_test(intval) values(1432); + update vitess_test set intval = 2341 where intval = 1432; + delete from vitess_test where intval = 2341; + commit; + END;`, + `create procedure proc_tx_begin() + BEGIN + start transaction; + END;`, + `create procedure proc_tx_commit() + BEGIN + commit; + END;`, + `create procedure proc_tx_rollback() + BEGIN + rollback; + END;`, + `create procedure in_parameter(IN val int) + BEGIN + insert into vitess_test(intval) values(val); + END;`, + `create procedure out_parameter(OUT name varchar(255)) + BEGIN + select 42 into name from dual; + END;`, +} + +func TestCallProcedure(t *testing.T) { + client := framework.NewClient() + type testcases struct { + query string + wantErr bool + } + tcases := []testcases{{ + query: "call proc_select1()", + wantErr: true, + }, { + query: "call proc_select4()", + wantErr: true, + }, { + query: "call proc_dml()", + }} + + for _, tc := range tcases { + t.Run(tc.query, func(t *testing.T) { + _, err := client.Execute(tc.query, nil) + if tc.wantErr { + require.EqualError(t, err, "Multi-Resultset not supported in stored procedure (CallerID: dev)") + return + } + require.NoError(t, err) + + }) + } +} + +func TestCallProcedureInsideTx(t *testing.T) { + client := framework.NewClient() + defer client.Release() + + _, err := client.BeginExecute(`call proc_dml()`, nil, nil) + require.EqualError(t, err, "Transaction state change inside the 
stored procedure is not allowed (CallerID: dev)") + + _, err = client.Execute(`select 1`, nil) + require.Contains(t, err.Error(), "ended") + +} + +func TestCallProcedureInsideReservedConn(t *testing.T) { + client := framework.NewClient() + _, err := client.ReserveBeginExecute(`call proc_dml()`, nil, nil) + require.EqualError(t, err, "Transaction state change inside the stored procedure is not allowed (CallerID: dev)") + client.Release() + + _, err = client.ReserveExecute(`call proc_dml()`, nil, nil) + require.NoError(t, err) + + _, err = client.Execute(`call proc_dml()`, nil) + require.NoError(t, err) + + client.Release() +} + +func TestCallProcedureLeakTx(t *testing.T) { + client := framework.NewClient() + + _, err := client.Execute(`call proc_tx_begin()`, nil) + require.EqualError(t, err, "Transaction not concluded inside the stored procedure, leaking transaction from stored procedure is not allowed (CallerID: dev)") +} + +func TestCallProcedureChangedTx(t *testing.T) { + client := framework.NewClient() + + _, err := client.Execute(`call proc_tx_begin()`, nil) + require.EqualError(t, err, "Transaction not concluded inside the stored procedure, leaking transaction from stored procedure is not allowed (CallerID: dev)") + + queries := []string{ + `call proc_tx_commit()`, + `call proc_tx_rollback()`, + } + for _, query := range queries { + t.Run(query, func(t *testing.T) { + _, err := client.BeginExecute(query, nil, nil) + assert.EqualError(t, err, "Transaction state change inside the stored procedure is not allowed (CallerID: dev)") + client.Release() + }) + } + + // This passes as this starts a new transaction by commiting the old transaction implicitly. 
+ _, err = client.BeginExecute(`call proc_tx_begin()`, nil, nil) + require.NoError(t, err) +} diff --git a/go/vt/vttablet/endtoend/compatibility_test.go b/go/vt/vttablet/endtoend/compatibility_test.go index fa4180659aa..3ed3b2dcef0 100644 --- a/go/vt/vttablet/endtoend/compatibility_test.go +++ b/go/vt/vttablet/endtoend/compatibility_test.go @@ -21,6 +21,9 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" @@ -33,7 +36,7 @@ func TestCharaterSet(t *testing.T) { if err != nil { t.Fatal(err) } - want := sqltypes.Result{ + want := &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "intval", @@ -77,7 +80,6 @@ func TestCharaterSet(t *testing.T) { Flags: 128, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.Int32, "1"), @@ -87,9 +89,7 @@ func TestCharaterSet(t *testing.T) { }, }, } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, qr) } func TestInts(t *testing.T) { @@ -120,7 +120,7 @@ func TestInts(t *testing.T) { if err != nil { t.Fatal(err) } - want := sqltypes.Result{ + want := &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "tiny", @@ -234,7 +234,6 @@ func TestInts(t *testing.T) { Flags: 32864, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.Int8, "-128"), @@ -251,9 +250,8 @@ func TestInts(t *testing.T) { }, }, } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, qr) + // This test was added because the following query causes mysql to // return flags with both binary and unsigned set. The test ensures // that a Uint64 is produced in spite of the stray binary flag. 
@@ -261,7 +259,7 @@ func TestInts(t *testing.T) { if err != nil { t.Fatal(err) } - want = sqltypes.Result{ + want = &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "max(bigu)", @@ -271,16 +269,14 @@ func TestInts(t *testing.T) { Flags: 32928, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.Uint64, "18446744073709551615"), }, }, } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, qr) + } func TestFractionals(t *testing.T) { @@ -304,7 +300,7 @@ func TestFractionals(t *testing.T) { if err != nil { t.Fatal(err) } - want := sqltypes.Result{ + want := &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "id", @@ -362,7 +358,6 @@ func TestFractionals(t *testing.T) { Flags: 32768, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.Int32, "1"), @@ -373,9 +368,7 @@ func TestFractionals(t *testing.T) { }, }, } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, qr) } func TestStrings(t *testing.T) { @@ -405,7 +398,7 @@ func TestStrings(t *testing.T) { if err != nil { t.Fatal(err) } - want := sqltypes.Result{ + want := &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "vb", @@ -507,7 +500,6 @@ func TestStrings(t *testing.T) { Flags: 2048, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.VarBinary, "a"), @@ -523,9 +515,7 @@ func TestStrings(t *testing.T) { }, }, } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, qr) } func TestMiscTypes(t *testing.T) { @@ -549,7 +539,7 @@ func TestMiscTypes(t *testing.T) { if err != nil { t.Fatal(err) } - want := sqltypes.Result{ + want := &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "id", @@ -613,7 +603,6 @@ func TestMiscTypes(t *testing.T) { Flags: 144, }, }, - 
RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.Int32, "1"), @@ -625,9 +614,7 @@ func TestMiscTypes(t *testing.T) { }, }, } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, qr) } func TestNull(t *testing.T) { @@ -636,7 +623,7 @@ func TestNull(t *testing.T) { if err != nil { t.Fatal(err) } - want := sqltypes.Result{ + want := &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "NULL", @@ -645,16 +632,13 @@ func TestNull(t *testing.T) { Flags: 32896, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { {}, }, }, } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, qr) } func TestJSONType(t *testing.T) { @@ -677,7 +661,7 @@ func TestJSONType(t *testing.T) { if err != nil { t.Fatal(err) } - want := sqltypes.Result{ + want := &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "id", @@ -701,23 +685,62 @@ func TestJSONType(t *testing.T) { Flags: 144, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.Int32, "1"), sqltypes.TestValue(sqltypes.TypeJSON, "{\"foo\": \"bar\"}"), }, }, + StatusFlags: sqltypes.ServerStatusNoIndexUsed | sqltypes.ServerStatusAutocommit, } - if !reflect.DeepEqual(*qr, want) { + if !reflect.DeepEqual(qr, want) { // MariaDB 10.3 has different behavior. 
want2 := want.Copy() want2.Fields[1].Type = sqltypes.Blob want2.Fields[1].Charset = 33 want2.Rows[0][1] = sqltypes.TestValue(sqltypes.Blob, "{\"foo\": \"bar\"}") - if !reflect.DeepEqual(*qr, *want2) { - t.Errorf("Execute:\n%v, want\n%v or\n%v", prettyPrint(*qr), prettyPrint(want), prettyPrint(*want2)) - } + mustMatch(t, want2, qr) } } + +func TestDBName(t *testing.T) { + client := framework.NewClient() + qr, err := client.Execute("select * from information_schema.tables where null", nil) + require.NoError(t, err) + for _, field := range qr.Fields { + t.Run("i_s:"+field.Name, func(t *testing.T) { + if field.Database != "" { + assert.Equal(t, "information_schema", field.Database, "field : %s", field.Name) + } + }) + } + + qr, err = client.Execute("select * from mysql.user where null", nil) + require.NoError(t, err) + for _, field := range qr.Fields { + t.Run("mysql:"+field.Name, func(t *testing.T) { + if field.Database != "" { + assert.Equal(t, "mysql", field.Database, "field : %s", field.Name) + } + }) + } + + qr, err = client.Execute("select * from sys.processlist where null", nil) + require.NoError(t, err) + for _, field := range qr.Fields { + t.Run("sys:"+field.Name, func(t *testing.T) { + assert.NotEqual(t, "vttest", field.Database, "field : %s", field.Name) + }) + } + + qr, err = client.Execute("select * from performance_schema.mutex_instances where null", nil) + require.NoError(t, err) + for _, field := range qr.Fields { + t.Run("performance_schema:"+field.Name, func(t *testing.T) { + if field.Database != "" { + assert.Equal(t, "performance_schema", field.Database, "field : %s", field.Name) + } + }) + } +} diff --git a/go/vt/vttablet/endtoend/config_test.go b/go/vt/vttablet/endtoend/config_test.go index f697029a579..75a6924a401 100644 --- a/go/vt/vttablet/endtoend/config_test.go +++ b/go/vt/vttablet/endtoend/config_test.go @@ -32,6 +32,7 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" 
"vitess.io/vitess/go/vt/vttablet/endtoend/framework" + "vitess.io/vitess/go/vt/vttablet/tabletserver" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -176,11 +177,14 @@ func TestConsolidatorReplicasOnly(t *testing.T) { } func TestQueryPlanCache(t *testing.T) { + t.Helper() + + var cachedPlanSize = int((&tabletserver.TabletPlan{}).CachedSize(true)) + //sleep to avoid race between SchemaChanged event clearing out the plans cache which breaks this test - time.Sleep(1 * time.Second) + framework.Server.WaitForSchemaReset(2 * time.Second) defer framework.Server.SetQueryPlanCacheCap(framework.Server.QueryPlanCacheCap()) - framework.Server.SetQueryPlanCacheCap(1) bindVars := map[string]*querypb.BindVariable{ "ival1": sqltypes.Int64BindVariable(1), @@ -191,21 +195,26 @@ func TestQueryPlanCache(t *testing.T) { client := framework.NewClient() _, _ = client.Execute("select * from vitess_test where intval=:ival1", bindVars) - _, _ = client.Execute("select * from vitess_test where intval=:ival2", bindVars) + _, _ = client.Execute("select * from vitess_test where intval=:ival1", bindVars) + assert.Equal(t, 1, framework.Server.QueryPlanCacheLen()) + vend := framework.DebugVars() - verifyIntValue(t, vend, "QueryCacheLength", 1) - verifyIntValue(t, vend, "QueryCacheSize", 1) - verifyIntValue(t, vend, "QueryCacheCapacity", 1) + assert.Equal(t, 1, framework.FetchInt(vend, "QueryCacheLength")) + assert.GreaterOrEqual(t, framework.FetchInt(vend, "QueryCacheSize"), cachedPlanSize) + + _, _ = client.Execute("select * from vitess_test where intval=:ival2", bindVars) + require.Equal(t, 2, framework.Server.QueryPlanCacheLen()) - framework.Server.SetQueryPlanCacheCap(10) - _, _ = client.Execute("select * from vitess_test where intval=:ival1", bindVars) vend = framework.DebugVars() - verifyIntValue(t, vend, "QueryCacheLength", 2) - verifyIntValue(t, vend, "QueryCacheSize", 2) + assert.Equal(t, 2, framework.FetchInt(vend, "QueryCacheLength")) + assert.GreaterOrEqual(t, 
framework.FetchInt(vend, "QueryCacheSize"), 2*cachedPlanSize) + _, _ = client.Execute("select * from vitess_test where intval=1", bindVars) + require.Equal(t, 3, framework.Server.QueryPlanCacheLen()) + vend = framework.DebugVars() - verifyIntValue(t, vend, "QueryCacheLength", 3) - verifyIntValue(t, vend, "QueryCacheSize", 3) + assert.Equal(t, 3, framework.FetchInt(vend, "QueryCacheLength")) + assert.GreaterOrEqual(t, framework.FetchInt(vend, "QueryCacheSize"), 3*cachedPlanSize) } func TestMaxResultSize(t *testing.T) { diff --git a/go/vt/vttablet/endtoend/endtoend.go b/go/vt/vttablet/endtoend/endtoend.go index 34979356639..4a15f40f602 100644 --- a/go/vt/vttablet/endtoend/endtoend.go +++ b/go/vt/vttablet/endtoend/endtoend.go @@ -34,12 +34,3 @@ func prettyPrint(qr sqltypes.Result) string { } return string(out) } - -func prettyPrintArr(qr []sqltypes.Result) string { - out, err := json.Marshal(qr) - if err != nil { - log.Errorf("Could not marshal result to json for %#v", qr) - return fmt.Sprintf("%#v", qr) - } - return string(out) -} diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index 93cbbfc0c46..7d02b07ce50 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -245,7 +245,6 @@ func (client *QueryClient) StreamExecuteWithOptions(query string, bindvars map[s result.Fields = res.Fields } result.Rows = append(result.Rows, res.Rows...) - result.RowsAffected += uint64(len(res.Rows)) return nil }, ) @@ -330,11 +329,11 @@ func (client *QueryClient) ReserveBeginExecute(query string, preQueries []string // Release performs a Release. 
func (client *QueryClient) Release() error { err := client.server.Release(client.ctx, &client.target, client.transactionID, client.reservedID) + client.reservedID = 0 + client.transactionID = 0 if err != nil { return err } - client.reservedID = 0 - client.transactionID = 0 return nil } diff --git a/go/vt/vttablet/endtoend/framework/querystats.go b/go/vt/vttablet/endtoend/framework/querystats.go index e1ad29f7cd3..c06304d2b12 100644 --- a/go/vt/vttablet/endtoend/framework/querystats.go +++ b/go/vt/vttablet/endtoend/framework/querystats.go @@ -24,8 +24,8 @@ import ( // QueryStat contains the stats for one query. type QueryStat struct { - Query, Table, Plan string - QueryCount, Time, MysqlTime, RowCount, ErrorCount int + Query, Table, Plan string + QueryCount, Time, MysqlTime, RowsAffected, RowsReturned, ErrorCount int } // QueryStats parses /debug/query_stats and returns diff --git a/go/vt/vttablet/endtoend/framework/testcase.go b/go/vt/vttablet/endtoend/framework/testcase.go index 61ec7713034..e9f3e0afbef 100644 --- a/go/vt/vttablet/endtoend/framework/testcase.go +++ b/go/vt/vttablet/endtoend/framework/testcase.go @@ -32,6 +32,7 @@ import ( // a test case. type Testable interface { Test(name string, client *QueryClient) error + Benchmark(client *QueryClient) error } var ( @@ -59,6 +60,12 @@ func (tq TestQuery) Test(name string, client *QueryClient) error { return nil } +// Benchmark executes the query and discards the results +func (tq TestQuery) Benchmark(client *QueryClient) error { + _, err := exec(client, string(tq), nil) + return err +} + // TestCase represents one test case. It will execute the // query and verify its results and effects against what // must be expected. Expected fields are optional. @@ -77,9 +84,12 @@ type TestCase struct { // query. The check is skipped if Result is nil. Result [][]string - // Rows affected can be nil or an int. + // RowsAffected affected can be nil or an int. 
RowsAffected interface{} + // RowsReturned affected can be nil or an int. + RowsReturned interface{} + // Rewritten specifies how the query should have be rewritten. Rewritten []string @@ -97,6 +107,12 @@ type TestCase struct { Invalidations interface{} } +// Benchmark executes the test case and discards the results without verifying them +func (tc *TestCase) Benchmark(client *QueryClient) error { + _, err := exec(client, tc.Query, tc.BindVars) + return err +} + // Test executes the test case and returns an error if it failed. // The name parameter is used if the test case doesn't have a name. func (tc *TestCase) Test(name string, client *QueryClient) error { @@ -105,6 +121,9 @@ func (tc *TestCase) Test(name string, client *QueryClient) error { name = tc.Name } + // wait for all previous test cases to have been settled in cache + client.server.QueryPlanCacheWait() + catcher := NewQueryCatcher() defer catcher.Close() @@ -127,6 +146,13 @@ func (tc *TestCase) Test(name string, client *QueryClient) error { } } + if tc.RowsReturned != nil { + want := tc.RowsReturned.(int) + if len(qr.Rows) != want { + errs = append(errs, fmt.Sprintf("RowsReturned mismatch: %d, want %d", len(qr.Rows), want)) + } + } + queryInfo, err := catcher.Next() if err != nil { errs = append(errs, fmt.Sprintf("Query catcher failed: %v", err)) @@ -208,3 +234,15 @@ func (mc *MultiCase) Test(name string, client *QueryClient) error { } return nil } + +// Benchmark executes the test cases in MultiCase and discards the +// results without validating them. 
+func (mc *MultiCase) Benchmark(client *QueryClient) error { + for _, tcase := range mc.Cases { + if err := tcase.Benchmark(client); err != nil { + client.Rollback() + return err + } + } + return nil +} diff --git a/go/vt/vttablet/endtoend/main_test.go b/go/vt/vttablet/endtoend/main_test.go index 438e05cc9c6..e27cf908e3f 100644 --- a/go/vt/vttablet/endtoend/main_test.go +++ b/go/vt/vttablet/endtoend/main_test.go @@ -75,11 +75,16 @@ func TestMain(m *testing.M) { fmt.Fprintf(os.Stderr, "could not launch mysql: %v\n", err) return 1 } + err := cluster.Execute(procSQL, "vttest") + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } defer cluster.TearDown() connParams = cluster.MySQLConnParams() connAppDebugParams = cluster.MySQLAppDebugConnParams() - err := framework.StartServer(connParams, connAppDebugParams, cluster.DbName()) + err = framework.StartServer(connParams, connAppDebugParams, cluster.DbName()) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 @@ -91,7 +96,6 @@ func TestMain(m *testing.M) { fmt.Fprintf(os.Stderr, "%v", err) return 1 } - return m.Run() }() os.Exit(exitCode) @@ -297,6 +301,13 @@ var tableACLConfig = `{ "readers": ["dev"], "writers": ["dev"], "admins": ["dev"] + }, + { + "name": "sys_table", + "table_names_or_prefixes": ["tables", "user", "processlist", "mutex_instances", "columns", "a"], + "readers": ["dev"], + "writers": ["dev"], + "admins": ["dev"] } ] }` diff --git a/go/vt/vttablet/endtoend/metadata_test.go b/go/vt/vttablet/endtoend/metadata_test.go index dc39ec3f239..aa61740276f 100644 --- a/go/vt/vttablet/endtoend/metadata_test.go +++ b/go/vt/vttablet/endtoend/metadata_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package endtoend import ( - "reflect" "testing" "vitess.io/vitess/go/sqltypes" @@ -66,45 +65,36 @@ func TestMetadataSpecificExecOptions(t *testing.T) { t.Fatal(err) } - want := sqltypes.Result{ - Fields: []*querypb.Field{ - { - Name: "eid", - Type: sqltypes.Int64, - Table: "vitess_b", - OrgTable: "vitess_b", - Database: "vttest", - OrgName: "eid", - ColumnLength: 20, - Charset: 63, - Flags: 49155, - }, - { - Name: "id", - Type: sqltypes.Int32, - Table: "vitess_b", - OrgTable: "vitess_b", - Database: "vttest", - OrgName: "id", - ColumnLength: 11, - Charset: 63, - Flags: 49155, - }, - }, - RowsAffected: 1, - Rows: [][]sqltypes.Value{ - { - sqltypes.NewInt64(-9223372036854775808), - sqltypes.NewInt32(-2147483648), - }, - }, - } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } - if !reflect.DeepEqual(*streamQr, want) { - t.Errorf("StreamExecute: \n%#v, want \n%#v", prettyPrint(*streamQr), prettyPrint(want)) - } + want := &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "eid", + Type: sqltypes.Int64, + Table: "vitess_b", + OrgTable: "vitess_b", + Database: "vttest", + OrgName: "eid", + ColumnLength: 20, + Charset: 63, + Flags: 49155, + }, { + Name: "id", + Type: sqltypes.Int32, + Table: "vitess_b", + OrgTable: "vitess_b", + Database: "vttest", + OrgName: "id", + ColumnLength: 11, + Charset: 63, + Flags: 49155, + }}, + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(-9223372036854775808), + sqltypes.NewInt32(-2147483648), + }}, + StatusFlags: sqltypes.ServerStatusAutocommit, + } + mustMatch(t, want, qr) + mustMatch(t, want, streamQr) } // should return Name and Type, because we pass an empty ExecuteOptions and that is the default @@ -121,31 +111,22 @@ func TestMetadataDefaultExecOptions(t *testing.T) { t.Fatal(err) } - want := sqltypes.Result{ - Fields: []*querypb.Field{ - { - Name: "eid", - Type: sqltypes.Int64, - }, - { - Name: "id", - Type: sqltypes.Int32, - }, - }, - RowsAffected: 1, - Rows: 
[][]sqltypes.Value{ - { - sqltypes.NewInt64(-9223372036854775808), - sqltypes.NewInt32(-2147483648), - }, - }, - } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } - if !reflect.DeepEqual(*streamQr, want) { - t.Errorf("StreamExecute: \n%#v, want \n%#v", prettyPrint(*streamQr), prettyPrint(want)) - } + want := &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "eid", + Type: sqltypes.Int64, + }, { + Name: "id", + Type: sqltypes.Int32, + }}, + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(-9223372036854775808), + sqltypes.NewInt32(-2147483648), + }}, + StatusFlags: sqltypes.ServerStatusAutocommit, + } + mustMatch(t, want, qr) + mustMatch(t, want, streamQr) } // should return Name and Type, because if nil ExecuteOptions are passed, we normalize to TYPE_AND_NAME @@ -162,29 +143,20 @@ func TestMetadataNoExecOptions(t *testing.T) { t.Fatal(err) } - want := sqltypes.Result{ - Fields: []*querypb.Field{ - { - Name: "eid", - Type: sqltypes.Int64, - }, - { - Name: "id", - Type: sqltypes.Int32, - }, - }, - RowsAffected: 1, - Rows: [][]sqltypes.Value{ - { - sqltypes.NewInt64(-9223372036854775808), - sqltypes.NewInt32(-2147483648), - }, - }, - } - if !reflect.DeepEqual(*qr, want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } - if !reflect.DeepEqual(*streamQr, want) { - t.Errorf("StreamExecute: \n%#v, want \n%#v", prettyPrint(*streamQr), prettyPrint(want)) - } + want := &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "eid", + Type: sqltypes.Int64, + }, { + Name: "id", + Type: sqltypes.Int32, + }}, + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(-9223372036854775808), + sqltypes.NewInt32(-2147483648), + }}, + StatusFlags: sqltypes.ServerStatusAutocommit, + } + mustMatch(t, want, qr) + mustMatch(t, want, streamQr) } diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index 5c0f2c03908..068170d779b 100644 --- 
a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -27,9 +27,12 @@ import ( "testing" "time" + "vitess.io/vitess/go/test/utils" + "context" "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -88,16 +91,14 @@ func TestBinary(t *testing.T) { Flags: 128, }, }, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.NewVarBinary(binaryData), }, }, + StatusFlags: sqltypes.ServerStatusAutocommit, } - if !qr.Equal(&want) { - t.Errorf("Execute: \n%#v, want \n%#v", prettyPrint(*qr), prettyPrint(want)) - } + mustMatch(t, want, *qr) // Test with bindvars. _, err = client.Execute( @@ -132,9 +133,7 @@ func TestNocacheListArgs(t *testing.T) { t.Error(err) return } - if qr.RowsAffected != 2 { - t.Errorf("rows affected: %d, want 2", qr.RowsAffected) - } + assert.Equal(t, 2, len(qr.Rows)) qr, err = client.Execute( query, @@ -146,9 +145,7 @@ func TestNocacheListArgs(t *testing.T) { t.Error(err) return } - if qr.RowsAffected != 1 { - t.Errorf("rows affected: %d, want 1", qr.RowsAffected) - } + assert.Equal(t, 1, len(qr.Rows)) qr, err = client.Execute( query, @@ -160,9 +157,7 @@ func TestNocacheListArgs(t *testing.T) { t.Error(err) return } - if qr.RowsAffected != 1 { - t.Errorf("rows affected: %d, want 1", qr.RowsAffected) - } + assert.Equal(t, 1, len(qr.Rows)) // Error case _, err = client.Execute( @@ -320,7 +315,6 @@ func TestBindInSelect(t *testing.T) { Charset: 63, Flags: 32897, }}, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.NewInt64(1), @@ -354,7 +348,6 @@ func TestBindInSelect(t *testing.T) { Charset: 33, Flags: 1, }}, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.NewVarChar("abcd"), @@ -384,7 +377,6 @@ func TestBindInSelect(t *testing.T) { Charset: 33, Flags: 1, }}, - RowsAffected: 1, Rows: [][]sqltypes.Value{ { sqltypes.NewVarChar("\x00\xff"), @@ -450,17 +442,17 @@ func TestQueryStats(t *testing.T) { 
stat.Time = 0 stat.MysqlTime = 0 want := framework.QueryStat{ - Query: query, - Table: "vitess_a", - Plan: "Select", - QueryCount: 1, - RowCount: 2, - ErrorCount: 0, - } - if stat != want { - t.Errorf("stat: %+v, want %+v", stat, want) + Query: query, + Table: "vitess_a", + Plan: "Select", + QueryCount: 1, + RowsAffected: 0, + RowsReturned: 2, + ErrorCount: 0, } + utils.MustMatch(t, want, stat) + // Query cache should be updated for errors that happen at MySQL level also. query = "select /* query_stats */ eid from vitess_a where dontexist(eid) = :eid" _, _ = client.Execute(query, bv) @@ -468,19 +460,20 @@ func TestQueryStats(t *testing.T) { stat.Time = 0 stat.MysqlTime = 0 want = framework.QueryStat{ - Query: query, - Table: "vitess_a", - Plan: "Select", - QueryCount: 1, - RowCount: 0, - ErrorCount: 1, + Query: query, + Table: "vitess_a", + Plan: "Select", + QueryCount: 1, + RowsAffected: 0, + RowsReturned: 0, + ErrorCount: 1, } if stat != want { t.Errorf("stat: %+v, want %+v", stat, want) } vend := framework.DebugVars() compareIntDiff(t, vend, "QueryCounts/vitess_a.Select", vstart, 2) - compareIntDiff(t, vend, "QueryRowCounts/vitess_a.Select", vstart, 2) + compareIntDiff(t, vend, "QueryRowCounts/vitess_a.Select", vstart, 0) compareIntDiff(t, vend, "QueryErrorCounts/vitess_a.Select", vstart, 1) // Ensure BeginExecute also updates the stats and strips comments. 
@@ -518,18 +511,14 @@ func TestDBAStatements(t *testing.T) { t.Error(err) return } - if qr.RowsAffected != 4 { - t.Errorf("RowsAffected: %d, want 4", qr.RowsAffected) - } + assert.Equal(t, 4, len(qr.Rows)) qr, err = client.Execute("explain vitess_a", nil) if err != nil { t.Error(err) return } - if qr.RowsAffected != 4 { - t.Errorf("RowsAffected: %d, want 4", qr.RowsAffected) - } + assert.Equal(t, 4, len(qr.Rows)) } type testLogger struct { @@ -645,9 +634,7 @@ func TestClientFoundRows(t *testing.T) { } qr, err := client.Execute("update vitess_test set charval='aa' where intval=124", nil) require.NoError(t, err) - if qr.RowsAffected != 0 { - t.Errorf("Execute(rowsFound==false): %d, want 0", qr.RowsAffected) - } + assert.Equal(t, 0, len(qr.Rows)) if err := client.Rollback(); err != nil { t.Error(err) } @@ -658,9 +645,7 @@ func TestClientFoundRows(t *testing.T) { } qr, err = client.Execute("update vitess_test set charval='aa' where intval=124", nil) require.NoError(t, err) - if qr.RowsAffected != 1 { - t.Errorf("Execute(rowsFound==true): %d, want 1", qr.RowsAffected) - } + assert.EqualValues(t, 1, qr.RowsAffected) if err := client.Rollback(); err != nil { t.Error(err) } @@ -760,3 +745,102 @@ func TestBeginExecuteWithFailingPreQueriesAndCheckConnectionState(t *testing.T) require.NoError(t, err) require.Empty(t, qr.Rows) } + +func TestSelectBooleanSystemVariables(t *testing.T) { + client := framework.NewClient() + + type testCase struct { + Variable string + Value bool + Type querypb.Type + } + + newTestCase := func(varname string, vartype querypb.Type, value bool) testCase { + return testCase{Variable: varname, Value: value, Type: vartype} + } + + tcs := []testCase{ + newTestCase("autocommit", querypb.Type_INT64, true), + newTestCase("autocommit", querypb.Type_INT64, false), + newTestCase("enable_system_settings", querypb.Type_INT64, true), + newTestCase("enable_system_settings", querypb.Type_INT64, false), + } + + for _, tc := range tcs { + qr, err := client.Execute( + 
fmt.Sprintf("select :%s", tc.Variable), + map[string]*querypb.BindVariable{tc.Variable: sqltypes.BoolBindVariable(tc.Value)}, + ) + if err != nil { + t.Error(err) + return + } + require.NotEmpty(t, qr.Fields, "fields should not be empty") + require.Equal(t, tc.Type, qr.Fields[0].Type, fmt.Sprintf("invalid type, wants: %+v, but got: %+v\n", tc.Type, qr.Fields[0].Type)) + } +} + +func TestSysSchema(t *testing.T) { + client := framework.NewClient() + _, err := client.Execute("drop table if exists `a`", nil) + require.NoError(t, err) + + _, err = client.Execute("CREATE TABLE `a` (`one` int NOT NULL,`two` int NOT NULL,PRIMARY KEY (`one`,`two`)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4", nil) + require.NoError(t, err) + defer client.Execute("drop table `a`", nil) + + qr, err := client.Execute(`SELECT + column_name column_name, + data_type data_type, + column_type full_data_type, + character_maximum_length character_maximum_length, + numeric_precision numeric_precision, + numeric_scale numeric_scale, + datetime_precision datetime_precision, + column_default column_default, + is_nullable is_nullable, + extra extra, + table_name table_name + FROM information_schema.columns + WHERE 1 != 1 + ORDER BY ordinal_position`, nil) + require.NoError(t, err) + + // This is mysql behaviour that we are receiving Uint32 on field query even though the column is Uint64. 
+ // assert.EqualValues(t, sqltypes.Uint64, qr.Fields[4].Type) - ideally this should be received + // The issue is only in MySQL 8.0 , As CI is on MySQL 5.7 need to check with Uint64 + assert.True(t, qr.Fields[4].Type == sqltypes.Uint64 || qr.Fields[4].Type == sqltypes.Uint32) + + qr, err = client.Execute(`SELECT + column_name column_name, + data_type data_type, + column_type full_data_type, + character_maximum_length character_maximum_length, + numeric_precision numeric_precision, + numeric_scale numeric_scale, + datetime_precision datetime_precision, + column_default column_default, + is_nullable is_nullable, + extra extra, + table_name table_name + FROM information_schema.columns + WHERE table_schema = 'vttest' and table_name = 'a' + ORDER BY ordinal_position`, nil) + require.NoError(t, err) + require.Equal(t, 2, len(qr.Rows)) + + // is_nullable + assert.Equal(t, `VARCHAR("NO")`, qr.Rows[0][8].String()) + assert.Equal(t, `VARCHAR("NO")`, qr.Rows[1][8].String()) + + // table_name + assert.Equal(t, `VARCHAR("a")`, qr.Rows[0][10].String()) + assert.Equal(t, `VARCHAR("a")`, qr.Rows[1][10].String()) + + // The field Type and the row value type are not matching and because of this wrong packet is send regarding the data of bigint unsigned to the client on vttestserver. + // On, Vitess cluster using protobuf we are doing the row conversion to field type and so the final row type send to client is same as field type. + // assert.EqualValues(t, sqltypes.Uint64, qr.Fields[4].Type) - We would have received this but because of field caching we are receiving Uint32. 
+ // The issue is only in MySQL 8.0 , As CI is on MySQL 5.7 need to check with Uint64 + assert.True(t, qr.Fields[4].Type == sqltypes.Uint64 || qr.Fields[4].Type == sqltypes.Uint32) + assert.Equal(t, querypb.Type_UINT64, qr.Rows[0][4].Type()) +} diff --git a/go/vt/vttablet/endtoend/queries_test.go b/go/vt/vttablet/endtoend/queries_test.go index a3e6322b790..48b6009833c 100644 --- a/go/vt/vttablet/endtoend/queries_test.go +++ b/go/vt/vttablet/endtoend/queries_test.go @@ -17,9 +17,13 @@ limitations under the License. package endtoend import ( + "math/rand" "testing" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" querypb "vitess.io/vitess/go/vt/proto/query" @@ -29,7 +33,7 @@ var frameworkErrors = `fail failed: Result mismatch: '[[1 1] [1 2]]' does not match '[[2 1] [1 2]]' -RowsAffected mismatch: 2, want 1 +RowsReturned mismatch: 2, want 1 Rewritten mismatch: '["select eid, id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1" "select /* fail */ eid, id from vitess_a union select eid, id from vitess_b limit 10001"]' does not match '["select eid id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1" "select /* fail */ eid, id from vitess_a union select eid, id from vitess_b"]' @@ -45,7 +49,7 @@ func TestTheFramework(t *testing.T) { {"2", "1"}, {"1", "2"}, }, - RowsAffected: 1, + RowsReturned: 1, Rewritten: []string{ "select eid id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1", "select /* fail */ eid, id from vitess_a union select eid, id from vitess_b", @@ -54,9 +58,1725 @@ func TestTheFramework(t *testing.T) { Table: "bb", } err := expectFail.Test("", client) - if err == nil || err.Error() != frameworkErrors { - t.Errorf("Framework result: \n%q\nexpecting\n%q", err.Error(), frameworkErrors) - } + require.Error(t, err) + utils.MustMatch(t, frameworkErrors, err.Error()) +} 
+ +var TestQueryCases = []framework.Testable{ + &framework.TestCase{ + Name: "union", + Query: "select /* union */ eid, id from vitess_a union select eid, id from vitess_b", + Result: [][]string{ + {"1", "1"}, + {"1", "2"}, + }, + Rewritten: []string{ + "select eid, id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1", + "select /* union */ eid, id from vitess_a union select eid, id from vitess_b limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "double union", + Query: "select /* double union */ eid, id from vitess_a union select eid, id from vitess_b union select eid, id from vitess_d", + Result: [][]string{ + {"1", "1"}, + {"1", "2"}, + }, + Rewritten: []string{ + "select eid, id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1 union select eid, id from vitess_d where 1 != 1", + "select /* double union */ eid, id from vitess_a union select eid, id from vitess_b union select eid, id from vitess_d limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "distinct", + Query: "select /* distinct */ distinct * from vitess_a", + Result: [][]string{ + {"1", "1", "abcd", "efgh"}, + {"1", "2", "bcde", "fghi"}, + }, + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select /* distinct */ distinct * from vitess_a limit 10001", + }, + }, + &framework.TestCase{ + Name: "group by", + Query: "select /* group by */ eid, sum(id) from vitess_a group by eid", + Result: [][]string{ + {"1", "3"}, + }, + Rewritten: []string{ + "select eid, sum(id) from vitess_a where 1 != 1 group by eid", + "select /* group by */ eid, sum(id) from vitess_a group by eid limit 10001", + }, + RowsReturned: 1, + }, + &framework.TestCase{ + Name: "having", + Query: "select /* having */ sum(id) from vitess_a having sum(id) = 3", + Result: [][]string{ + {"3"}, + }, + Rewritten: []string{ + "select sum(id) from vitess_a where 1 != 1", + "select /* having */ sum(id) from vitess_a having sum(id) = 3 
limit 10001", + }, + RowsReturned: 1, + }, + &framework.TestCase{ + Name: "limit", + Query: "select /* limit */ eid, id from vitess_a limit :a", + BindVars: map[string]*querypb.BindVariable{ + "a": sqltypes.Int64BindVariable(1), + }, + Result: [][]string{ + {"1", "1"}, + }, + Rewritten: []string{ + "select eid, id from vitess_a where 1 != 1", + "select /* limit */ eid, id from vitess_a limit 1", + }, + RowsReturned: 1, + }, + &framework.TestCase{ + Name: "multi-table", + Query: "select /* multi-table */ a.eid, a.id, b.eid, b.id from vitess_a as a, vitess_b as b order by a.eid, a.id, b.eid, b.id", + Result: [][]string{ + {"1", "1", "1", "1"}, + {"1", "1", "1", "2"}, + {"1", "2", "1", "1"}, + {"1", "2", "1", "2"}, + }, + Rewritten: []string{ + "select a.eid, a.id, b.eid, b.id from vitess_a as a, vitess_b as b where 1 != 1", + "select /* multi-table */ a.eid, a.id, b.eid, b.id from vitess_a as a, vitess_b as b order by a.eid asc, a.id asc, b.eid asc, b.id asc limit 10001", + }, + RowsReturned: 4, + }, + &framework.TestCase{ + Name: "join", + Query: "select /* join */ a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id", + Result: [][]string{ + {"1", "1", "1", "1"}, + {"1", "2", "1", "2"}, + }, + Rewritten: []string{ + "select a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", + "select /* join */ a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "straight_join", + Query: "select /* straight_join */ a.eid, a.id, b.eid, b.id from vitess_a as a straight_join vitess_b as b on a.eid = b.eid and a.id = b.id", + Result: [][]string{ + {"1", "1", "1", "1"}, + {"1", "2", "1", "2"}, + }, + Rewritten: []string{ + "select a.eid, a.id, b.eid, b.id from vitess_a as a straight_join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", + "select /* 
straight_join */ a.eid, a.id, b.eid, b.id from vitess_a as a straight_join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "cross join", + Query: "select /* cross join */ a.eid, a.id, b.eid, b.id from vitess_a as a cross join vitess_b as b on a.eid = b.eid and a.id = b.id", + Result: [][]string{ + {"1", "1", "1", "1"}, + {"1", "2", "1", "2"}, + }, + Rewritten: []string{ + "select a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", + "select /* cross join */ a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "natural join", + Query: "select /* natural join */ a.eid, a.id, b.eid, b.id from vitess_a as a natural join vitess_b as b", + Result: [][]string{ + {"1", "1", "1", "1"}, + {"1", "2", "1", "2"}, + }, + Rewritten: []string{ + "select a.eid, a.id, b.eid, b.id from vitess_a as a natural join vitess_b as b where 1 != 1", + "select /* natural join */ a.eid, a.id, b.eid, b.id from vitess_a as a natural join vitess_b as b limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "left join", + Query: "select /* left join */ a.eid, a.id, b.eid, b.id from vitess_a as a left join vitess_b as b on a.eid = b.eid and a.id = b.id", + Result: [][]string{ + {"1", "1", "1", "1"}, + {"1", "2", "1", "2"}, + }, + Rewritten: []string{ + "select a.eid, a.id, b.eid, b.id from vitess_a as a left join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", + "select /* left join */ a.eid, a.id, b.eid, b.id from vitess_a as a left join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "right join", + Query: "select /* right join */ a.eid, a.id, b.eid, b.id from vitess_a as a right join vitess_b as b on a.eid = b.eid and a.id = b.id", + Result: 
[][]string{ + {"1", "1", "1", "1"}, + {"1", "2", "1", "2"}, + }, + Rewritten: []string{ + "select a.eid, a.id, b.eid, b.id from vitess_a as a right join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", + "select /* right join */ a.eid, a.id, b.eid, b.id from vitess_a as a right join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "complex select list", + Query: "select /* complex select list */ eid+1, id from vitess_a", + Result: [][]string{ + {"2", "1"}, + {"2", "2"}, + }, + Rewritten: []string{ + "select eid + 1, id from vitess_a where 1 != 1", + "select /* complex select list */ eid + 1, id from vitess_a limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "*", + Query: "select /* * */ * from vitess_a", + Result: [][]string{ + {"1", "1", "abcd", "efgh"}, + {"1", "2", "bcde", "fghi"}, + }, + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select /* * */ * from vitess_a limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "table alias", + Query: "select /* table alias */ a.eid from vitess_a as a where a.eid=1", + Result: [][]string{ + {"1"}, + {"1"}, + }, + Rewritten: []string{ + "select a.eid from vitess_a as a where 1 != 1", + "select /* table alias */ a.eid from vitess_a as a where a.eid = 1 limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "parenthesised col", + Query: "select /* parenthesised col */ (eid) from vitess_a where eid = 1 and id = 1", + Result: [][]string{ + {"1"}, + }, + Rewritten: []string{ + "select eid from vitess_a where 1 != 1", + "select /* parenthesised col */ eid from vitess_a where eid = 1 and id = 1 limit 10001", + }, + RowsReturned: 1, + }, + &framework.MultiCase{ + Name: "for update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "select /* for update */ eid from vitess_a where eid = 1 and id = 1 for update", + Result: 
[][]string{ + {"1"}, + }, + Rewritten: []string{ + "select eid from vitess_a where 1 != 1", + "select /* for update */ eid from vitess_a where eid = 1 and id = 1 limit 10001 for update", + }, + RowsReturned: 1, + }, + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "lock in share mode", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "select /* for update */ eid from vitess_a where eid = 1 and id = 1 lock in share mode", + Result: [][]string{ + {"1"}, + }, + Rewritten: []string{ + "select eid from vitess_a where 1 != 1", + "select /* for update */ eid from vitess_a where eid = 1 and id = 1 limit 10001 lock in share mode", + }, + RowsReturned: 1, + }, + framework.TestQuery("commit"), + }, + }, + &framework.TestCase{ + Name: "complex where", + Query: "select /* complex where */ id from vitess_a where id+1 = 2", + Result: [][]string{ + {"1"}, + }, + Rewritten: []string{ + "select id from vitess_a where 1 != 1", + "select /* complex where */ id from vitess_a where id + 1 = 2 limit 10001", + }, + RowsReturned: 1, + }, + &framework.TestCase{ + Name: "complex where (non-value operand)", + Query: "select /* complex where (non-value operand) */ eid, id from vitess_a where eid = id", + Result: [][]string{ + {"1", "1"}, + }, + Rewritten: []string{ + "select eid, id from vitess_a where 1 != 1", + "select /* complex where (non-value operand) */ eid, id from vitess_a where eid = id limit 10001", + }, + RowsReturned: 1, + }, + &framework.TestCase{ + Name: "(condition)", + Query: "select /* (condition) */ * from vitess_a where (eid = 1)", + Result: [][]string{ + {"1", "1", "abcd", "efgh"}, + {"1", "2", "bcde", "fghi"}, + }, + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select /* (condition) */ * from vitess_a where eid = 1 limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "inequality", + Query: "select /* inequality */ * from vitess_a where id > 1", + Result: 
[][]string{ + {"1", "2", "bcde", "fghi"}, + }, + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select /* inequality */ * from vitess_a where id > 1 limit 10001", + }, + RowsReturned: 1, + }, + &framework.TestCase{ + Name: "in", + Query: "select /* in */ * from vitess_a where id in (1, 2)", + Result: [][]string{ + {"1", "1", "abcd", "efgh"}, + {"1", "2", "bcde", "fghi"}, + }, + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select /* in */ * from vitess_a where id in (1, 2) limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "between", + Query: "select /* between */ * from vitess_a where id between 1 and 2", + Result: [][]string{ + {"1", "1", "abcd", "efgh"}, + {"1", "2", "bcde", "fghi"}, + }, + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select /* between */ * from vitess_a where id between 1 and 2 limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "order", + Query: "select /* order */ * from vitess_a order by id desc", + Result: [][]string{ + {"1", "2", "bcde", "fghi"}, + {"1", "1", "abcd", "efgh"}, + }, + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select /* order */ * from vitess_a order by id desc limit 10001", + }, + RowsReturned: 2, + }, + &framework.TestCase{ + Name: "select in select list", + Query: "select (select eid from vitess_a where id = 1), eid from vitess_a where id = 2", + Result: [][]string{ + {"1", "1"}, + }, + Rewritten: []string{ + "select (select eid from vitess_a where 1 != 1), eid from vitess_a where 1 != 1", + "select (select eid from vitess_a where id = 1), eid from vitess_a where id = 2 limit 10001", + }, + RowsReturned: 1, + }, + &framework.TestCase{ + Name: "select in from clause", + Query: "select eid from (select eid from vitess_a where id=2) as a", + Result: [][]string{ + {"1"}, + }, + Rewritten: []string{ + "select eid from (select eid from vitess_a where 1 != 1) as a where 1 != 1", + "select eid from (select 
eid from vitess_a where id = 2) as a limit 10001", + }, + RowsReturned: 1, + }, + &framework.MultiCase{ + Name: "select in transaction", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 2 and id = 1", + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + "select * from vitess_a where eid = 2 and id = 1 limit 10001", + }, + }, + &framework.TestCase{ + Query: "select * from vitess_a where eid = 2 and id = 1", + Rewritten: []string{ + "select * from vitess_a where eid = 2 and id = 1 limit 10001", + }, + }, + &framework.TestCase{ + Query: "select :bv from vitess_a where eid = 2 and id = 1", + BindVars: map[string]*querypb.BindVariable{ + "bv": sqltypes.Int64BindVariable(1), + }, + Rewritten: []string{ + "select 1 from vitess_a where eid = 2 and id = 1 limit 10001", + }, + }, + &framework.TestCase{ + Query: "select :bv from vitess_a where eid = 2 and id = 1", + BindVars: map[string]*querypb.BindVariable{ + "bv": sqltypes.StringBindVariable("abcd"), + }, + Rewritten: []string{ + "select 'abcd' from vitess_a where eid = 2 and id = 1 limit 10001", + }, + }, + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "simple insert", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* simple */ into vitess_a values (2, 1, 'aaaa', 'bbbb')", + Rewritten: []string{ + "insert /* simple */ into vitess_a values (2, 1, 'aaaa', 'bbbb')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 2 and id = 1", + Result: [][]string{ + {"2", "1", "aaaa", "bbbb"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "insert ignore", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert 
/* simple */ ignore into vitess_a values (2, 1, 'aaaa', 'bbbb')", + Rewritten: []string{ + "insert /* simple */ ignore into vitess_a values (2, 1, 'aaaa', 'bbbb')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 2 and id = 1", + Result: [][]string{ + {"2", "1", "aaaa", "bbbb"}, + }, + }, + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* simple */ ignore into vitess_a values (2, 1, 'cccc', 'cccc')", + Rewritten: []string{ + "insert /* simple */ ignore into vitess_a values (2, 1, 'cccc', 'cccc')", + }, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 2 and id = 1", + Result: [][]string{ + {"2", "1", "aaaa", "bbbb"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "qualified insert", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* qualified */ into vitess_a(eid, id, name, foo) values (3, 1, 'aaaa', 'cccc')", + Rewritten: []string{ + "insert /* qualified */ into vitess_a(eid, id, `name`, foo) values (3, 1, 'aaaa', 'cccc')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 3 and id = 1", + Result: [][]string{ + {"3", "1", "aaaa", "cccc"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "insert with mixed case column names", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_mixed_case(col1, col2) values(1, 2)", + Rewritten: []string{ + "insert into vitess_mixed_case(col1, col2) values (1, 2)", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + 
&framework.TestCase{ + Query: "select COL1, COL2 from vitess_mixed_case", + Result: [][]string{ + {"1", "2"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_mixed_case"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "insert auto_increment", + Cases: []framework.Testable{ + framework.TestQuery("alter table vitess_e auto_increment = 1"), + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* auto_increment */ into vitess_e(name, foo) values ('aaaa', 'cccc')", + Rewritten: []string{ + "insert /* auto_increment */ into vitess_e(`name`, foo) values ('aaaa', 'cccc')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_e", + Result: [][]string{ + {"1", "1", "aaaa", "cccc"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_e"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "insert with null auto_increment", + Cases: []framework.Testable{ + framework.TestQuery("alter table vitess_e auto_increment = 1"), + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* auto_increment */ into vitess_e(eid, name, foo) values (NULL, 'aaaa', 'cccc')", + Rewritten: []string{ + "insert /* auto_increment */ into vitess_e(eid, `name`, foo) values (null, 'aaaa', 'cccc')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_e", + Result: [][]string{ + {"1", "1", "aaaa", "cccc"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_e"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "insert with number default value", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* num default */ into vitess_a(eid, name, foo) values (3, 'aaaa', 'cccc')", + Rewritten: []string{ + "insert /* num default */ 
into vitess_a(eid, `name`, foo) values (3, 'aaaa', 'cccc')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 3 and id = 1", + Result: [][]string{ + {"3", "1", "aaaa", "cccc"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "insert with string default value", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* string default */ into vitess_f(id) values (1)", + Rewritten: []string{ + "insert /* string default */ into vitess_f(id) values (1)", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_f", + Result: [][]string{ + {"ab", "1"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_f"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "bind values", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* bind values */ into vitess_a(eid, id, name, foo) values (:eid, :id, :name, :foo)", + BindVars: map[string]*querypb.BindVariable{ + "foo": sqltypes.StringBindVariable("cccc"), + "eid": sqltypes.Int64BindVariable(4), + "name": sqltypes.StringBindVariable("aaaa"), + "id": sqltypes.Int64BindVariable(1), + }, + Rewritten: []string{ + "insert /* bind values */ into vitess_a(eid, id, `name`, foo) values (4, 1, 'aaaa', 'cccc')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 4 and id = 1", + Result: [][]string{ + {"4", "1", "aaaa", "cccc"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "positional values", + Cases: 
[]framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* positional values */ into vitess_a(eid, id, name, foo) values (?, ?, ?, ?)", + BindVars: map[string]*querypb.BindVariable{ + "v1": sqltypes.Int64BindVariable(4), + "v2": sqltypes.Int64BindVariable(1), + "v3": sqltypes.StringBindVariable("aaaa"), + "v4": sqltypes.StringBindVariable("cccc"), + }, + Rewritten: []string{ + "insert /* positional values */ into vitess_a(eid, id, `name`, foo) values (4, 1, 'aaaa', 'cccc')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 4 and id = 1", + Result: [][]string{ + {"4", "1", "aaaa", "cccc"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "out of sequence columns", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_a(id, eid, foo, name) values (-1, 5, 'aaa', 'bbb')", + Rewritten: []string{ + "insert into vitess_a(id, eid, foo, `name`) values (-1, 5, 'aaa', 'bbb')", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid = 5 and id = -1", + Result: [][]string{ + {"5", "-1", "bbb", "aaa"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "subquery", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert /* subquery */ into vitess_a(eid, name, foo) select eid, name, foo from vitess_c", + Rewritten: []string{ + "insert /* subquery */ into vitess_a(eid, `name`, foo) select eid, `name`, foo from vitess_c", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a 
where eid in (10, 11)", + Result: [][]string{ + {"10", "1", "abcd", "20"}, + {"11", "1", "bcde", "30"}, + }, + }, + framework.TestQuery("alter table vitess_e auto_increment = 20"), + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_e(id, name, foo) select eid, name, foo from vitess_c", + Rewritten: []string{ + "insert into vitess_e(id, `name`, foo) select eid, `name`, foo from vitess_c", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select eid, id, name, foo from vitess_e", + Result: [][]string{ + {"20", "10", "abcd", "20"}, + {"21", "11", "bcde", "30"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("delete from vitess_c where eid<10"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "multi-value", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_a(eid, id, name, foo) values (5, 1, '', ''), (7, 1, '', '')", + Rewritten: []string{ + "insert into vitess_a(eid, id, `name`, foo) values (5, 1, '', ''), (7, 1, '', '')", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid>1", + Result: [][]string{ + {"5", "1", "", ""}, + {"7", "1", "", ""}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_a where eid>1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "upsert single row present/absent", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = 1", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = 1", + }, + RowsAffected: 1, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", 
"1"}, + }, + }, + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", + }, + RowsAffected: 2, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", "2"}, + }, + }, + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", + }, + }, + &framework.TestCase{ + Query: "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = 3", + Rewritten: []string{ + "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = 3", + }, + RowsAffected: 2, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", "3"}, + }, + }, + framework.TestQuery("commit"), + framework.TestQuery("begin"), + framework.TestQuery("delete from upsert_test"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "upsert changes pk", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id1 = 1", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id1 = 1", + }, + RowsAffected: 1, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", "1"}, + }, + }, + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id1 = 2", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id1 = 2", + }, + RowsAffected: 2, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"2", "1"}, + }, + }, + 
framework.TestQuery("commit"), + framework.TestQuery("begin"), + framework.TestQuery("delete from upsert_test"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "upsert single row with values()", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = values(id2) + 1", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = values(id2) + 1", + }, + RowsAffected: 1, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", "1"}, + }, + }, + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id2) + 1", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id2) + 1", + }, + RowsAffected: 2, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", "3"}, + }, + }, + &framework.TestCase{ + Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id1)", + Rewritten: []string{ + "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id1)", + }, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", "1"}, + }, + }, + &framework.TestCase{ + Query: "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = greatest(values(id1), values(id2))", + Rewritten: []string{ + "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = greatest(values(id1), values(id2))", + }, + RowsAffected: 2, + }, + &framework.TestCase{ + Query: "select * from upsert_test", + Result: [][]string{ + {"1", "3"}, + }, + }, + framework.TestQuery("commit"), + framework.TestQuery("begin"), + framework.TestQuery("delete from upsert_test"), + 
framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 and id = 1", + Rewritten: []string{ + "update /* pk */ vitess_a set foo = 'bar' where eid = 1 and id = 1 limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select foo from vitess_a where id = 1", + Result: [][]string{ + {"bar"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set foo='efgh' where id=1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "single in update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 and id in (1, 2)", + Rewritten: []string{ + "update /* pk */ vitess_a set foo = 'bar' where eid = 1 and id in (1, 2) limit 10001", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select foo from vitess_a where id = 1", + Result: [][]string{ + {"bar"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set foo='efgh' where id=1"), + framework.TestQuery("update vitess_a set foo='fghi' where id=2"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "double in update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update /* pk */ vitess_a set foo='bar' where eid in (1) and id in (1, 2)", + Rewritten: []string{ + "update /* pk */ vitess_a set foo = 'bar' where eid in (1) and id in (1, 2) limit 10001", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select foo from vitess_a where id = 1", + Result: [][]string{ + {"bar"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set foo='efgh' 
where id=1"), + framework.TestQuery("update vitess_a set foo='fghi' where id=2"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "double in 2 update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update /* pk */ vitess_a set foo='bar' where eid in (1, 2) and id in (1, 2)", + Rewritten: []string{ + "update /* pk */ vitess_a set foo = 'bar' where eid in (1, 2) and id in (1, 2) limit 10001", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select foo from vitess_a where id = 1", + Result: [][]string{ + {"bar"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set foo='efgh' where id=1"), + framework.TestQuery("update vitess_a set foo='fghi' where id=2"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "pk change update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update vitess_a set eid = 2 where eid = 1 and id = 1", + Rewritten: []string{ + "update vitess_a set eid = 2 where eid = 1 and id = 1 limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select eid from vitess_a where id = 1", + Result: [][]string{ + {"2"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set eid=1 where id=1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "partial pk update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update /* pk */ vitess_a set foo='bar' where id = 1", + Rewritten: []string{ + "update /* pk */ vitess_a set foo = 'bar' where id = 1 limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select foo from vitess_a where id = 1", + Result: [][]string{ + {"bar"}, + }, + }, + framework.TestQuery("begin"), + 
framework.TestQuery("update vitess_a set foo='efgh' where id=1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "limit update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 limit 1", + Rewritten: []string{ + "update /* pk */ vitess_a set foo = 'bar' where eid = 1 limit 1", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select foo from vitess_a where id = 1", + Result: [][]string{ + {"bar"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set foo='efgh' where id=1"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "order by update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 order by id desc limit 1", + Rewritten: []string{ + "update /* pk */ vitess_a set foo = 'bar' where eid = 1 order by id desc limit 1", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select foo from vitess_a where id = 2", + Result: [][]string{ + {"bar"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set foo='fghi' where id=2"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "missing where update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update vitess_a set foo='bar'", + Rewritten: []string{ + "update vitess_a set foo = 'bar' limit 10001", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a", + Result: [][]string{ + {"1", "1", "abcd", "bar"}, + {"1", "2", "bcde", "bar"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("update vitess_a set foo='efgh' where id=1"), + framework.TestQuery("update vitess_a set 
foo='fghi' where id=2"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "single pk update one row update", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), + framework.TestQuery("commit"), + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update vitess_f set id=2 where vb='a'", + Rewritten: []string{ + "update vitess_f set id = 2 where vb = 'a' limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_f", + Result: [][]string{ + {"a", "2"}, + {"b", "2"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_f"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "single pk update two rows", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), + framework.TestQuery("commit"), + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update vitess_f set id=3 where vb in ('a', 'b')", + Rewritten: []string{ + "update vitess_f set id = 3 where vb in ('a', 'b') limit 10001", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_f", + Result: [][]string{ + {"a", "3"}, + {"b", "3"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_f"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "single pk update subquery", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), + framework.TestQuery("commit"), + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update vitess_f set id=4 where id >= 0", + Rewritten: []string{ + "update vitess_f set id = 4 where id >= 0 limit 10001", + }, + RowsAffected: 2, + }, + 
framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_f", + Result: [][]string{ + {"a", "4"}, + {"b", "4"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_f"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "single pk update subquery no rows", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), + framework.TestQuery("commit"), + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update vitess_f set id=4 where id < 0", + Rewritten: []string{ + "update vitess_f set id = 4 where id < 0 limit 10001", + }, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_f", + Result: [][]string{ + {"a", "1"}, + {"b", "2"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_f"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + &framework.TestCase{ + Query: "delete /* pk */ from vitess_a where eid = 2 and id = 1", + Rewritten: []string{ + "delete /* pk */ from vitess_a where eid = 2 and id = 1 limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "single in delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + &framework.TestCase{ + Query: "delete /* pk */ from vitess_a where eid = 2 and id in (1, 2)", + Rewritten: []string{ + "delete /* pk */ from vitess_a where eid = 2 and id in (1, 2) limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + 
Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "double in delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + &framework.TestCase{ + Query: "delete /* pk */ from vitess_a where eid in (2) and id in (1, 2)", + Rewritten: []string{ + "delete /* pk */ from vitess_a where eid in (2) and id in (1, 2) limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "double in 2 delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + &framework.TestCase{ + Query: "delete /* pk */ from vitess_a where eid in (2, 3) and id in (1, 2)", + Rewritten: []string{ + "delete /* pk */ from vitess_a where eid in (2, 3) and id in (1, 2) limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "complex where delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + &framework.TestCase{ + Query: "delete from vitess_a where eid = 1+1 and id = 1", + Rewritten: []string{ + "delete from vitess_a where eid = 1 + 1 and id = 1 limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "partial pk delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + &framework.TestCase{ + Query: "delete from vitess_a where eid = 2", + Rewritten: 
[]string{ + "delete from vitess_a where eid = 2 limit 10001", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "limit delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + &framework.TestCase{ + Query: "delete from vitess_a where eid = 2 limit 1", + Rewritten: []string{ + "delete from vitess_a where eid = 2 limit 1", + }, + RowsAffected: 1, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "order by delete", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), + framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 2, '', '')"), + &framework.TestCase{ + Query: "delete from vitess_a where eid = 2 order by id desc", + Rewritten: []string{ + "delete from vitess_a where eid = 2 order by id desc limit 10001", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_a where eid=2", + }, + }, + }, + &framework.MultiCase{ + Name: "integer data types", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_ints values(:tiny, :tinyu, :small, :smallu, :medium, :mediumu, :normal, :normalu, :big, :bigu, :year)", + BindVars: map[string]*querypb.BindVariable{ + "medium": sqltypes.Int64BindVariable(-8388608), + "smallu": sqltypes.Int64BindVariable(65535), + "normal": sqltypes.Int64BindVariable(-2147483648), + "big": sqltypes.Int64BindVariable(-9223372036854775808), + "tinyu": sqltypes.Int64BindVariable(255), + "year": sqltypes.Int64BindVariable(2012), + "tiny": sqltypes.Int64BindVariable(-128), + "bigu": 
sqltypes.Uint64BindVariable(18446744073709551615), + "normalu": sqltypes.Int64BindVariable(4294967295), + "small": sqltypes.Int64BindVariable(-32768), + "mediumu": sqltypes.Int64BindVariable(16777215), + }, + Rewritten: []string{ + "insert into vitess_ints values (-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)", + }, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_ints where tiny = -128", + Result: [][]string{ + {"-128", "255", "-32768", "65535", "-8388608", "16777215", "-2147483648", "4294967295", "-9223372036854775808", "18446744073709551615", "2012"}, + }, + Rewritten: []string{ + "select * from vitess_ints where 1 != 1", + "select * from vitess_ints where tiny = -128 limit 10001", + }, + }, + &framework.TestCase{ + Query: "select * from vitess_ints where tiny = -128", + Result: [][]string{ + {"-128", "255", "-32768", "65535", "-8388608", "16777215", "-2147483648", "4294967295", "-9223372036854775808", "18446744073709551615", "2012"}, + }, + Rewritten: []string{ + "select * from vitess_ints where tiny = -128 limit 10001", + }, + }, + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_ints select 2, tinyu, small, smallu, medium, mediumu, normal, normalu, big, bigu, y from vitess_ints", + Rewritten: []string{ + "insert into vitess_ints select 2, tinyu, small, smallu, medium, mediumu, normal, normalu, big, bigu, y from vitess_ints", + }, + }, + framework.TestQuery("commit"), + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_ints"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "fractional data types", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_fracts values(:id, :deci, :num, :f, :d)", + BindVars: map[string]*querypb.BindVariable{ + "d": sqltypes.Float64BindVariable(4.99), + "num": 
sqltypes.StringBindVariable("2.99"), + "id": sqltypes.Int64BindVariable(1), + "f": sqltypes.Float64BindVariable(3.99), + "deci": sqltypes.StringBindVariable("1.99"), + }, + Rewritten: []string{ + "insert into vitess_fracts values (1, '1.99', '2.99', 3.99, 4.99)", + }, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_fracts where id = 1", + Result: [][]string{ + {"1", "1.99", "2.99", "3.99", "4.99"}, + }, + Rewritten: []string{ + "select * from vitess_fracts where 1 != 1", + "select * from vitess_fracts where id = 1 limit 10001", + }, + }, + &framework.TestCase{ + Query: "select * from vitess_fracts where id = 1", + Result: [][]string{ + {"1", "1.99", "2.99", "3.99", "4.99"}, + }, + Rewritten: []string{ + "select * from vitess_fracts where id = 1 limit 10001", + }, + }, + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_fracts select 2, deci, num, f, d from vitess_fracts", + Rewritten: []string{ + "insert into vitess_fracts select 2, deci, num, f, d from vitess_fracts", + }, + }, + framework.TestQuery("commit"), + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_fracts"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "string data types", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_strings values (:vb, :c, :vc, :b, :tb, :bl, :ttx, :tx, :en, :s)", + BindVars: map[string]*querypb.BindVariable{ + "ttx": sqltypes.StringBindVariable("g"), + "vb": sqltypes.StringBindVariable("a"), + "vc": sqltypes.StringBindVariable("c"), + "en": sqltypes.StringBindVariable("a"), + "tx": sqltypes.StringBindVariable("h"), + "bl": sqltypes.StringBindVariable("f"), + "s": sqltypes.StringBindVariable("a,b"), + "b": sqltypes.StringBindVariable("d"), + "tb": sqltypes.StringBindVariable("e"), + "c": sqltypes.StringBindVariable("b"), + }, + Rewritten: []string{ + "insert into vitess_strings values 
('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b')", + }, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_strings where vb = 'a'", + Result: [][]string{ + {"a", "b", "c", "d\x00\x00\x00", "e", "f", "g", "h", "a", "a,b"}, + }, + Rewritten: []string{ + "select * from vitess_strings where 1 != 1", + "select * from vitess_strings where vb = 'a' limit 10001", + }, + }, + &framework.TestCase{ + Query: "select * from vitess_strings where vb = 'a'", + Result: [][]string{ + {"a", "b", "c", "d\x00\x00\x00", "e", "f", "g", "h", "a", "a,b"}, + }, + Rewritten: []string{ + "select * from vitess_strings where vb = 'a' limit 10001", + }, + }, + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_strings select 'b', c, vc, b, tb, bl, ttx, tx, en, s from vitess_strings", + Rewritten: []string{ + "insert into vitess_strings select 'b', c, vc, b, tb, bl, ttx, tx, en, s from vitess_strings", + }, + }, + framework.TestQuery("commit"), + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_strings"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "misc data types", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_misc values(:id, :b, :d, :dt, :t, point(1, 2))", + BindVars: map[string]*querypb.BindVariable{ + "t": sqltypes.StringBindVariable("15:45:45"), + "dt": sqltypes.StringBindVariable("2012-01-01 15:45:45"), + "b": sqltypes.StringBindVariable("\x01"), + "id": sqltypes.Int64BindVariable(1), + "d": sqltypes.StringBindVariable("2012-01-01"), + }, + Rewritten: []string{ + "insert into vitess_misc values (1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", + }, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_misc where id = 1", + Result: [][]string{ + {"1", "\x01", "2012-01-01", "2012-01-01 15:45:45", "15:45:45", point12}, + }, + 
Rewritten: []string{ + "select * from vitess_misc where 1 != 1", + "select * from vitess_misc where id = 1 limit 10001", + }, + }, + &framework.TestCase{ + Query: "select * from vitess_misc where id = 1", + Result: [][]string{ + {"1", "\x01", "2012-01-01", "2012-01-01 15:45:45", "15:45:45", point12}, + }, + Rewritten: []string{ + "select * from vitess_misc where id = 1 limit 10001", + }, + }, + framework.TestQuery("begin"), + &framework.TestCase{ + // Skip geometry test. The binary representation is non-trivial to represent as go string. + Query: "insert into vitess_misc(id, b, d, dt, t) select 2, b, d, dt, t from vitess_misc", + Rewritten: []string{ + "insert into vitess_misc(id, b, d, dt, t) select 2, b, d, dt, t from vitess_misc", + }, + }, + framework.TestQuery("commit"), + framework.TestQuery("begin"), + framework.TestQuery("delete from vitess_misc"), + framework.TestQuery("commit"), + }, + }, + &framework.MultiCase{ + Name: "boolean expressions", + Cases: []framework.Testable{ + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'foo', false)"), + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_bool", + Result: [][]string{ + {"1", "1", "foo", "0"}, + }, + }, + framework.TestQuery("begin"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'bar', 23)"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'baz', 2342)"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'test', 123)"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'aa', 384)"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'bbb', 213)"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'cc', 24342)"), + framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'd', 1231)"), + 
framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'ee', 3894)"), + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_bool where bval", + Result: [][]string{ + {"1", "1", "foo", "0"}, + {"2", "1", "bar", "23"}, + {"3", "1", "baz", "2342"}, + {"4", "1", "test", "123"}, + {"5", "1", "aa", "384"}, + }, + }, + &framework.TestCase{ + Query: "select * from vitess_bool where case sval when 'foo' then true when 'test' then true else false end", + Result: [][]string{ + {"1", "1", "foo", "0"}, + {"4", "1", "test", "123"}, + }, + }, + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "insert into vitess_bool(auto, bval, sval, ival) values (1, false, 'test2', 191) on duplicate key update bval = false", + Rewritten: []string{ + "insert into vitess_bool(auto, bval, sval, ival) values (1, false, 'test2', 191) on duplicate key update bval = false", + }, + RowsAffected: 2, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_bool where bval", + Result: [][]string{ + {"2", "1", "bar", "23"}, + {"3", "1", "baz", "2342"}, + {"4", "1", "test", "123"}, + {"5", "1", "aa", "384"}, + }, + }, + &framework.TestCase{ + Query: "select * from vitess_bool where not bval", + Result: [][]string{ + {"1", "0", "foo", "0"}, + {"6", "0", "bbb", "213"}, + {"7", "0", "cc", "24342"}, + {"8", "0", "d", "1231"}, + {"9", "0", "ee", "3894"}, + }, + }, + framework.TestQuery("begin"), + &framework.TestCase{ + Query: "update vitess_bool set sval = 'test' where bval is false or ival = 23", + Rewritten: []string{ + "update vitess_bool set sval = 'test' where bval is false or ival = 23 limit 10001", + }, + RowsAffected: 6, + }, + framework.TestQuery("commit"), + &framework.TestCase{ + Query: "select * from vitess_bool where not bval", + Result: [][]string{ + {"1", "0", "test", "0"}, + {"6", "0", "test", "213"}, + {"7", "0", "test", "24342"}, + {"8", "0", "test", "1231"}, + {"9", "0", "test", "3894"}, + 
}, + }, + &framework.TestCase{ + Query: "select (bval or ival) from vitess_bool where ival = 213", + Result: [][]string{ + {"1"}, + }, + }, + &framework.TestCase{ + Query: "select bval from vitess_bool where ival = 213", + Result: [][]string{ + {"0"}, + }, + }, + }, + }, + &framework.MultiCase{ + Name: "impossible queries", + Cases: []framework.Testable{ + &framework.TestCase{ + Name: "specific column", + Query: "select eid from vitess_a where 1 != 1", + Rewritten: []string{ + "select eid from vitess_a where 1 != 1", + }, + RowsAffected: 0, + }, + &framework.TestCase{ + Name: "all columns", + Query: "select * from vitess_a where 1 != 1", + Rewritten: []string{ + "select * from vitess_a where 1 != 1", + }, + RowsAffected: 0, + }, + &framework.TestCase{ + Name: "bind vars", + Query: "select :bv from vitess_a where 1 != 1", + BindVars: map[string]*querypb.BindVariable{ + "bv": sqltypes.Int64BindVariable(1), + }, + Rewritten: []string{ + "select 1 from vitess_a where 1 != 1 limit 10001", + }, + RowsAffected: 0, + }, + }, + }, } // Most of these tests are not really needed because the queries are mostly pass-through. 
@@ -64,1725 +1784,21 @@ func TestTheFramework(t *testing.T) { func TestQueries(t *testing.T) { client := framework.NewClient() - testCases := []framework.Testable{ - &framework.TestCase{ - Name: "union", - Query: "select /* union */ eid, id from vitess_a union select eid, id from vitess_b", - Result: [][]string{ - {"1", "1"}, - {"1", "2"}, - }, - Rewritten: []string{ - "select eid, id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1", - "select /* union */ eid, id from vitess_a union select eid, id from vitess_b limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "double union", - Query: "select /* double union */ eid, id from vitess_a union select eid, id from vitess_b union select eid, id from vitess_d", - Result: [][]string{ - {"1", "1"}, - {"1", "2"}, - }, - Rewritten: []string{ - "select eid, id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1 union select eid, id from vitess_d where 1 != 1", - "select /* double union */ eid, id from vitess_a union select eid, id from vitess_b union select eid, id from vitess_d limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "distinct", - Query: "select /* distinct */ distinct * from vitess_a", - Result: [][]string{ - {"1", "1", "abcd", "efgh"}, - {"1", "2", "bcde", "fghi"}, - }, - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select /* distinct */ distinct * from vitess_a limit 10001", - }, - }, - &framework.TestCase{ - Name: "group by", - Query: "select /* group by */ eid, sum(id) from vitess_a group by eid", - Result: [][]string{ - {"1", "3"}, - }, - Rewritten: []string{ - "select eid, sum(id) from vitess_a where 1 != 1 group by eid", - "select /* group by */ eid, sum(id) from vitess_a group by eid limit 10001", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Name: "having", - Query: "select /* having */ sum(id) from vitess_a having sum(id) = 3", - Result: [][]string{ - {"3"}, - }, - Rewritten: 
[]string{ - "select sum(id) from vitess_a where 1 != 1", - "select /* having */ sum(id) from vitess_a having sum(id) = 3 limit 10001", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Name: "limit", - Query: "select /* limit */ eid, id from vitess_a limit :a", - BindVars: map[string]*querypb.BindVariable{ - "a": sqltypes.Int64BindVariable(1), - }, - Result: [][]string{ - {"1", "1"}, - }, - Rewritten: []string{ - "select eid, id from vitess_a where 1 != 1", - "select /* limit */ eid, id from vitess_a limit 1", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Name: "multi-table", - Query: "select /* multi-table */ a.eid, a.id, b.eid, b.id from vitess_a as a, vitess_b as b order by a.eid, a.id, b.eid, b.id", - Result: [][]string{ - {"1", "1", "1", "1"}, - {"1", "1", "1", "2"}, - {"1", "2", "1", "1"}, - {"1", "2", "1", "2"}, - }, - Rewritten: []string{ - "select a.eid, a.id, b.eid, b.id from vitess_a as a, vitess_b as b where 1 != 1", - "select /* multi-table */ a.eid, a.id, b.eid, b.id from vitess_a as a, vitess_b as b order by a.eid asc, a.id asc, b.eid asc, b.id asc limit 10001", - }, - RowsAffected: 4, - }, - &framework.TestCase{ - Name: "join", - Query: "select /* join */ a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id", - Result: [][]string{ - {"1", "1", "1", "1"}, - {"1", "2", "1", "2"}, - }, - Rewritten: []string{ - "select a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", - "select /* join */ a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "straight_join", - Query: "select /* straight_join */ a.eid, a.id, b.eid, b.id from vitess_a as a straight_join vitess_b as b on a.eid = b.eid and a.id = b.id", - Result: [][]string{ - {"1", "1", "1", "1"}, - {"1", "2", "1", "2"}, - }, - Rewritten: []string{ - "select a.eid, a.id, b.eid, 
b.id from vitess_a as a straight_join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", - "select /* straight_join */ a.eid, a.id, b.eid, b.id from vitess_a as a straight_join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "cross join", - Query: "select /* cross join */ a.eid, a.id, b.eid, b.id from vitess_a as a cross join vitess_b as b on a.eid = b.eid and a.id = b.id", - Result: [][]string{ - {"1", "1", "1", "1"}, - {"1", "2", "1", "2"}, - }, - Rewritten: []string{ - "select a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", - "select /* cross join */ a.eid, a.id, b.eid, b.id from vitess_a as a join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "natural join", - Query: "select /* natural join */ a.eid, a.id, b.eid, b.id from vitess_a as a natural join vitess_b as b", - Result: [][]string{ - {"1", "1", "1", "1"}, - {"1", "2", "1", "2"}, - }, - Rewritten: []string{ - "select a.eid, a.id, b.eid, b.id from vitess_a as a natural join vitess_b as b where 1 != 1", - "select /* natural join */ a.eid, a.id, b.eid, b.id from vitess_a as a natural join vitess_b as b limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "left join", - Query: "select /* left join */ a.eid, a.id, b.eid, b.id from vitess_a as a left join vitess_b as b on a.eid = b.eid and a.id = b.id", - Result: [][]string{ - {"1", "1", "1", "1"}, - {"1", "2", "1", "2"}, - }, - Rewritten: []string{ - "select a.eid, a.id, b.eid, b.id from vitess_a as a left join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", - "select /* left join */ a.eid, a.id, b.eid, b.id from vitess_a as a left join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "right join", - Query: "select /* right join */ a.eid, 
a.id, b.eid, b.id from vitess_a as a right join vitess_b as b on a.eid = b.eid and a.id = b.id", - Result: [][]string{ - {"1", "1", "1", "1"}, - {"1", "2", "1", "2"}, - }, - Rewritten: []string{ - "select a.eid, a.id, b.eid, b.id from vitess_a as a right join vitess_b as b on a.eid = b.eid and a.id = b.id where 1 != 1", - "select /* right join */ a.eid, a.id, b.eid, b.id from vitess_a as a right join vitess_b as b on a.eid = b.eid and a.id = b.id limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "complex select list", - Query: "select /* complex select list */ eid+1, id from vitess_a", - Result: [][]string{ - {"2", "1"}, - {"2", "2"}, - }, - Rewritten: []string{ - "select eid + 1, id from vitess_a where 1 != 1", - "select /* complex select list */ eid + 1, id from vitess_a limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "*", - Query: "select /* * */ * from vitess_a", - Result: [][]string{ - {"1", "1", "abcd", "efgh"}, - {"1", "2", "bcde", "fghi"}, - }, - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select /* * */ * from vitess_a limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "table alias", - Query: "select /* table alias */ a.eid from vitess_a as a where a.eid=1", - Result: [][]string{ - {"1"}, - {"1"}, - }, - Rewritten: []string{ - "select a.eid from vitess_a as a where 1 != 1", - "select /* table alias */ a.eid from vitess_a as a where a.eid = 1 limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "parenthesised col", - Query: "select /* parenthesised col */ (eid) from vitess_a where eid = 1 and id = 1", - Result: [][]string{ - {"1"}, - }, - Rewritten: []string{ - "select eid from vitess_a where 1 != 1", - "select /* parenthesised col */ eid from vitess_a where eid = 1 and id = 1 limit 10001", - }, - RowsAffected: 1, - }, - &framework.MultiCase{ - Name: "for update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - 
&framework.TestCase{ - Query: "select /* for update */ eid from vitess_a where eid = 1 and id = 1 for update", - Result: [][]string{ - {"1"}, - }, - Rewritten: []string{ - "select eid from vitess_a where 1 != 1", - "select /* for update */ eid from vitess_a where eid = 1 and id = 1 limit 10001 for update", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "lock in share mode", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "select /* for update */ eid from vitess_a where eid = 1 and id = 1 lock in share mode", - Result: [][]string{ - {"1"}, - }, - Rewritten: []string{ - "select eid from vitess_a where 1 != 1", - "select /* for update */ eid from vitess_a where eid = 1 and id = 1 limit 10001 lock in share mode", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - }, - }, - &framework.TestCase{ - Name: "complex where", - Query: "select /* complex where */ id from vitess_a where id+1 = 2", - Result: [][]string{ - {"1"}, - }, - Rewritten: []string{ - "select id from vitess_a where 1 != 1", - "select /* complex where */ id from vitess_a where id + 1 = 2 limit 10001", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Name: "complex where (non-value operand)", - Query: "select /* complex where (non-value operand) */ eid, id from vitess_a where eid = id", - Result: [][]string{ - {"1", "1"}, - }, - Rewritten: []string{ - "select eid, id from vitess_a where 1 != 1", - "select /* complex where (non-value operand) */ eid, id from vitess_a where eid = id limit 10001", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Name: "(condition)", - Query: "select /* (condition) */ * from vitess_a where (eid = 1)", - Result: [][]string{ - {"1", "1", "abcd", "efgh"}, - {"1", "2", "bcde", "fghi"}, - }, - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select /* (condition) */ * from vitess_a where eid = 1 limit 10001", - }, - RowsAffected: 2, - }, - 
&framework.TestCase{ - Name: "inequality", - Query: "select /* inequality */ * from vitess_a where id > 1", - Result: [][]string{ - {"1", "2", "bcde", "fghi"}, - }, - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select /* inequality */ * from vitess_a where id > 1 limit 10001", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Name: "in", - Query: "select /* in */ * from vitess_a where id in (1, 2)", - Result: [][]string{ - {"1", "1", "abcd", "efgh"}, - {"1", "2", "bcde", "fghi"}, - }, - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select /* in */ * from vitess_a where id in (1, 2) limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "between", - Query: "select /* between */ * from vitess_a where id between 1 and 2", - Result: [][]string{ - {"1", "1", "abcd", "efgh"}, - {"1", "2", "bcde", "fghi"}, - }, - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select /* between */ * from vitess_a where id between 1 and 2 limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "order", - Query: "select /* order */ * from vitess_a order by id desc", - Result: [][]string{ - {"1", "2", "bcde", "fghi"}, - {"1", "1", "abcd", "efgh"}, - }, - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select /* order */ * from vitess_a order by id desc limit 10001", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Name: "select in select list", - Query: "select (select eid from vitess_a where id = 1), eid from vitess_a where id = 2", - Result: [][]string{ - {"1", "1"}, - }, - Rewritten: []string{ - "select (select eid from vitess_a where 1 != 1), eid from vitess_a where 1 != 1", - "select (select eid from vitess_a where id = 1), eid from vitess_a where id = 2 limit 10001", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Name: "select in from clause", - Query: "select eid from (select eid from vitess_a where id=2) as a", - Result: [][]string{ - {"1"}, - }, - Rewritten: 
[]string{ - "select eid from (select eid from vitess_a where 1 != 1) as a where 1 != 1", - "select eid from (select eid from vitess_a where id = 2) as a limit 10001", - }, - RowsAffected: 1, - }, - &framework.MultiCase{ - Name: "select in transaction", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 2 and id = 1", - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - "select * from vitess_a where eid = 2 and id = 1 limit 10001", - }, - }, - &framework.TestCase{ - Query: "select * from vitess_a where eid = 2 and id = 1", - Rewritten: []string{ - "select * from vitess_a where eid = 2 and id = 1 limit 10001", - }, - }, - &framework.TestCase{ - Query: "select :bv from vitess_a where eid = 2 and id = 1", - BindVars: map[string]*querypb.BindVariable{ - "bv": sqltypes.Int64BindVariable(1), - }, - Rewritten: []string{ - "select 1 from vitess_a where eid = 2 and id = 1 limit 10001", - }, - }, - &framework.TestCase{ - Query: "select :bv from vitess_a where eid = 2 and id = 1", - BindVars: map[string]*querypb.BindVariable{ - "bv": sqltypes.StringBindVariable("abcd"), - }, - Rewritten: []string{ - "select 'abcd' from vitess_a where eid = 2 and id = 1 limit 10001", - }, - }, - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "simple insert", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* simple */ into vitess_a values (2, 1, 'aaaa', 'bbbb')", - Rewritten: []string{ - "insert /* simple */ into vitess_a values (2, 1, 'aaaa', 'bbbb')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 2 and id = 1", - Result: [][]string{ - {"2", "1", "aaaa", "bbbb"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: 
"insert ignore", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* simple */ ignore into vitess_a values (2, 1, 'aaaa', 'bbbb')", - Rewritten: []string{ - "insert /* simple */ ignore into vitess_a values (2, 1, 'aaaa', 'bbbb')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 2 and id = 1", - Result: [][]string{ - {"2", "1", "aaaa", "bbbb"}, - }, - }, - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* simple */ ignore into vitess_a values (2, 1, 'cccc', 'cccc')", - Rewritten: []string{ - "insert /* simple */ ignore into vitess_a values (2, 1, 'cccc', 'cccc')", - }, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 2 and id = 1", - Result: [][]string{ - {"2", "1", "aaaa", "bbbb"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "qualified insert", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* qualified */ into vitess_a(eid, id, name, foo) values (3, 1, 'aaaa', 'cccc')", - Rewritten: []string{ - "insert /* qualified */ into vitess_a(eid, id, `name`, foo) values (3, 1, 'aaaa', 'cccc')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 3 and id = 1", - Result: [][]string{ - {"3", "1", "aaaa", "cccc"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "insert with mixed case column names", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_mixed_case(col1, col2) values(1, 2)", - Rewritten: []string{ - 
"insert into vitess_mixed_case(col1, col2) values (1, 2)", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select COL1, COL2 from vitess_mixed_case", - Result: [][]string{ - {"1", "2"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_mixed_case"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "insert auto_increment", - Cases: []framework.Testable{ - framework.TestQuery("alter table vitess_e auto_increment = 1"), - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* auto_increment */ into vitess_e(name, foo) values ('aaaa', 'cccc')", - Rewritten: []string{ - "insert /* auto_increment */ into vitess_e(`name`, foo) values ('aaaa', 'cccc')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_e", - Result: [][]string{ - {"1", "1", "aaaa", "cccc"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_e"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "insert with null auto_increment", - Cases: []framework.Testable{ - framework.TestQuery("alter table vitess_e auto_increment = 1"), - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* auto_increment */ into vitess_e(eid, name, foo) values (NULL, 'aaaa', 'cccc')", - Rewritten: []string{ - "insert /* auto_increment */ into vitess_e(eid, `name`, foo) values (null, 'aaaa', 'cccc')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_e", - Result: [][]string{ - {"1", "1", "aaaa", "cccc"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_e"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "insert with number default value", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* 
num default */ into vitess_a(eid, name, foo) values (3, 'aaaa', 'cccc')", - Rewritten: []string{ - "insert /* num default */ into vitess_a(eid, `name`, foo) values (3, 'aaaa', 'cccc')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 3 and id = 1", - Result: [][]string{ - {"3", "1", "aaaa", "cccc"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "insert with string default value", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* string default */ into vitess_f(id) values (1)", - Rewritten: []string{ - "insert /* string default */ into vitess_f(id) values (1)", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_f", - Result: [][]string{ - {"ab", "1"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_f"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "bind values", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* bind values */ into vitess_a(eid, id, name, foo) values (:eid, :id, :name, :foo)", - BindVars: map[string]*querypb.BindVariable{ - "foo": sqltypes.StringBindVariable("cccc"), - "eid": sqltypes.Int64BindVariable(4), - "name": sqltypes.StringBindVariable("aaaa"), - "id": sqltypes.Int64BindVariable(1), - }, - Rewritten: []string{ - "insert /* bind values */ into vitess_a(eid, id, `name`, foo) values (4, 1, 'aaaa', 'cccc')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 4 and id = 1", - Result: [][]string{ - {"4", "1", "aaaa", "cccc"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), 
- framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "positional values", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* positional values */ into vitess_a(eid, id, name, foo) values (?, ?, ?, ?)", - BindVars: map[string]*querypb.BindVariable{ - "v1": sqltypes.Int64BindVariable(4), - "v2": sqltypes.Int64BindVariable(1), - "v3": sqltypes.StringBindVariable("aaaa"), - "v4": sqltypes.StringBindVariable("cccc"), - }, - Rewritten: []string{ - "insert /* positional values */ into vitess_a(eid, id, `name`, foo) values (4, 1, 'aaaa', 'cccc')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 4 and id = 1", - Result: [][]string{ - {"4", "1", "aaaa", "cccc"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "out of sequence columns", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_a(id, eid, foo, name) values (-1, 5, 'aaa', 'bbb')", - Rewritten: []string{ - "insert into vitess_a(id, eid, foo, `name`) values (-1, 5, 'aaa', 'bbb')", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid = 5 and id = -1", - Result: [][]string{ - {"5", "-1", "bbb", "aaa"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "subquery", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert /* subquery */ into vitess_a(eid, name, foo) select eid, name, foo from vitess_c", - Rewritten: []string{ - "insert /* subquery */ into vitess_a(eid, `name`, foo) select eid, `name`, foo from vitess_c", - }, - 
RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid in (10, 11)", - Result: [][]string{ - {"10", "1", "abcd", "20"}, - {"11", "1", "bcde", "30"}, - }, - }, - framework.TestQuery("alter table vitess_e auto_increment = 20"), - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_e(id, name, foo) select eid, name, foo from vitess_c", - Rewritten: []string{ - "insert into vitess_e(id, `name`, foo) select eid, `name`, foo from vitess_c", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select eid, id, name, foo from vitess_e", - Result: [][]string{ - {"20", "10", "abcd", "20"}, - {"21", "11", "bcde", "30"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("delete from vitess_c where eid<10"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "multi-value", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_a(eid, id, name, foo) values (5, 1, '', ''), (7, 1, '', '')", - Rewritten: []string{ - "insert into vitess_a(eid, id, `name`, foo) values (5, 1, '', ''), (7, 1, '', '')", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid>1", - Result: [][]string{ - {"5", "1", "", ""}, - {"7", "1", "", ""}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_a where eid>1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "upsert single row present/absent", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = 1", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = 1", - }, - 
RowsAffected: 1, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "1"}, - }, - }, - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "2"}, - }, - }, - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = 2", - }, - }, - &framework.TestCase{ - Query: "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = 3", - Rewritten: []string{ - "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = 3", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "3"}, - }, - }, - framework.TestQuery("commit"), - framework.TestQuery("begin"), - framework.TestQuery("delete from upsert_test"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "upsert changes pk", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id1 = 1", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id1 = 1", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "1"}, - }, - }, - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id1 = 2", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id1 = 2", - }, - RowsAffected: 2, - }, - 
&framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"2", "1"}, - }, - }, - framework.TestQuery("commit"), - framework.TestQuery("begin"), - framework.TestQuery("delete from upsert_test"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "upsert single row with values()", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = values(id2) + 1", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 1) on duplicate key update id2 = values(id2) + 1", - }, - RowsAffected: 1, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "1"}, - }, - }, - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id2) + 1", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id2) + 1", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "3"}, - }, - }, - &framework.TestCase{ - Query: "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id1)", - Rewritten: []string{ - "insert into upsert_test(id1, id2) values (1, 2) on duplicate key update id2 = values(id1)", - }, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "1"}, - }, - }, - &framework.TestCase{ - Query: "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = greatest(values(id1), values(id2))", - Rewritten: []string{ - "insert ignore into upsert_test(id1, id2) values (1, 3) on duplicate key update id2 = greatest(values(id1), values(id2))", - }, - RowsAffected: 2, - }, - &framework.TestCase{ - Query: "select * from upsert_test", - Result: [][]string{ - {"1", "3"}, - }, - }, - framework.TestQuery("commit"), 
- framework.TestQuery("begin"), - framework.TestQuery("delete from upsert_test"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 and id = 1", - Rewritten: []string{ - "update /* pk */ vitess_a set foo = 'bar' where eid = 1 and id = 1 limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select foo from vitess_a where id = 1", - Result: [][]string{ - {"bar"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set foo='efgh' where id=1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "single in update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 and id in (1, 2)", - Rewritten: []string{ - "update /* pk */ vitess_a set foo = 'bar' where eid = 1 and id in (1, 2) limit 10001", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select foo from vitess_a where id = 1", - Result: [][]string{ - {"bar"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set foo='efgh' where id=1"), - framework.TestQuery("update vitess_a set foo='fghi' where id=2"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "double in update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update /* pk */ vitess_a set foo='bar' where eid in (1) and id in (1, 2)", - Rewritten: []string{ - "update /* pk */ vitess_a set foo = 'bar' where eid in (1) and id in (1, 2) limit 10001", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select foo from vitess_a where id = 1", - Result: [][]string{ - {"bar"}, - }, - }, - 
framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set foo='efgh' where id=1"), - framework.TestQuery("update vitess_a set foo='fghi' where id=2"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "double in 2 update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update /* pk */ vitess_a set foo='bar' where eid in (1, 2) and id in (1, 2)", - Rewritten: []string{ - "update /* pk */ vitess_a set foo = 'bar' where eid in (1, 2) and id in (1, 2) limit 10001", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select foo from vitess_a where id = 1", - Result: [][]string{ - {"bar"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set foo='efgh' where id=1"), - framework.TestQuery("update vitess_a set foo='fghi' where id=2"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "pk change update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update vitess_a set eid = 2 where eid = 1 and id = 1", - Rewritten: []string{ - "update vitess_a set eid = 2 where eid = 1 and id = 1 limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select eid from vitess_a where id = 1", - Result: [][]string{ - {"2"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set eid=1 where id=1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "partial pk update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update /* pk */ vitess_a set foo='bar' where id = 1", - Rewritten: []string{ - "update /* pk */ vitess_a set foo = 'bar' where id = 1 limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select foo from vitess_a where id = 1", - Result: 
[][]string{ - {"bar"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set foo='efgh' where id=1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "limit update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 limit 1", - Rewritten: []string{ - "update /* pk */ vitess_a set foo = 'bar' where eid = 1 limit 1", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select foo from vitess_a where id = 1", - Result: [][]string{ - {"bar"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set foo='efgh' where id=1"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "order by update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update /* pk */ vitess_a set foo='bar' where eid = 1 order by id desc limit 1", - Rewritten: []string{ - "update /* pk */ vitess_a set foo = 'bar' where eid = 1 order by id desc limit 1", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select foo from vitess_a where id = 2", - Result: [][]string{ - {"bar"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set foo='fghi' where id=2"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "missing where update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update vitess_a set foo='bar'", - Rewritten: []string{ - "update vitess_a set foo = 'bar' limit 10001", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a", - Result: [][]string{ - {"1", "1", "abcd", "bar"}, - {"1", "2", "bcde", "bar"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("update vitess_a set 
foo='efgh' where id=1"), - framework.TestQuery("update vitess_a set foo='fghi' where id=2"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "single pk update one row update", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), - framework.TestQuery("commit"), - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update vitess_f set id=2 where vb='a'", - Rewritten: []string{ - "update vitess_f set id = 2 where vb = 'a' limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_f", - Result: [][]string{ - {"a", "2"}, - {"b", "2"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_f"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "single pk update two rows", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), - framework.TestQuery("commit"), - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update vitess_f set id=3 where vb in ('a', 'b')", - Rewritten: []string{ - "update vitess_f set id = 3 where vb in ('a', 'b') limit 10001", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_f", - Result: [][]string{ - {"a", "3"}, - {"b", "3"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_f"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "single pk update subquery", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), - framework.TestQuery("commit"), - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update vitess_f set id=4 where id >= 0", - Rewritten: []string{ - "update vitess_f 
set id = 4 where id >= 0 limit 10001", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_f", - Result: [][]string{ - {"a", "4"}, - {"b", "4"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_f"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "single pk update subquery no rows", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_f(vb,id) values ('a', 1), ('b', 2)"), - framework.TestQuery("commit"), - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update vitess_f set id=4 where id < 0", - Rewritten: []string{ - "update vitess_f set id = 4 where id < 0 limit 10001", - }, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_f", - Result: [][]string{ - {"a", "1"}, - {"b", "2"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_f"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - &framework.TestCase{ - Query: "delete /* pk */ from vitess_a where eid = 2 and id = 1", - Rewritten: []string{ - "delete /* pk */ from vitess_a where eid = 2 and id = 1 limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "single in delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - &framework.TestCase{ - Query: "delete /* pk */ from vitess_a where eid = 2 and id in (1, 2)", - Rewritten: []string{ - "delete /* pk */ from vitess_a where eid = 2 and id in (1, 2) limit 10001", - }, - 
RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "double in delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - &framework.TestCase{ - Query: "delete /* pk */ from vitess_a where eid in (2) and id in (1, 2)", - Rewritten: []string{ - "delete /* pk */ from vitess_a where eid in (2) and id in (1, 2) limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "double in 2 delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - &framework.TestCase{ - Query: "delete /* pk */ from vitess_a where eid in (2, 3) and id in (1, 2)", - Rewritten: []string{ - "delete /* pk */ from vitess_a where eid in (2, 3) and id in (1, 2) limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "complex where delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - &framework.TestCase{ - Query: "delete from vitess_a where eid = 1+1 and id = 1", - Rewritten: []string{ - "delete from vitess_a where eid = 1 + 1 and id = 1 limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "partial pk delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - 
&framework.TestCase{ - Query: "delete from vitess_a where eid = 2", - Rewritten: []string{ - "delete from vitess_a where eid = 2 limit 10001", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "limit delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - &framework.TestCase{ - Query: "delete from vitess_a where eid = 2 limit 1", - Rewritten: []string{ - "delete from vitess_a where eid = 2 limit 1", - }, - RowsAffected: 1, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "order by delete", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 1, '', '')"), - framework.TestQuery("insert into vitess_a(eid, id, name, foo) values (2, 2, '', '')"), - &framework.TestCase{ - Query: "delete from vitess_a where eid = 2 order by id desc", - Rewritten: []string{ - "delete from vitess_a where eid = 2 order by id desc limit 10001", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_a where eid=2", - }, - }, - }, - &framework.MultiCase{ - Name: "integer data types", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_ints values(:tiny, :tinyu, :small, :smallu, :medium, :mediumu, :normal, :normalu, :big, :bigu, :year)", - BindVars: map[string]*querypb.BindVariable{ - "medium": sqltypes.Int64BindVariable(-8388608), - "smallu": sqltypes.Int64BindVariable(65535), - "normal": sqltypes.Int64BindVariable(-2147483648), - "big": sqltypes.Int64BindVariable(-9223372036854775808), - "tinyu": sqltypes.Int64BindVariable(255), - "year": 
sqltypes.Int64BindVariable(2012), - "tiny": sqltypes.Int64BindVariable(-128), - "bigu": sqltypes.Uint64BindVariable(18446744073709551615), - "normalu": sqltypes.Int64BindVariable(4294967295), - "small": sqltypes.Int64BindVariable(-32768), - "mediumu": sqltypes.Int64BindVariable(16777215), - }, - Rewritten: []string{ - "insert into vitess_ints values (-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)", - }, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_ints where tiny = -128", - Result: [][]string{ - {"-128", "255", "-32768", "65535", "-8388608", "16777215", "-2147483648", "4294967295", "-9223372036854775808", "18446744073709551615", "2012"}, - }, - Rewritten: []string{ - "select * from vitess_ints where 1 != 1", - "select * from vitess_ints where tiny = -128 limit 10001", - }, - }, - &framework.TestCase{ - Query: "select * from vitess_ints where tiny = -128", - Result: [][]string{ - {"-128", "255", "-32768", "65535", "-8388608", "16777215", "-2147483648", "4294967295", "-9223372036854775808", "18446744073709551615", "2012"}, - }, - Rewritten: []string{ - "select * from vitess_ints where tiny = -128 limit 10001", - }, - }, - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_ints select 2, tinyu, small, smallu, medium, mediumu, normal, normalu, big, bigu, y from vitess_ints", - Rewritten: []string{ - "insert into vitess_ints select 2, tinyu, small, smallu, medium, mediumu, normal, normalu, big, bigu, y from vitess_ints", - }, - }, - framework.TestQuery("commit"), - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_ints"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "fractional data types", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_fracts values(:id, :deci, :num, :f, :d)", - BindVars: 
map[string]*querypb.BindVariable{ - "d": sqltypes.Float64BindVariable(4.99), - "num": sqltypes.StringBindVariable("2.99"), - "id": sqltypes.Int64BindVariable(1), - "f": sqltypes.Float64BindVariable(3.99), - "deci": sqltypes.StringBindVariable("1.99"), - }, - Rewritten: []string{ - "insert into vitess_fracts values (1, '1.99', '2.99', 3.99, 4.99)", - }, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_fracts where id = 1", - Result: [][]string{ - {"1", "1.99", "2.99", "3.99", "4.99"}, - }, - Rewritten: []string{ - "select * from vitess_fracts where 1 != 1", - "select * from vitess_fracts where id = 1 limit 10001", - }, - }, - &framework.TestCase{ - Query: "select * from vitess_fracts where id = 1", - Result: [][]string{ - {"1", "1.99", "2.99", "3.99", "4.99"}, - }, - Rewritten: []string{ - "select * from vitess_fracts where id = 1 limit 10001", - }, - }, - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_fracts select 2, deci, num, f, d from vitess_fracts", - Rewritten: []string{ - "insert into vitess_fracts select 2, deci, num, f, d from vitess_fracts", - }, - }, - framework.TestQuery("commit"), - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_fracts"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "string data types", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_strings values (:vb, :c, :vc, :b, :tb, :bl, :ttx, :tx, :en, :s)", - BindVars: map[string]*querypb.BindVariable{ - "ttx": sqltypes.StringBindVariable("g"), - "vb": sqltypes.StringBindVariable("a"), - "vc": sqltypes.StringBindVariable("c"), - "en": sqltypes.StringBindVariable("a"), - "tx": sqltypes.StringBindVariable("h"), - "bl": sqltypes.StringBindVariable("f"), - "s": sqltypes.StringBindVariable("a,b"), - "b": sqltypes.StringBindVariable("d"), - "tb": sqltypes.StringBindVariable("e"), - "c": 
sqltypes.StringBindVariable("b"), - }, - Rewritten: []string{ - "insert into vitess_strings values ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b')", - }, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_strings where vb = 'a'", - Result: [][]string{ - {"a", "b", "c", "d\x00\x00\x00", "e", "f", "g", "h", "a", "a,b"}, - }, - Rewritten: []string{ - "select * from vitess_strings where 1 != 1", - "select * from vitess_strings where vb = 'a' limit 10001", - }, - }, - &framework.TestCase{ - Query: "select * from vitess_strings where vb = 'a'", - Result: [][]string{ - {"a", "b", "c", "d\x00\x00\x00", "e", "f", "g", "h", "a", "a,b"}, - }, - Rewritten: []string{ - "select * from vitess_strings where vb = 'a' limit 10001", - }, - }, - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_strings select 'b', c, vc, b, tb, bl, ttx, tx, en, s from vitess_strings", - Rewritten: []string{ - "insert into vitess_strings select 'b', c, vc, b, tb, bl, ttx, tx, en, s from vitess_strings", - }, - }, - framework.TestQuery("commit"), - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_strings"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "misc data types", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_misc values(:id, :b, :d, :dt, :t, point(1, 2))", - BindVars: map[string]*querypb.BindVariable{ - "t": sqltypes.StringBindVariable("15:45:45"), - "dt": sqltypes.StringBindVariable("2012-01-01 15:45:45"), - "b": sqltypes.StringBindVariable("\x01"), - "id": sqltypes.Int64BindVariable(1), - "d": sqltypes.StringBindVariable("2012-01-01"), - }, - Rewritten: []string{ - "insert into vitess_misc values (1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", - }, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_misc where id = 1", - Result: 
[][]string{ - {"1", "\x01", "2012-01-01", "2012-01-01 15:45:45", "15:45:45", point12}, - }, - Rewritten: []string{ - "select * from vitess_misc where 1 != 1", - "select * from vitess_misc where id = 1 limit 10001", - }, - }, - &framework.TestCase{ - Query: "select * from vitess_misc where id = 1", - Result: [][]string{ - {"1", "\x01", "2012-01-01", "2012-01-01 15:45:45", "15:45:45", point12}, - }, - Rewritten: []string{ - "select * from vitess_misc where id = 1 limit 10001", - }, - }, - framework.TestQuery("begin"), - &framework.TestCase{ - // Skip geometry test. The binary representation is non-trivial to represent as go string. - Query: "insert into vitess_misc(id, b, d, dt, t) select 2, b, d, dt, t from vitess_misc", - Rewritten: []string{ - "insert into vitess_misc(id, b, d, dt, t) select 2, b, d, dt, t from vitess_misc", - }, - }, - framework.TestQuery("commit"), - framework.TestQuery("begin"), - framework.TestQuery("delete from vitess_misc"), - framework.TestQuery("commit"), - }, - }, - &framework.MultiCase{ - Name: "boolean expressions", - Cases: []framework.Testable{ - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'foo', false)"), - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_bool", - Result: [][]string{ - {"1", "1", "foo", "0"}, - }, - }, - framework.TestQuery("begin"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'bar', 23)"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'baz', 2342)"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'test', 123)"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (true, 'aa', 384)"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'bbb', 213)"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'cc', 24342)"), - 
framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'd', 1231)"), - framework.TestQuery("insert into vitess_bool(bval, sval, ival) values (false, 'ee', 3894)"), - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_bool where bval", - Result: [][]string{ - {"1", "1", "foo", "0"}, - {"2", "1", "bar", "23"}, - {"3", "1", "baz", "2342"}, - {"4", "1", "test", "123"}, - {"5", "1", "aa", "384"}, - }, - }, - &framework.TestCase{ - Query: "select * from vitess_bool where case sval when 'foo' then true when 'test' then true else false end", - Result: [][]string{ - {"1", "1", "foo", "0"}, - {"4", "1", "test", "123"}, - }, - }, - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "insert into vitess_bool(auto, bval, sval, ival) values (1, false, 'test2', 191) on duplicate key update bval = false", - Rewritten: []string{ - "insert into vitess_bool(auto, bval, sval, ival) values (1, false, 'test2', 191) on duplicate key update bval = false", - }, - RowsAffected: 2, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_bool where bval", - Result: [][]string{ - {"2", "1", "bar", "23"}, - {"3", "1", "baz", "2342"}, - {"4", "1", "test", "123"}, - {"5", "1", "aa", "384"}, - }, - }, - &framework.TestCase{ - Query: "select * from vitess_bool where not bval", - Result: [][]string{ - {"1", "0", "foo", "0"}, - {"6", "0", "bbb", "213"}, - {"7", "0", "cc", "24342"}, - {"8", "0", "d", "1231"}, - {"9", "0", "ee", "3894"}, - }, - }, - framework.TestQuery("begin"), - &framework.TestCase{ - Query: "update vitess_bool set sval = 'test' where bval is false or ival = 23", - Rewritten: []string{ - "update vitess_bool set sval = 'test' where bval is false or ival = 23 limit 10001", - }, - RowsAffected: 6, - }, - framework.TestQuery("commit"), - &framework.TestCase{ - Query: "select * from vitess_bool where not bval", - Result: [][]string{ - {"1", "0", "test", "0"}, - {"6", "0", "test", "213"}, 
- {"7", "0", "test", "24342"}, - {"8", "0", "test", "1231"}, - {"9", "0", "test", "3894"}, - }, - }, - &framework.TestCase{ - Query: "select (bval or ival) from vitess_bool where ival = 213", - Result: [][]string{ - {"1"}, - }, - }, - &framework.TestCase{ - Query: "select bval from vitess_bool where ival = 213", - Result: [][]string{ - {"0"}, - }, - }, - }, - }, - &framework.MultiCase{ - Name: "impossible queries", - Cases: []framework.Testable{ - &framework.TestCase{ - Name: "specific column", - Query: "select eid from vitess_a where 1 != 1", - Rewritten: []string{ - "select eid from vitess_a where 1 != 1", - }, - RowsAffected: 0, - }, - &framework.TestCase{ - Name: "all columns", - Query: "select * from vitess_a where 1 != 1", - Rewritten: []string{ - "select * from vitess_a where 1 != 1", - }, - RowsAffected: 0, - }, - &framework.TestCase{ - Name: "bind vars", - Query: "select :bv from vitess_a where 1 != 1", - BindVars: map[string]*querypb.BindVariable{ - "bv": sqltypes.Int64BindVariable(1), - }, - Rewritten: []string{ - "select 1 from vitess_a where 1 != 1 limit 10001", - }, - RowsAffected: 0, - }, - }, - }, - } - for _, tcase := range testCases { + for _, tcase := range TestQueryCases { if err := tcase.Test("", client); err != nil { t.Error(err) } } } + +func BenchmarkTabletQueries(b *testing.B) { + client := framework.NewClient() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + tcase := TestQueryCases[rand.Intn(len(TestQueryCases))] + if err := tcase.Benchmark(client); err != nil { + b.Error(err) + } + } +} diff --git a/go/vt/vttablet/endtoend/sequence_test.go b/go/vt/vttablet/endtoend/sequence_test.go index 491841b5186..5aadc62e109 100644 --- a/go/vt/vttablet/endtoend/sequence_test.go +++ b/go/vt/vttablet/endtoend/sequence_test.go @@ -20,6 +20,8 @@ import ( "reflect" "testing" + "vitess.io/vitess/go/test/utils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -36,7 +38,6 @@ func TestSequence(t *testing.T) { 
Name: "nextval", Type: sqltypes.Int64, }}, - RowsAffected: 1, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(0), }}, @@ -45,6 +46,7 @@ func TestSequence(t *testing.T) { want.Rows[0][0] = sqltypes.NewInt64(wantval) qr, err := framework.NewClient().Execute("select next 2 values from vitess_seq", nil) require.NoError(t, err) + utils.MustMatch(t, want, qr) assert.Equal(t, want, qr) } @@ -54,13 +56,13 @@ func TestSequence(t *testing.T) { qr.Fields = nil want = &sqltypes.Result{ - RowsAffected: 1, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(13), sqltypes.NewInt64(3), }}, + StatusFlags: sqltypes.ServerStatusNoIndexUsed | sqltypes.ServerStatusAutocommit, } - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) // Mess up the sequence by reducing next_id _, err = framework.NewClient().Execute("update vitess_seq set next_id=1", nil) @@ -71,12 +73,11 @@ func TestSequence(t *testing.T) { // Next value generated should be based on the LastVal want = &sqltypes.Result{ - RowsAffected: 1, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(13), }}, } - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) // next_id should be reset to LastVal+cache qr, err = framework.NewClient().Execute("select next_id, cache from vitess_seq", nil) @@ -84,13 +85,13 @@ func TestSequence(t *testing.T) { qr.Fields = nil want = &sqltypes.Result{ - RowsAffected: 1, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(16), sqltypes.NewInt64(3), }}, + StatusFlags: sqltypes.ServerStatusNoIndexUsed | sqltypes.ServerStatusAutocommit, } - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) // Change next_id to a very high value _, err = framework.NewClient().Execute("update vitess_seq set next_id=100", nil) @@ -101,12 +102,11 @@ func TestSequence(t *testing.T) { // Next value should jump to the high value want = &sqltypes.Result{ - RowsAffected: 1, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(100), }}, } - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) } func TestResetSequence(t *testing.T) { @@ 
-116,7 +116,6 @@ func TestResetSequence(t *testing.T) { Name: "nextval", Type: sqltypes.Int64, }}, - RowsAffected: 1, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(1), }}, diff --git a/go/vt/vttablet/endtoend/stream_test.go b/go/vt/vttablet/endtoend/stream_test.go index 16e4b704583..d5dbb70887f 100644 --- a/go/vt/vttablet/endtoend/stream_test.go +++ b/go/vt/vttablet/endtoend/stream_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -40,9 +41,7 @@ func TestStreamUnion(t *testing.T) { t.Error(err) return } - if qr.RowsAffected != 1 { - t.Errorf("RowsAffected: %d, want 1", qr.RowsAffected) - } + assert.Equal(t, 1, len(qr.Rows)) } func TestStreamConsolidation(t *testing.T) { diff --git a/go/vt/vttablet/endtoend/transaction_test.go b/go/vt/vttablet/endtoend/transaction_test.go index 02cbd7ac6c9..c42363ba192 100644 --- a/go/vt/vttablet/endtoend/transaction_test.go +++ b/go/vt/vttablet/endtoend/transaction_test.go @@ -18,7 +18,6 @@ package endtoend import ( "fmt" - "strings" "testing" "time" @@ -57,14 +56,14 @@ func TestCommit(t *testing.T) { qr, err := client.Execute("select * from vitess_test", nil) require.NoError(t, err) - require.Equal(t, uint64(4), qr.RowsAffected, "rows affected") + require.Equal(t, 4, len(qr.Rows), "rows affected") _, err = client.Execute("delete from vitess_test where intval=4", nil) require.NoError(t, err) qr, err = client.Execute("select * from vitess_test", nil) require.NoError(t, err) - require.Equal(t, uint64(3), qr.RowsAffected, "rows affected") + require.Equal(t, 3, len(qr.Rows), "rows affected") expectedDiffs := []struct { tag string @@ -115,9 +114,7 @@ func TestRollback(t *testing.T) { qr, err := client.Execute("select * from vitess_test", nil) require.NoError(t, err) - if qr.RowsAffected != 3 { - t.Errorf("rows affected: %d, want 3", qr.RowsAffected) - } + assert.Equal(t, 3, len(qr.Rows)) expectedDiffs := 
[]struct { tag string @@ -156,18 +153,14 @@ func TestAutoCommit(t *testing.T) { qr, err := client.Execute("select * from vitess_test", nil) require.NoError(t, err) - if qr.RowsAffected != 4 { - t.Errorf("rows affected: %d, want 4", qr.RowsAffected) - } + assert.Equal(t, 4, len(qr.Rows)) _, err = client.Execute("delete from vitess_test where intval=4", nil) require.NoError(t, err) qr, err = client.Execute("select * from vitess_test", nil) require.NoError(t, err) - if qr.RowsAffected != 3 { - t.Errorf("rows affected: %d, want 4", qr.RowsAffected) - } + assert.Equal(t, 3, len(qr.Rows)) expectedDiffs := []struct { tag string @@ -236,10 +229,7 @@ func TestForUpdate(t *testing.T) { client := framework.NewClient() query := fmt.Sprintf("select * from vitess_test where intval=2 %s", mode) _, err := client.Execute(query, nil) - want := "SelectLock disallowed outside transaction" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("%v, must have prefix %s", err, want) - } + require.NoError(t, err) // We should not get errors here err = client.Begin(false) @@ -270,9 +260,7 @@ func TestPrepareRollback(t *testing.T) { require.NoError(t, err) qr, err := client.Execute("select * from vitess_test", nil) require.NoError(t, err) - if qr.RowsAffected != 3 { - t.Errorf("rows affected: %d, want 3", qr.RowsAffected) - } + assert.Equal(t, 3, len(qr.Rows)) } func TestPrepareCommit(t *testing.T) { @@ -294,9 +282,7 @@ func TestPrepareCommit(t *testing.T) { require.NoError(t, err) qr, err := client.Execute("select * from vitess_test", nil) require.NoError(t, err) - if qr.RowsAffected != 4 { - t.Errorf("rows affected: %d, want 4", qr.RowsAffected) - } + assert.Equal(t, 4, len(qr.Rows)) } func TestPrepareReparentCommit(t *testing.T) { @@ -324,9 +310,7 @@ func TestPrepareReparentCommit(t *testing.T) { require.NoError(t, err) qr, err := client.Execute("select * from vitess_test", nil) require.NoError(t, err) - if qr.RowsAffected != 4 { - t.Errorf("rows affected: %d, want 4", 
qr.RowsAffected) - } + assert.Equal(t, 4, len(qr.Rows)) } func TestShutdownGracePeriod(t *testing.T) { diff --git a/go/vt/vttablet/endtoend/vstreamer_test.go b/go/vt/vttablet/endtoend/vstreamer_test.go index 48c685a9119..de21af94217 100644 --- a/go/vt/vttablet/endtoend/vstreamer_test.go +++ b/go/vt/vttablet/endtoend/vstreamer_test.go @@ -59,6 +59,8 @@ func TestSchemaVersioning(t *testing.T) { tsv.EnableHistorian(false) tsv.SetTracking(false) tsv.EnableHeartbeat(false) + tsv.EnableThrottler(false) + defer tsv.EnableThrottler(true) defer tsv.EnableHeartbeat(true) defer tsv.EnableHistorian(true) defer tsv.SetTracking(true) diff --git a/go/vt/vttablet/grpctabletconn/conn_benchmark_test.go b/go/vt/vttablet/grpctabletconn/conn_benchmark_test.go new file mode 100644 index 00000000000..8f39c564acd --- /dev/null +++ b/go/vt/vttablet/grpctabletconn/conn_benchmark_test.go @@ -0,0 +1,331 @@ +package grpctabletconn + +import ( + "context" + "fmt" + "math/rand" + "net" + "sort" + "testing" + + "google.golang.org/grpc" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/callerid" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vttablet/grpcqueryservice" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletconntest" +) + +type BenchmarkService struct { + t testing.TB + batchResult []sqltypes.Result +} + +func (b *BenchmarkService) Begin(ctx context.Context, target *querypb.Target, options *querypb.ExecuteOptions) (int64, *topodatapb.TabletAlias, error) { + panic("should not be called") +} + +func (b *BenchmarkService) Commit(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { + panic("should not be called") +} + +func (b *BenchmarkService) Rollback(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { + panic("should not be called") +} + +func (b 
*BenchmarkService) Prepare(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + panic("should not be called") +} + +func (b *BenchmarkService) CommitPrepared(ctx context.Context, target *querypb.Target, dtid string) (err error) { + panic("should not be called") +} + +func (b *BenchmarkService) RollbackPrepared(ctx context.Context, target *querypb.Target, dtid string, originalID int64) (err error) { + panic("should not be called") +} + +func (b *BenchmarkService) CreateTransaction(ctx context.Context, target *querypb.Target, dtid string, participants []*querypb.Target) (err error) { + panic("should not be called") +} + +func (b *BenchmarkService) StartCommit(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + panic("should not be called") +} + +func (b *BenchmarkService) SetRollback(ctx context.Context, target *querypb.Target, dtid string, transactionID int64) (err error) { + panic("should not be called") +} + +func (b *BenchmarkService) ConcludeTransaction(ctx context.Context, target *querypb.Target, dtid string) (err error) { + panic("should not be called") +} + +func (b *BenchmarkService) ReadTransaction(ctx context.Context, target *querypb.Target, dtid string) (metadata *querypb.TransactionMetadata, err error) { + panic("should not be called") +} + +func (b *BenchmarkService) Execute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID, reservedID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { + panic("should not be called") +} + +func (b *BenchmarkService) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { + panic("should not be called") +} + +func (b *BenchmarkService) ExecuteBatch(ctx context.Context, target *querypb.Target, 
queries []*querypb.BoundQuery, asTransaction bool, transactionID int64, options *querypb.ExecuteOptions) ([]sqltypes.Result, error) { + return b.batchResult, nil +} + +func (b *BenchmarkService) BeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, int64, *topodatapb.TabletAlias, error) { + panic("should not be called") +} + +func (b *BenchmarkService) BeginExecuteBatch(ctx context.Context, target *querypb.Target, queries []*querypb.BoundQuery, asTransaction bool, options *querypb.ExecuteOptions) ([]sqltypes.Result, int64, *topodatapb.TabletAlias, error) { + panic("should not be called") +} + +func (b *BenchmarkService) MessageStream(ctx context.Context, target *querypb.Target, name string, callback func(*sqltypes.Result) error) error { + panic("should not be called") +} + +func (b *BenchmarkService) MessageAck(ctx context.Context, target *querypb.Target, name string, ids []*querypb.Value) (count int64, err error) { + panic("should not be called") +} + +func (b *BenchmarkService) VStream(ctx context.Context, target *querypb.Target, startPos string, tableLastPKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { + panic("should not be called") +} + +func (b *BenchmarkService) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + panic("should not be called") +} + +func (b *BenchmarkService) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { + panic("should not be called") +} + +func (b *BenchmarkService) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { + panic("should not be called") +} + +func (b *BenchmarkService) 
HandlePanic(err *error) { + if x := recover(); x != nil { + *err = fmt.Errorf("caught test panic: %v", x) + } +} + +func (b *BenchmarkService) ReserveBeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions) (*sqltypes.Result, int64, int64, *topodatapb.TabletAlias, error) { + panic("should not be called") +} + +func (b *BenchmarkService) ReserveExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, int64, *topodatapb.TabletAlias, error) { + panic("should not be called") +} + +func (b *BenchmarkService) Release(ctx context.Context, target *querypb.Target, transactionID, reservedID int64) error { + panic("should not be called") +} + +func (b *BenchmarkService) Close(ctx context.Context) error { + panic("should not be called") +} + +type generator struct { + n int + r *rand.Rand + tt []querypb.Type +} + +func (gen *generator) randomString(min, max int) string { + const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + n := min + gen.r.Intn(max-min) + b := make([]byte, n) + for i := range b { + b[i] = letters[gen.r.Intn(len(letters))] + } + return string(b) +} + +func (gen *generator) generateBindVar(tt int) interface{} { + if gen.n < 0 { + return nil + } + gen.n-- + + switch tt { + case 0: + return gen.randomString(1, 256) + case 1: + return gen.r.Float64() + case 2: + return gen.r.Uint64() + case 3: + return gen.r.Int63() + case 4, 5, 6, 7: + var ( + ary []interface{} + l = gen.r.Intn(32) + ) + for i := 0; i < l && gen.n > 0; i++ { + ary = append(ary, gen.generateBindVar(tt-4)) + } + return ary + default: + panic("unreachable") + } +} + +func (gen *generator) generateBindVars() map[string]*querypb.BindVariable { + bv := make(map[string]*querypb.BindVariable) + c := gen.r.Intn(128) + for i 
:= 0; i < c && gen.n > 0; i++ { + tt := gen.r.Intn(8) + v, err := sqltypes.BuildBindVariable(gen.generateBindVar(tt)) + if err != nil { + panic(err) + } + bv[fmt.Sprintf("bind%d", i)] = v + } + return bv +} + +func (gen *generator) generateBoundQuery() (bq []*querypb.BoundQuery) { + for gen.n > 0 { + q := &querypb.BoundQuery{ + Sql: gen.randomString(32, 1024), + BindVariables: gen.generateBindVars(), + } + gen.n-- + bq = append(bq, q) + } + return +} + +func (gen *generator) generateType() querypb.Type { + if len(gen.tt) == 0 { + for _, tt := range querypb.Type_value { + gen.tt = append(gen.tt, querypb.Type(tt)) + } + sort.Slice(gen.tt, func(i, j int) bool { + return gen.tt[i] < gen.tt[j] + }) + } + return gen.tt[gen.r.Intn(len(gen.tt))] +} + +func (gen *generator) generateRows() (fields []*querypb.Field, rows [][]sqltypes.Value) { + fieldCount := 1 + gen.r.Intn(16) + + for i := 0; i < fieldCount; i++ { + fields = append(fields, &querypb.Field{ + Name: fmt.Sprintf("field%d", i), + Type: gen.generateType(), + }) + } + + for gen.n > 0 { + var row []sqltypes.Value + for _, f := range fields { + row = append(row, sqltypes.TestValue(f.Type, gen.randomString(8, 32))) + gen.n-- + } + rows = append(rows, row) + } + return +} + +func (gen *generator) generateQueryResultList() (qrl []sqltypes.Result) { + for gen.n > 0 { + fields, rows := gen.generateRows() + r := sqltypes.Result{ + Fields: fields, + RowsAffected: gen.r.Uint64(), + InsertID: gen.r.Uint64(), + Rows: rows, + } + gen.n-- + qrl = append(qrl, r) + } + return +} + +func BenchmarkGRPCTabletConn(b *testing.B) { + // fake service + service := &BenchmarkService{ + t: b, + } + + // listen on a random port + listener, err := net.Listen("tcp", ":0") + if err != nil { + b.Fatalf("Cannot listen: %v", err) + } + host := listener.Addr().(*net.TCPAddr).IP.String() + port := listener.Addr().(*net.TCPAddr).Port + + // Create a gRPC server and listen on the port + server := grpc.NewServer() + grpcqueryservice.Register(server, 
service) + go server.Serve(listener) + + tablet := &topodatapb.Tablet{ + Keyspace: tabletconntest.TestTarget.Keyspace, + Shard: tabletconntest.TestTarget.Shard, + Type: tabletconntest.TestTarget.TabletType, + Alias: tabletconntest.TestAlias, + Hostname: host, + PortMap: map[string]int32{ + "grpc": int32(port), + }, + } + + var querySizes = []int{8, 64, 512, 4096, 32768} + var requests = make(map[int][]*querypb.BoundQuery) + var responses = make(map[int][]sqltypes.Result) + + for _, size := range querySizes { + gen := &generator{ + n: size, + r: rand.New(rand.NewSource(int64(0x33333 ^ size))), + } + requests[size] = gen.generateBoundQuery() + + gen = &generator{ + n: size, + r: rand.New(rand.NewSource(int64(0x44444 ^ size))), + } + responses[size] = gen.generateQueryResultList() + } + + for _, reqSize := range querySizes { + for _, respSize := range querySizes { + b.Run(fmt.Sprintf("Req%d-Resp%d", reqSize, respSize), func(b *testing.B) { + conn, err := tabletconn.GetDialer()(tablet, false) + if err != nil { + b.Fatalf("dial failed: %v", err) + } + defer conn.Close(context.Background()) + + requestQuery := requests[reqSize] + service.batchResult = responses[respSize] + + b.SetParallelism(4) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + ctx := context.Background() + ctx = callerid.NewContext(ctx, tabletconntest.TestCallerID, tabletconntest.TestVTGateCallerID) + _, err := conn.ExecuteBatch(ctx, tabletconntest.TestTarget, requestQuery, tabletconntest.TestAsTransaction, tabletconntest.ExecuteBatchTransactionID, tabletconntest.TestExecuteOptions) + if err != nil { + b.Fatalf("ExecuteBatch failed: %v", err) + } + } + }) + }) + } + } +} diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index 4f815585027..a6135e1d882 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ /* -Functionality of this Executor is tested in go/test/endtoend/onlineddl/onlineddl_test.go +Functionality of this Executor is tested in go/test/endtoend/onlineddl/... */ package onlineddl @@ -40,8 +40,13 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/timer" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" @@ -50,22 +55,22 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/vexec" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - - "github.com/google/shlex" + mysqldriver "github.com/go-sql-driver/mysql" + "github.com/golang/protobuf/proto" + "github.com/jmoiron/sqlx" + "github.com/skeema/tengo" ) var ( // ErrExecutorNotWritableTablet is generated when executor is asked to run gh-ost on a read-only server - ErrExecutorNotWritableTablet = errors.New("Cannot run gh-ost migration on non-writable tablet") + ErrExecutorNotWritableTablet = errors.New("cannot run migration on non-writable tablet") // ErrExecutorMigrationAlreadyRunning is generated when an attempt is made to run an operation that conflicts with a running migration - ErrExecutorMigrationAlreadyRunning = errors.New("Cannot run gh-ost migration since a migration is already running") + ErrExecutorMigrationAlreadyRunning = errors.New("cannot run migration since a migration is already 
running") // ErrMigrationNotFound is returned by readMigration when given UUI cannot be found - ErrMigrationNotFound = errors.New("Migration not found") + ErrMigrationNotFound = errors.New("migration not found") ) var vexecUpdateTemplates = []string{ @@ -93,13 +98,13 @@ var vexecInsertTemplates = []string{ )`, } -var emptyResult = &sqltypes.Result{ - RowsAffected: 0, -} +var emptyResult = &sqltypes.Result{} +var acceptableDropTableIfExistsErrorCodes = []int{mysql.ERCantFindFile, mysql.ERNoSuchTable} var ghostOverridePath = flag.String("gh-ost-path", "", "override default gh-ost binary full path") var ptOSCOverridePath = flag.String("pt-osc-path", "", "override default pt-online-schema-change binary full path") var migrationCheckInterval = flag.Duration("migration_check_interval", 1*time.Minute, "Interval between migration checks") +var retainOnlineDDLTables = flag.Duration("retain_online_ddl_tables", 24*time.Hour, "How long should vttablet keep an old migrated table before purging it") var migrationNextCheckInterval = 5 * time.Second const ( @@ -107,8 +112,10 @@ const ( staleMigrationMinutes = 10 progressPctStarted float64 = 0 progressPctFull float64 = 100.0 - gcHoldHours = 72 + etaSecondsUnknown = -1 + etaSecondsNow = 0 databasePoolSize = 3 + cutOverThreshold = 3 * time.Second ) var ( @@ -117,6 +124,14 @@ var ( onlineDDLGrant = fmt.Sprintf("'%s'@'%s'", onlineDDLUser, "%") ) +type mysqlVariables struct { + host string + port int + readOnly bool + version string + versionComment string +} + // Executor wraps and manages the execution of a gh-ost migration. 
type Executor struct { env tabletenv.Env @@ -129,11 +144,13 @@ type Executor struct { shard string dbName string - initMutex sync.Mutex - migrationMutex sync.Mutex - migrationRunning int64 - lastMigrationUUID string - tickReentranceFlag int64 + initMutex sync.Mutex + migrationMutex sync.Mutex + vreplMigrationRunning int64 + ghostMigrationRunning int64 + ptoscMigrationRunning int64 + lastMigrationUUID string + tickReentranceFlag int64 ticks *timer.Timer isOpen bool @@ -157,6 +174,11 @@ func PTOSCFileName() (fileName string, isOverride bool) { return "/usr/bin/pt-online-schema-change", false } +// newGCTableRetainTime returns the time until which a new GC table is to be retained +func newGCTableRetainTime() time.Time { + return time.Now().UTC().Add(*retainOnlineDDLTables) +} + // NewExecutor creates a new gh-ost executor. func NewExecutor(env tabletenv.Env, tabletAlias topodatapb.TabletAlias, ts *topo.Server, tabletTypeFunc func() topodatapb.TabletType) *Executor { return &Executor{ @@ -264,6 +286,20 @@ func (e *Executor) triggerNextCheckInterval() { e.ticks.TriggerAfter(migrationNextCheckInterval) } +// isAnyMigrationRunning sees if there's any migration running right now +func (e *Executor) isAnyMigrationRunning() bool { + if atomic.LoadInt64(&e.vreplMigrationRunning) > 0 { + return true + } + if atomic.LoadInt64(&e.ghostMigrationRunning) > 0 { + return true + } + if atomic.LoadInt64(&e.ptoscMigrationRunning) > 0 { + return true + } + return false +} + func (e *Executor) ghostPanicFlagFileName(uuid string) string { return path.Join(os.TempDir(), fmt.Sprintf("ghost.%s.panic.flag", uuid)) } @@ -283,33 +319,44 @@ func (e *Executor) ptPidFileName(uuid string) string { } // readMySQLVariables contacts the backend MySQL server to read some of its configuration -func (e *Executor) readMySQLVariables(ctx context.Context) (host string, port int, readOnly bool, err error) { +func (e *Executor) readMySQLVariables(ctx context.Context) (variables *mysqlVariables, err error) { 
conn, err := e.pool.Get(ctx) if err != nil { - return host, port, readOnly, err + return nil, err } defer conn.Recycle() - tm, err := conn.Exec(ctx, "select @@global.hostname as hostname, @@global.port as port, @@global.read_only as read_only from dual", 1, true) + tm, err := conn.Exec(ctx, `select + @@global.hostname as hostname, + @@global.port as port, + @@global.read_only as read_only, + @@global.version AS version, + @@global.version_comment AS version_comment + from dual`, 1, true) if err != nil { - return host, port, readOnly, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not read MySQL variables: %v", err) + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not read MySQL variables: %v", err) } row := tm.Named().Row() if row == nil { - return host, port, readOnly, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "unexpected result for MySQL variables: %+v", tm.Rows) + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "unexpected result for MySQL variables: %+v", tm.Rows) } - host = row["hostname"].ToString() + variables = &mysqlVariables{} - p, err := row.ToInt64("port") - if err != nil { - return host, port, readOnly, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse @@global.port %v: %v", tm, err) - } - port = int(p) + variables.host = row["hostname"].ToString() - if readOnly, err = row.ToBool("read_only"); err != nil { - return host, port, readOnly, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse @@global.read_only %v: %v", tm, err) + if port, err := row.ToInt64("port"); err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse @@global.port %v: %v", tm, err) + } else { + variables.port = int(port) + } + if variables.readOnly, err = row.ToBool("read_only"); err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse @@global.read_only %v: %v", tm, err) } - return host, port, readOnly, nil + + variables.version = row["version"].ToString() + variables.versionComment = row["version_comment"].ToString() + 
+ return variables, nil } // createOnlineDDLUser creates a gh-ost user account with all neccessary privileges and with a random password @@ -368,18 +415,25 @@ func (e *Executor) tableExists(ctx context.Context, tableName string) (bool, err return (row != nil), nil } -// executeDirectly runs a DDL query directly on the backend MySQL server -func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL, acceptableMySQLErrorCodes ...int) error { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() +func (e *Executor) parseAlterOptions(ctx context.Context, onlineDDL *schema.OnlineDDL) string { + // Temporary hack (2020-08-11) + // Because sqlparser does not do full blown ALTER TABLE parsing, + // and because we don't want gh-ost to know about WITH_GHOST and WITH_PT syntax, + // we resort to regexp-based parsing of the query. + // TODO(shlomi): generate _alter options_ via sqlparser when it full supports ALTER TABLE syntax. + _, _, alterOptions := schema.ParseAlterTableOptions(onlineDDL.SQL) + return alterOptions +} +// executeDirectly runs a DDL query directly on the backend MySQL server +func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL, acceptableMySQLErrorCodes ...int) (acceptableErrorCodeFound bool, err error) { conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) if err != nil { - return err + return false, err } defer conn.Close() - _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusRunning, false, progressPctStarted) + _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusRunning, false, progressPctStarted, etaSecondsUnknown) _, err = conn.ExecuteFetch(onlineDDL.SQL, 0, false) if err != nil { @@ -388,6 +442,7 @@ func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.Online for _, acceptableCode := range acceptableMySQLErrorCodes { if merr.Num == acceptableCode { // we don't consider this to be an error. 
+ acceptableErrorCodeFound = true err = nil break } @@ -395,10 +450,283 @@ func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.Online } } if err != nil { + return false, err + } + _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow) + + return acceptableErrorCodeFound, nil +} + +// terminateVReplMigration stops vreplication, then removes the _vt.vreplication entry for the given migration +func (e *Executor) terminateVReplMigration(ctx context.Context, uuid string) error { + tmClient := tmclient.NewTabletManagerClient() + tablet, err := e.ts.GetTablet(ctx, e.tabletAlias) + if err != nil { + return err + } + { + query, err := sqlparser.ParseAndBind(sqlStopVReplStream, + sqltypes.StringBindVariable(e.dbName), + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return err + } + // silently skip error; stopping the stream is just a graceful act; later deleting it is more important + _, _ = tmClient.VReplicationExec(ctx, tablet.Tablet, query) + } + { + query, err := sqlparser.ParseAndBind(sqlDeleteVReplStream, + sqltypes.StringBindVariable(e.dbName), + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return err + } + // silently skip error; stopping the stream is just a graceful act; later deleting it is more important + if _, err := tmClient.VReplicationExec(ctx, tablet.Tablet, query); err != nil { + return err + } + } + return nil +} + +// cutOverVReplMigration stops vreplication, then removes the _vt.vreplication entry for the given migration +func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) error { + // sanity checks: + vreplTable, err := getVreplTable(ctx, s) + if err != nil { + return err + } + + // get topology client & entities: + tmClient := tmclient.NewTabletManagerClient() + tablet, err := e.ts.GetTablet(ctx, e.tabletAlias) + if err != nil { + return err + } + shardInfo, err := e.ts.GetShard(ctx, e.keyspace, e.shard) + if 
err != nil { + return err + } + + // information about source tablet + onlineDDL, _, err := e.readMigration(ctx, s.workflow) + if err != nil { + return err + } + // come up with temporary name for swap table + swapTable, err := schema.CreateUUID() + if err != nil { + return err + } + swapTable = strings.Replace(swapTable, "-", "", -1) + swapTable = fmt.Sprintf("_swap_%s", swapTable) + + // Preparation is complete. We proceed to cut-over. + + // lock keyspace: + { + lctx, unlockKeyspace, err := e.ts.LockKeyspace(ctx, e.keyspace, "OnlineDDLCutOver") + if err != nil { + return err + } + // lctx has the lock info, needed for UpdateShardFields + ctx = lctx + defer unlockKeyspace(&err) + } + toggleWrites := func(allowWrites bool) error { + if _, err := e.ts.UpdateShardFields(ctx, e.keyspace, shardInfo.ShardName(), func(si *topo.ShardInfo) error { + err := si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, allowWrites, []string{onlineDDL.Table}) + return err + }); err != nil { + return err + } + if err := tmClient.RefreshState(ctx, tablet.Tablet); err != nil { + return err + } + return nil + } + // stop writes on source: + if err := toggleWrites(false); err != nil { + return err + } + defer toggleWrites(true) + + // Writes are now disabled on table. Read up-to-date vreplication info, specifically to get latest (and fixed) pos: + s, err = e.readVReplStream(ctx, s.workflow, false) + if err != nil { + return err + } + + waitForPos := func() error { + ctx, cancel := context.WithTimeout(ctx, 2*cutOverThreshold) + defer cancel() + // Wait for target to reach the up-to-date pos + if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, int(s.id), s.pos); err != nil { + return err + } + // Target is now in sync with source! 
+ return nil + } + if err := waitForPos(); err != nil { + return err + } + // Stop vreplication + if _, err := tmClient.VReplicationExec(ctx, tablet.Tablet, binlogplayer.StopVReplication(uint32(s.id), "stopped for online DDL cutover")); err != nil { + return err + } + + // rename tables atomically (remember, writes on source tables are stopped) + { + parsed := sqlparser.BuildParsedQuery(sqlSwapTables, + onlineDDL.Table, swapTable, + vreplTable, onlineDDL.Table, + swapTable, vreplTable, + ) + if _, err = e.execQuery(ctx, parsed.Query); err != nil { + return err + } + } + + go func() { + // Tables are swapped! Let's take the opportunity to ReloadSchema now + // We do this in a goroutine because it might take time on a schema with thousands of tables, and we don't want to delay + // the cut-over. + // this means ReloadSchema is not in sync with the actual schema change. Users will still need to run tracker if they want to sync. + // In the future, we will want to reload the single table, instead of reloading the schema. + if err := tmClient.ReloadSchema(ctx, tablet.Tablet, ""); err != nil { + vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Error on ReloadSchema while cutting over vreplication migration UUID: %+v", onlineDDL.UUID) + } + }() + + // Tables are now swapped! 
Migration is successful + _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow) + return nil + + // deferred function will re-enable writes now + // deferred function will unlock keyspace +} + +func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, onlineDDL *schema.OnlineDDL, conn *dbconnpool.DBConnection) (v *VRepl, err error) { + vreplTableName := fmt.Sprintf("_%s_%s_vrepl", onlineDDL.UUID, ReadableTimestamp()) + { + // Apply CREATE TABLE for materialized table + parsed := sqlparser.BuildParsedQuery(sqlCreateTableLike, vreplTableName, onlineDDL.Table) + if _, err := conn.ExecuteFetch(parsed.Query, 0, false); err != nil { + return v, err + } + } + alterOptions := e.parseAlterOptions(ctx, onlineDDL) + { + // Apply ALTER TABLE to materialized table + parsed := sqlparser.BuildParsedQuery(sqlAlterTableOptions, vreplTableName, alterOptions) + if _, err := conn.ExecuteFetch(parsed.Query, 0, false); err != nil { + return v, err + } + } + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, alterOptions) + return v, nil +} + +func (e *Executor) initVreplicationRevertMigration(ctx context.Context, onlineDDL *schema.OnlineDDL, revertMigration *schema.OnlineDDL) (v *VRepl, err error) { + // Getting here we've already validated that migration is revertible + + // Validation: vreplication still exists for reverted migration + revertStream, err := e.readVReplStream(ctx, revertMigration.UUID, false) + if err != nil { + // cannot read the vreplication stream which we want to revert + return nil, fmt.Errorf("can not revert vreplication migration %s because vreplication stream %s was not found", revertMigration.UUID, revertMigration.UUID) + } + + onlineDDL.Table = revertMigration.Table + if err := e.updateMySQLTable(ctx, onlineDDL.UUID, onlineDDL.Table); err != nil { + return nil, err + } + + vreplTableName, err := getVreplTable(ctx, revertStream) + if err 
!= nil { + return nil, err + } + + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "") + v.pos = revertStream.pos + return v, nil +} + +// ExecuteWithVReplication sets up the grounds for a vreplication schema migration +func (e *Executor) ExecuteWithVReplication(ctx context.Context, onlineDDL *schema.OnlineDDL, revertMigration *schema.OnlineDDL) error { + // make sure there's no vreplication workflow running under same name + _ = e.terminateVReplMigration(ctx, onlineDDL.UUID) + + if e.isAnyMigrationRunning() { + return ErrExecutorMigrationAlreadyRunning + } + + if e.tabletTypeFunc() != topodatapb.TabletType_MASTER { + return ErrExecutorNotWritableTablet + } + + conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) + if err != nil { + return err + } + defer conn.Close() + + atomic.StoreInt64(&e.vreplMigrationRunning, 1) + e.lastMigrationUUID = onlineDDL.UUID + if err := e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusRunning, false, progressPctStarted, etaSecondsUnknown); err != nil { + return err + } + + var v *VRepl + if revertMigration == nil { + // Original ALTER TABLE request for vreplication + v, err = e.initVreplicationOriginalMigration(ctx, onlineDDL, conn) + } else { + // this is a revert request + v, err = e.initVreplicationRevertMigration(ctx, onlineDDL, revertMigration) + } + if err != nil { + return err + } + if err := v.analyze(ctx, conn); err != nil { return err } - _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull) + if err := e.updateArtifacts(ctx, onlineDDL.UUID, v.targetTable); err != nil { + return err + } + + { + // We need to talk to tabletmanager's VREngine. But we're on TabletServer. While we live in the same + // process as VREngine, it is actually simpler to get hold of it via gRPC, just like wrangler does. 
+ tmClient := tmclient.NewTabletManagerClient() + tablet, err := e.ts.GetTablet(ctx, e.tabletAlias) + if err != nil { + return err + } + // reload schema + if err := tmClient.ReloadSchema(ctx, tablet.Tablet, ""); err != nil { + return err + } + // create vreplication entry + insertVReplicationQuery, err := v.generateInsertStatement(ctx) + if err != nil { + return err + } + if _, err := tmClient.VReplicationExec(ctx, tablet.Tablet, insertVReplicationQuery); err != nil { + return err + } + // start stream! + startVReplicationQuery, err := v.generateStartStatement(ctx) + if err != nil { + return err + } + if _, err := tmClient.VReplicationExec(ctx, tablet.Tablet, startVReplicationQuery); err != nil { + return err + } + } return nil } @@ -406,22 +734,19 @@ func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.Online // Validation included testing the backend MySQL server and the gh-ost binary itself // Execution runs first a dry run, then an actual migration func (e *Executor) ExecuteWithGhost(ctx context.Context, onlineDDL *schema.OnlineDDL) error { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() - - if atomic.LoadInt64(&e.migrationRunning) > 0 { + if e.isAnyMigrationRunning() { return ErrExecutorMigrationAlreadyRunning } if e.tabletTypeFunc() != topodatapb.TabletType_MASTER { return ErrExecutorNotWritableTablet } - mysqlHost, mysqlPort, readOnly, err := e.readMySQLVariables(ctx) + variables, err := e.readMySQLVariables(ctx) if err != nil { log.Errorf("Error before running gh-ost: %+v", err) return err } - if readOnly { + if variables.readOnly { err := fmt.Errorf("Error before running gh-ost: MySQL server is read_only") log.Errorf(err.Error()) return err @@ -464,7 +789,7 @@ export ONLINE_DDL_PASSWORD } onHookContent := func(status schema.OnlineDDLStatus) string { return fmt.Sprintf(`#!/bin/bash -curl -s 'http://localhost:%d/schema-migration/report-status?uuid=%s&status=%s&dryrun='"$GH_OST_DRY_RUN"'&progress='"$GH_OST_PROGRESS" +curl -s 
'http://localhost:%d/schema-migration/report-status?uuid=%s&status=%s&dryrun='"$GH_OST_DRY_RUN"'&progress='"$GH_OST_PROGRESS"'&eta='"$GH_OST_ETA_SECONDS" `, *servenv.Port, onlineDDL.UUID, string(status)) } if _, err := createTempScript(tempDir, "gh-ost-on-startup", onHookContent(schema.OnlineDDLStatusRunning)); err != nil { @@ -508,17 +833,12 @@ curl -s 'http://localhost:%d/schema-migration/report-status?uuid=%s&status=%s&dr } log.Infof("+ OK") - if err := e.updateMigrationLogPath(ctx, onlineDDL.UUID, mysqlHost, tempDir); err != nil { + if err := e.updateMigrationLogPath(ctx, onlineDDL.UUID, variables.host, tempDir); err != nil { return err } runGhost := func(execute bool) error { - // Temporary hack (2020-08-11) - // Because sqlparser does not do full blown ALTER TABLE parsing, - // and because we don't want gh-ost to know about WITH_GHOST and WITH_PT syntax, - // we resort to regexp-based parsing of the query. - // TODO(shlomi): generate _alter options_ via sqlparser when it full supports ALTER TABLE syntax. 
- _, _, alterOptions := schema.ParseAlterTableOptions(onlineDDL.SQL) + alterOptions := e.parseAlterOptions(ctx, onlineDDL) forceTableNames := fmt.Sprintf("%s_%s", onlineDDL.UUID, ReadableTimestamp()) if err := e.updateArtifacts(ctx, onlineDDL.UUID, @@ -532,8 +852,8 @@ curl -s 'http://localhost:%d/schema-migration/report-status?uuid=%s&status=%s&dr os.Setenv("ONLINE_DDL_PASSWORD", onlineDDLPassword) args := []string{ wrapperScriptFileName, - fmt.Sprintf(`--host=%s`, mysqlHost), - fmt.Sprintf(`--port=%d`, mysqlPort), + fmt.Sprintf(`--host=%s`, variables.host), + fmt.Sprintf(`--port=%d`, variables.port), fmt.Sprintf(`--conf=%s`, credentialsConfigFileName), // user & password found here `--allow-on-master`, `--max-load=Threads_running=900`, @@ -554,34 +874,36 @@ curl -s 'http://localhost:%d/schema-migration/report-status?uuid=%s&status=%s&dr fmt.Sprintf(`--panic-flag-file=%s`, e.ghostPanicFlagFileName(onlineDDL.UUID)), fmt.Sprintf(`--execute=%t`, execute), } - opts, _ := shlex.Split(onlineDDL.Options) - args = append(args, opts...) + args = append(args, onlineDDL.RuntimeOptions()...) 
_, err := execCmd("bash", args, os.Environ(), "/tmp", nil, nil) return err } - atomic.StoreInt64(&e.migrationRunning, 1) + atomic.StoreInt64(&e.ghostMigrationRunning, 1) e.lastMigrationUUID = onlineDDL.UUID go func() error { - defer atomic.StoreInt64(&e.migrationRunning, 0) + defer atomic.StoreInt64(&e.ghostMigrationRunning, 0) defer e.dropOnlineDDLUser(ctx) defer e.gcArtifacts(ctx) - log.Infof("Will now dry-run gh-ost on: %s:%d", mysqlHost, mysqlPort) + log.Infof("Will now dry-run gh-ost on: %s:%d", variables.host, variables.port) if err := runGhost(false); err != nil { // perhaps gh-ost was interrupted midway and didn't have the chance to send a "failes" status _ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed) + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, err.Error()) + log.Errorf("Error executing gh-ost dry run: %+v", err) return err } log.Infof("+ OK") - log.Infof("Will now run gh-ost on: %s:%d", mysqlHost, mysqlPort) + log.Infof("Will now run gh-ost on: %s:%d", variables.host, variables.port) startedMigrations.Add(1) if err := runGhost(true); err != nil { // perhaps gh-ost was interrupted midway and didn't have the chance to send a "failes" status _ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed) + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, err.Error()) failedMigrations.Add(1) log.Errorf("Error running gh-ost: %+v", err) return err @@ -599,22 +921,19 @@ curl -s 'http://localhost:%d/schema-migration/report-status?uuid=%s&status=%s&dr // Validation included testing the backend MySQL server and the pt-online-schema-change binary itself // Execution runs first a dry run, then an actual migration func (e *Executor) ExecuteWithPTOSC(ctx context.Context, onlineDDL *schema.OnlineDDL) error { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() - - if atomic.LoadInt64(&e.migrationRunning) > 0 { + if e.isAnyMigrationRunning() { return ErrExecutorMigrationAlreadyRunning } if e.tabletTypeFunc() 
!= topodatapb.TabletType_MASTER { return ErrExecutorNotWritableTablet } - mysqlHost, mysqlPort, readOnly, err := e.readMySQLVariables(ctx) + variables, err := e.readMySQLVariables(ctx) if err != nil { log.Errorf("Error before running pt-online-schema-change: %+v", err) return err } - if readOnly { + if variables.readOnly { err := fmt.Errorf("Error before running pt-online-schema-change: MySQL server is read_only") log.Errorf(err.Error()) return err @@ -719,16 +1038,11 @@ export MYSQL_PWD } log.Infof("+ OK") - if err := e.updateMigrationLogPath(ctx, onlineDDL.UUID, mysqlHost, tempDir); err != nil { + if err := e.updateMigrationLogPath(ctx, onlineDDL.UUID, variables.host, tempDir); err != nil { return err } - // Temporary hack (2020-08-11) - // Because sqlparser does not do full blown ALTER TABLE parsing, - // and because pt-online-schema-change requires only the table options part of the ALTER TABLE statement, - // we resort to regexp-based parsing of the query. - // TODO(shlomi): generate _alter options_ via sqlparser when it full supports ALTER TABLE syntax. - _, _, alterOptions := schema.ParseAlterTableOptions(onlineDDL.SQL) + alterOptions := e.parseAlterOptions(ctx, onlineDDL) // The following sleep() is temporary and artificial. 
Because we create a new user for this // migration, and because we throttle by replicas, we need to wait for the replicas to be @@ -771,9 +1085,9 @@ export MYSQL_PWD `--alter`, alterOptions, `--check-slave-lag`, // We use primary's identity so that pt-online-schema-change calls our lag plugin for exactly 1 server - fmt.Sprintf(`h=%s,P=%d,D=%s,t=%s,u=%s`, mysqlHost, mysqlPort, e.dbName, onlineDDL.Table, onlineDDLUser), + fmt.Sprintf(`h=%s,P=%d,D=%s,t=%s,u=%s`, variables.host, variables.port, e.dbName, onlineDDL.Table, onlineDDLUser), executeFlag, - fmt.Sprintf(`h=%s,P=%d,D=%s,t=%s,u=%s`, mysqlHost, mysqlPort, e.dbName, onlineDDL.Table, onlineDDLUser), + fmt.Sprintf(`h=%s,P=%d,D=%s,t=%s,u=%s`, variables.host, variables.port, e.dbName, onlineDDL.Table, onlineDDLUser), } if execute { @@ -782,35 +1096,36 @@ export MYSQL_PWD `--no-drop-old-table`, ) } - opts, _ := shlex.Split(onlineDDL.Options) - args = append(args, opts...) + args = append(args, onlineDDL.RuntimeOptions()...) _, err = execCmd("bash", args, os.Environ(), "/tmp", nil, nil) return err } - atomic.StoreInt64(&e.migrationRunning, 1) + atomic.StoreInt64(&e.ptoscMigrationRunning, 1) e.lastMigrationUUID = onlineDDL.UUID go func() error { - defer atomic.StoreInt64(&e.migrationRunning, 0) + defer atomic.StoreInt64(&e.ptoscMigrationRunning, 0) defer e.dropOnlineDDLUser(ctx) defer e.gcArtifacts(ctx) - log.Infof("Will now dry-run pt-online-schema-change on: %s:%d", mysqlHost, mysqlPort) + log.Infof("Will now dry-run pt-online-schema-change on: %s:%d", variables.host, variables.port) if err := runPTOSC(false); err != nil { // perhaps pt-osc was interrupted midway and didn't have the chance to send a "failes" status _ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed) + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, err.Error()) _ = e.updateMigrationTimestamp(ctx, "completed_timestamp", onlineDDL.UUID) log.Errorf("Error executing pt-online-schema-change dry run: %+v", err) return err } 
log.Infof("+ OK") - log.Infof("Will now run pt-online-schema-change on: %s:%d", mysqlHost, mysqlPort) + log.Infof("Will now run pt-online-schema-change on: %s:%d", variables.host, variables.port) startedMigrations.Add(1) if err := runPTOSC(true); err != nil { // perhaps pt-osc was interrupted midway and didn't have the chance to send a "failes" status _ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed) + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, err.Error()) _ = e.updateMigrationTimestamp(ctx, "completed_timestamp", onlineDDL.UUID) _ = e.dropPTOSCMigrationTriggers(ctx, onlineDDL) failedMigrations.Add(1) @@ -826,24 +1141,24 @@ export MYSQL_PWD return nil } -func (e *Executor) readMigration(ctx context.Context, uuid string) (onlineDDL *schema.OnlineDDL, err error) { +func (e *Executor) readMigration(ctx context.Context, uuid string) (onlineDDL *schema.OnlineDDL, row sqltypes.RowNamedValues, err error) { - parsed := sqlparser.BuildParsedQuery(sqlSelectMigration, "_vt", ":migration_uuid") + parsed := sqlparser.BuildParsedQuery(sqlSelectMigration, ":migration_uuid") bindVars := map[string]*querypb.BindVariable{ "migration_uuid": sqltypes.StringBindVariable(uuid), } bound, err := parsed.GenerateQuery(bindVars, nil) if err != nil { - return onlineDDL, err + return onlineDDL, nil, err } r, err := e.execQuery(ctx, bound) if err != nil { - return onlineDDL, err + return onlineDDL, nil, err } - row := r.Named().Row() + row = r.Named().Row() if row == nil { // No results - return nil, ErrMigrationNotFound + return nil, nil, ErrMigrationNotFound } onlineDDL = &schema.OnlineDDL{ Keyspace: row["keyspace"].ToString(), @@ -857,19 +1172,19 @@ func (e *Executor) readMigration(ctx context.Context, uuid string) (onlineDDL *s Retries: row.AsInt64("retries", 0), TabletAlias: row["tablet"].ToString(), } - return onlineDDL, nil + return onlineDDL, row, nil } // terminateMigration attempts to interrupt and hard-stop a running migration func (e *Executor) 
terminateMigration(ctx context.Context, onlineDDL *schema.OnlineDDL, lastMigrationUUID string) (foundRunning bool, err error) { - if atomic.LoadInt64(&e.migrationRunning) > 0 { - // double check: is the running migration the very same one we wish to cancel? - if onlineDDL.UUID == lastMigrationUUID { - // assuming all goes well in next steps, we can already report that there has indeed been a migration - foundRunning = true - } - } switch onlineDDL.Strategy { + case schema.DDLStrategyOnline: + // migration could have started by a different tablet. We need to actively verify if it is running + foundRunning, _, _ = e.isVReplMigrationRunning(ctx, onlineDDL.UUID) + if err := e.terminateVReplMigration(ctx, onlineDDL.UUID); err != nil { + return foundRunning, fmt.Errorf("Error cancelling migration, vreplication exec error: %+v", err) + } + _ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed) case schema.DDLStrategyPTOSC: // see if pt-osc is running (could have been executed by this vttablet or one that crashed in the past) if running, pid, _ := e.isPTOSCMigrationRunning(ctx, onlineDDL.UUID); running { @@ -890,6 +1205,13 @@ func (e *Executor) terminateMigration(ctx context.Context, onlineDDL *schema.Onl } } case schema.DDLStrategyGhost: + if atomic.LoadInt64(&e.ghostMigrationRunning) > 0 { + // double check: is the running migration the very same one we wish to cancel? + if onlineDDL.UUID == lastMigrationUUID { + // assuming all goes well in next steps, we can already report that there has indeed been a migration + foundRunning = true + } + } // gh-ost migrations are easy to kill: just touch their specific panic flag files. We trust // gh-ost to terminate. No need to KILL it. And there's no trigger cleanup. 
if err := e.createGhostPanicFlagFile(onlineDDL.UUID); err != nil { @@ -899,19 +1221,21 @@ func (e *Executor) terminateMigration(ctx context.Context, onlineDDL *schema.Onl return foundRunning, nil } -// cancelMigration attempts to abort a scheduled or a running migration -func (e *Executor) cancelMigration(ctx context.Context, uuid string, terminateRunningMigration bool) (result *sqltypes.Result, err error) { +// CancelMigration attempts to abort a scheduled or a running migration +func (e *Executor) CancelMigration(ctx context.Context, uuid string, terminateRunningMigration bool, message string) (result *sqltypes.Result, err error) { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() var rowsAffected uint64 - onlineDDL, err := e.readMigration(ctx, uuid) + onlineDDL, _, err := e.readMigration(ctx, uuid) if err != nil { return nil, err } switch onlineDDL.Status { + case schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed: + return emptyResult, nil case schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady: if err := e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusCancelled); err != nil { return nil, err @@ -921,6 +1245,8 @@ func (e *Executor) cancelMigration(ctx context.Context, uuid string, terminateRu if terminateRunningMigration { migrationFound, err := e.terminateMigration(ctx, onlineDDL, e.lastMigrationUUID) + defer e.updateMigrationMessage(ctx, onlineDDL.UUID, message) + if migrationFound { rowsAffected = 1 } @@ -937,21 +1263,20 @@ func (e *Executor) cancelMigration(ctx context.Context, uuid string, terminateRu } // cancelMigrations attempts to abort a list of migrations -func (e *Executor) cancelMigrations(ctx context.Context, uuids []string) (err error) { +func (e *Executor) cancelMigrations(ctx context.Context, uuids []string, message string) (err error) { for _, uuid := range uuids { log.Infof("cancelMigrations: cancelling %s", uuid) - if _, err := e.cancelMigration(ctx, uuid, true); err != nil { + if _, err := 
e.CancelMigration(ctx, uuid, true, message); err != nil { return err } } return nil } -// cancelPendingMigrations cancels all pending migrations (that are expected to run or are running) +// CancelPendingMigrations cancels all pending migrations (that are expected to run or are running) // for this keyspace -func (e *Executor) cancelPendingMigrations(ctx context.Context) (result *sqltypes.Result, err error) { - parsed := sqlparser.BuildParsedQuery(sqlSelectPendingMigrations, "_vt") - r, err := e.execQuery(ctx, parsed.Query) +func (e *Executor) CancelPendingMigrations(ctx context.Context, message string) (result *sqltypes.Result, err error) { + r, err := e.execQuery(ctx, sqlSelectPendingMigrations) if err != nil { return result, err } @@ -963,8 +1288,8 @@ func (e *Executor) cancelPendingMigrations(ctx context.Context) (result *sqltype result = &sqltypes.Result{} for _, uuid := range uuids { - log.Infof("cancelPendingMigrations: cancelling %s", uuid) - res, err := e.cancelMigration(ctx, uuid, true) + log.Infof("CancelPendingMigrations: cancelling %s", uuid) + res, err := e.CancelMigration(ctx, uuid, true, message) if err != nil { return result, err } @@ -980,13 +1305,12 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - if atomic.LoadInt64(&e.migrationRunning) > 0 { + if e.isAnyMigrationRunning() { return ErrExecutorMigrationAlreadyRunning } { - parsed := sqlparser.BuildParsedQuery(sqlSelectCountReadyMigrations, "_vt") - r, err := e.execQuery(ctx, parsed.Query) + r, err := e.execQuery(ctx, sqlSelectCountReadyMigrations) if err != nil { return err } @@ -1003,76 +1327,498 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error { } } // Cool, seems like no migration is ready. 
Let's try and make a single 'queued' migration 'ready' - parsed := sqlparser.BuildParsedQuery(sqlScheduleSingleMigration, "_vt") - _, err := e.execQuery(ctx, parsed.Query) + _, err := e.execQuery(ctx, sqlScheduleSingleMigration) return err } -func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.OnlineDDL) error { - failMigration := func(err error) error { - _ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed) - e.triggerNextCheckInterval() - return err - } - - ddlAction, err := onlineDDL.GetAction() +func (e *Executor) validateMigrationRevertible(ctx context.Context, revertMigration *schema.OnlineDDL) (err error) { + // Validation: migration to revert exists and is in complete state + action, actionStr, err := revertMigration.GetActionStr() if err != nil { - return failMigration(err) + return err } - switch ddlAction { + switch action { + case sqlparser.AlterDDLAction: + if revertMigration.Strategy != schema.DDLStrategyOnline { + return fmt.Errorf("can only revert a %s strategy migration. Migration %s has %s strategy", schema.DDLStrategyOnline, revertMigration.UUID, revertMigration.Strategy) + } + case sqlparser.RevertDDLAction: + case sqlparser.CreateDDLAction: case sqlparser.DropDDLAction: - go func() error { - // Drop statement. - // Normally, we're going to modify DROP to RENAME (see later on). But if table name is - // already a GC-lifecycle table, then we don't put it through yet another GC lifecycle, - // we just drop it. - if schema.IsGCTableName(onlineDDL.Table) { - if err := e.executeDirectly(ctx, onlineDDL); err != nil { - return failMigration(err) - } - return nil + default: + return fmt.Errorf("cannot revert migration %s: unexpected action %s", revertMigration.UUID, actionStr) + } + if revertMigration.Status != schema.OnlineDDLStatusComplete { + return fmt.Errorf("can only revert a migration in a '%s' state. 
Migration %s is in '%s' state", schema.OnlineDDLStatusComplete, revertMigration.UUID, revertMigration.Status) + } + { + // Validation: see if there's a pending migration on this table: + r, err := e.execQuery(ctx, sqlSelectPendingMigrations) + if err != nil { + return err + } + // we identify running migrations on requested table + for _, row := range r.Named().Rows { + pendingUUID := row["migration_uuid"].ToString() + keyspace := row["keyspace"].ToString() + table := row["mysql_table"].ToString() + status := schema.OnlineDDLStatus(row["migration_status"].ToString()) + + if keyspace == e.keyspace && table == revertMigration.Table { + return fmt.Errorf("can not revert migration %s on table %s because migration %s is in %s status. May only revert if all migrations on this table are completed or failed", revertMigration.UUID, revertMigration.Table, pendingUUID, status) } - - // We transform a DROP TABLE into a RENAME TABLE statement, so as to remove the table safely and asynchronously. - - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + } + { + // Validation: see that we're reverting the last successful migration on this table: + query, err := sqlparser.ParseAndBind(sqlSelectCompleteMigrationsOnTable, + sqltypes.StringBindVariable(e.keyspace), + sqltypes.StringBindVariable(revertMigration.Table), + ) if err != nil { - return failMigration(err) + return err } - - onlineDDL.SQL, _, err = schema.GenerateRenameStatementWithUUID(onlineDDL.Table, schema.HoldTableGCState, onlineDDL.GetGCUUID(), time.Now().UTC().Add(gcHoldHours*time.Hour)) + r, err := e.execQuery(ctx, query) if err != nil { - return failMigration(err) + return err } - - if ddlStmt.GetIfExists() { - err = e.executeDirectly(ctx, onlineDDL, mysql.ERCantFindFile, mysql.ERNoSuchTable) - } else { - err = e.executeDirectly(ctx, onlineDDL) + for _, row := range r.Named().Rows { + completeUUID := row["migration_uuid"].ToString() + if completeUUID != revertMigration.UUID { + return fmt.Errorf("can 
not revert migration %s on table %s because it is not the last migration to complete on that table. The last migration to complete was %s", revertMigration.UUID, revertMigration.Table, completeUUID) + } } + } + } + return nil +} - if err != nil { +// executeRevert is called for 'revert' migrations (SQL is of the form "revert 99caeca2_74e2_11eb_a693_f875a4d24e90", not a real SQL of course). +// In this function we: +// - figure out whether the revert is valid: can we really revert requested migration? +// - what type of migration we're reverting? (CREATE/DROP/ALTER) +// - revert appropriately to the type of migration +func (e *Executor) executeRevert(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) { + revertUUID, _ := onlineDDL.GetRevertUUID() + if err != nil { + return fmt.Errorf("cannot run a revert migration %v: %+v", onlineDDL.UUID, err) + } + + revertMigration, row, err := e.readMigration(ctx, revertUUID) + if err != nil { + return err + } + if err := e.validateMigrationRevertible(ctx, revertMigration); err != nil { + return err + } + revertActionStr := row["ddl_action"].ToString() + switch revertActionStr { + case sqlparser.CreateStr: + { + // We are reverting a CREATE migration. The revert is to DROP, only we don't actually + // drop the table, we rename it into lifecycle + // Possibly this was a CREATE TABLE IF NOT EXISTS, and possibly the table already existed + // before the DDL, in which case the CREATE was a noop. In that scenario we _do not_ drop + // the table. + // We can tell the difference by looking at the artifacts. A successful CREATE TABLE, where + // a table actually gets created, has a sentry, dummy artifact. A noop has not. 
+ + if err := e.updateDDLAction(ctx, onlineDDL.UUID, sqlparser.DropStr); err != nil { + return err + } + if err := e.updateMySQLTable(ctx, onlineDDL.UUID, revertMigration.Table); err != nil { + return err + } + + artifacts := row["artifacts"].ToString() + artifactTables := textutil.SplitDelimitedList(artifacts) + if len(artifactTables) > 1 { + return fmt.Errorf("cannot run migration %s reverting %s: found %d artifact tables, expected maximum 1", onlineDDL.UUID, revertMigration.UUID, len(artifactTables)) + } + if len(artifactTables) == 0 { + // This indicates no table was actually created. this must have been a CREATE TABLE IF NOT EXISTS where the table already existed. + _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow) + } + + for _, artifactTable := range artifactTables { + if err := e.updateArtifacts(ctx, onlineDDL.UUID, artifactTable); err != nil { + return err + } + onlineDDL.SQL = sqlparser.BuildParsedQuery(sqlRenameTable, revertMigration.Table, artifactTable).Query + if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { + return err + } + } + } + case sqlparser.DropStr: + { + // We are reverting a DROP migration. But the table wasn't really dropped, because that's not how + // we run DROP migrations. It was renamed. So we need to rename it back. + // But we impose as if we are now CREATE-ing the table. 
+ if err := e.updateDDLAction(ctx, onlineDDL.UUID, sqlparser.CreateStr); err != nil { + return err + } + if err := e.updateMySQLTable(ctx, onlineDDL.UUID, revertMigration.Table); err != nil { + return err + } + artifacts := row["artifacts"].ToString() + artifactTables := textutil.SplitDelimitedList(artifacts) + if len(artifactTables) > 1 { + return fmt.Errorf("cannot run migration %s reverting %s: found %d artifact tables, expected maximum 1", onlineDDL.UUID, revertMigration.UUID, len(artifactTables)) + } + if len(artifactTables) == 0 { + // Could happen on `DROP TABLE IF EXISTS` where the table did not exist... + _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow) + } + for _, artifactTable := range artifactTables { + if err := e.updateArtifacts(ctx, onlineDDL.UUID, artifactTable); err != nil { + return err + } + onlineDDL.SQL = sqlparser.BuildParsedQuery(sqlRenameTable, artifactTable, revertMigration.Table).Query + if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { + return err + } + } + } + case sqlparser.AlterStr: + { + if err := e.updateDDLAction(ctx, onlineDDL.UUID, sqlparser.AlterStr); err != nil { + return err + } + if err := e.ExecuteWithVReplication(ctx, onlineDDL, revertMigration); err != nil { + return err + } + } + default: + return fmt.Errorf("cannot run migration %s reverting %s: unexpected action %s", onlineDDL.UUID, revertMigration.UUID, revertActionStr) + } + + return nil +} + +// evaluateDeclarativeDiff is called for -declarative CREATE statements, where the table already exists. 
The function generates a SQL diff, which can be: +// - empty, in which case the migration is noop and implicitly successful, or +// - non-empty, in which case the migration turns to be an ALTER +func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schema.OnlineDDL) (alterClause string, err error) { + + // Modify the CREATE TABLE statement to indicate a different, made up table name, known as the "comparison table" + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + if err != nil { + return "", err + } + comparisonTableName, err := schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime()) + if err != nil { + return "", err + } + + conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) + if err != nil { + return "", err + } + defer conn.Close() + + { + // Create the comparison table + ddlStmt.SetTable("", comparisonTableName) + modifiedCreateSQL := sqlparser.String(ddlStmt) + + if _, err := conn.ExecuteFetch(modifiedCreateSQL, 0, false); err != nil { + return "", err + } + + defer func() { + // Drop the comparison table + parsed := sqlparser.BuildParsedQuery(sqlDropTable, comparisonTableName) + _, _ = conn.ExecuteFetch(parsed.Query, 0, false) + // Nothing bad happens for not checking the error code. The table is GC/HOLD. 
If we + // can't drop it now, it still gets collected later by tablegc mechanism + }() + + } + + // Compare the existing (to be potentially migrated) table with the declared (newly created) table: + // all things are tengo related + if err := func() error { + variables, err := e.readMySQLVariables(ctx) + if err != nil { + return err + } + flavor := tengo.ParseFlavor(variables.version, variables.versionComment) + + // Create a temporary account for tengo to use + onlineDDLPassword, err := e.createOnlineDDLUser(ctx) + if err != nil { + return err + } + defer e.dropOnlineDDLUser(ctx) + + // tengo requires sqlx.DB + cfg := mysqldriver.NewConfig() + cfg.User = onlineDDLUser + cfg.Passwd = onlineDDLPassword + cfg.Net = "tcp" + cfg.Addr = fmt.Sprintf("%s:%d", variables.host, variables.port) + cfg.DBName = e.dbName + cfg.ParseTime = true + cfg.InterpolateParams = true + cfg.Timeout = 1 * time.Second + mysqlDSN := cfg.FormatDSN() + + db, err := sqlx.Open("mysql", mysqlDSN) + if err != nil { + return err + } + defer db.Close() + + // Read existing table + existingTable, err := tengo.QuerySchemaTable(ctx, db, e.dbName, onlineDDL.Table, flavor) + if err != nil { + return err + } + // Read comparison table + comparisonTable, err := tengo.QuerySchemaTable(ctx, db, e.dbName, comparisonTableName, flavor) + if err != nil { + return err + } + // We created the comparison tablein same schema as original table, but under different name (because obviously we can't have + // two tables with identical name in same schema). It is our preference to create the table in the same schema. + // unfortunately, tengo does not allow comparing tables with different names. 
After asking tengo to read table info, we cheat + // and override the name of the table: + comparisonTable.Name = existingTable.Name + // We also override `.CreateStatement` (output of SHOW CREATE TABLE), because tengo has a validation, where if it doesn't + // find any ALTER changes, then the CreateStatement-s must be identical (or else it errors with UnsupportedDiffError) + comparisonTable.CreateStatement, err = schema.ReplaceTableNameInCreateTableStatement(comparisonTable.CreateStatement, existingTable.Name) + if err != nil { + return err + } + // Diff the two tables + diff := tengo.NewAlterTable(existingTable, comparisonTable) + if diff == nil { + // No change. alterClause remains empty + return nil + } + mods := tengo.StatementModifiers{ + AllowUnsafe: true, + NextAutoInc: tengo.NextAutoIncIfIncreased, + } + alterClause, err = diff.Clauses(mods) + if err != nil { + return err + } + return nil + }(); err != nil { + return "", err + } + + return alterClause, nil +} + +// executeMigration executes a single migration. It analyzes the migration type: +// - is it declarative? +// - is it CREATE / DROP / ALTER? +// - it is a Revert request? +// - what's the migration strategy? +// The function invokes the appropriate handlers for each of those cases. +func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.OnlineDDL) error { + failMigration := func(err error) error { + _ = e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed) + if err != nil { + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, err.Error()) + } + e.triggerNextCheckInterval() + return err + } + + ddlAction, err := onlineDDL.GetAction() + if err != nil { + return failMigration(err) + } + + if onlineDDL.IsDeclarative() { + switch ddlAction { + case sqlparser.RevertDDLAction: + // No special action. Declarative Revert migrations are handled like any normal Revert migration. 
+ case sqlparser.AlterDDLAction: + return failMigration(vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "strategy is declarative. ALTER cannot run in declarative mode for migration %v", onlineDDL.UUID)) + case sqlparser.DropDDLAction: + // This DROP is declarative, meaning it may: + // - actually DROP a table, if that table exists, or + // - Implicitly do nothing, if the table does not exist + + exists, err := e.tableExists(ctx, onlineDDL.Table) + if err != nil { + return failMigration(err) + } + if exists { + // table does exist, so this declarative DROP turns out to really be an actual DROP. No further action is needed here + } else { + // table does not exist. We mark this DROP as implicitly sucessful + _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow) + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, "no change") + return nil + } + case sqlparser.CreateDDLAction: + // This CREATE is declarative, meaning it may: + // - actually CREATE a table, if that table does not exist, or + // - ALTER the table, if it exists and is different, or + // - Implicitly do nothing, if the table exists and is identical to CREATE statement + + { + // Sanity: reject IF NOT EXISTS statements, because they don't make sense (or are ambiguous) in declarative mode + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + if err != nil { + return failMigration(err) + } + if ddlStmt.GetIfNotExists() { + return failMigration(vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "strategy is declarative. IF NOT EXISTS does not work in declarative mode for migration %v", onlineDDL.UUID)) + } + } + exists, err := e.tableExists(ctx, onlineDDL.Table) + if err != nil { return failMigration(err) } + if exists { + alterClause, err := e.evaluateDeclarativeDiff(ctx, onlineDDL) + if err != nil { + return failMigration(err) + } + if alterClause == "" { + // No diff! 
We mark this CREATE as implicitly sucessful + _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow) + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, "no change") + return nil + } + // alterClause is non empty. We convert this migration into an ALTER + if err := e.updateDDLAction(ctx, onlineDDL.UUID, sqlparser.AlterStr); err != nil { + return failMigration(err) + } + ddlAction = sqlparser.AlterDDLAction + onlineDDL.SQL = fmt.Sprintf("ALTER TABLE `%s` %s", onlineDDL.Table, alterClause) + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, alterClause) + } else { + { + // table does not exist, so this declarative CREATE turns out to really be an actual CREATE. No further action is needed here. + // the statement is empty, but I want to keep the 'else' clause here just for sake of this comment. + } + } + } + } // endif onlineDDL.IsDeclarative() + // Noting that if the migration is declarative, then it may have been modified in the above block, to meet the next operations. + + switch ddlAction { + case sqlparser.DropDDLAction: + go func() error { + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + + // Drop statement. + // Normally, we're going to modify DROP to RENAME (see later on). But if table name is + // already a GC-lifecycle table, then we don't put it through yet another GC lifecycle, + // we just drop it. + if schema.IsGCTableName(onlineDDL.Table) { + if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { + return failMigration(err) + } + return nil + } + + // We transform a DROP TABLE into a RENAME TABLE statement, so as to remove the table safely and asynchronously. 
+ + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + if err != nil { + return failMigration(err) + } + + var toTableName string + onlineDDL.SQL, toTableName, err = schema.GenerateRenameStatementWithUUID(onlineDDL.Table, schema.HoldTableGCState, onlineDDL.GetGCUUID(), newGCTableRetainTime()) + if err != nil { + return failMigration(err) + } + if err := e.updateArtifacts(ctx, onlineDDL.UUID, toTableName); err != nil { + return err + } + + acceptableErrorCodes := []int{} + if ddlStmt.GetIfExists() { + acceptableErrorCodes = acceptableDropTableIfExistsErrorCodes + } + acceptableErrCodeFound, err := e.executeDirectly(ctx, onlineDDL, acceptableErrorCodes...) + if err != nil { + return failMigration(err) + } + if acceptableErrCodeFound { + // Table did not exist after all. There is no artifact + if err := e.clearArtifacts(ctx, onlineDDL.UUID); err != nil { + return err + } + } + return nil }() case sqlparser.CreateDDLAction: - go func() { - if err := e.executeDirectly(ctx, onlineDDL); err != nil { + go func() error { + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + + sentryArtifactTableName, err := schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime()) + if err != nil { + return failMigration(err) + } + // we create a dummy artifact. Its existence means the table was created by this migration. + // It will be read by the revert operation. + if err := e.updateArtifacts(ctx, onlineDDL.UUID, sentryArtifactTableName); err != nil { + return err + } + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + if err != nil { + return failMigration(err) + } + if ddlStmt.GetIfNotExists() { + // This is a CREATE TABLE IF NOT EXISTS + // We want to know if the table actually exists before running this migration. + // If so, then the operation is noop, and when we revert the migration, we also do a noop. 
+ exists, err := e.tableExists(ctx, onlineDDL.Table) + if err != nil { + return failMigration(err) + } + if exists { + // the table already exists. This CREATE TABLE IF NOT EXISTS statement is a noop. + // We therefore clear the artifact field. A revert operation will use this as a hint. + if err := e.clearArtifacts(ctx, onlineDDL.UUID); err != nil { + return failMigration(err) + } + } + } + if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { failMigration(err) } + return nil }() case sqlparser.AlterDDLAction: switch onlineDDL.Strategy { + case schema.DDLStrategyOnline: + go func() { + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + + if err := e.ExecuteWithVReplication(ctx, onlineDDL, nil); err != nil { + failMigration(err) + } + }() case schema.DDLStrategyGhost: go func() { + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + if err := e.ExecuteWithGhost(ctx, onlineDDL); err != nil { failMigration(err) } }() case schema.DDLStrategyPTOSC: go func() { + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + if err := e.ExecuteWithPTOSC(ctx, onlineDDL); err != nil { failMigration(err) } @@ -1082,6 +1828,15 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin return failMigration(fmt.Errorf("Unsupported strategy: %+v", onlineDDL.Strategy)) } } + case sqlparser.RevertDDLAction: + go func() { + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + + if err := e.executeRevert(ctx, onlineDDL); err != nil { + failMigration(err) + } + }() } return nil } @@ -1090,12 +1845,11 @@ func (e *Executor) runNextMigration(ctx context.Context) error { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - if atomic.LoadInt64(&e.migrationRunning) > 0 { + if e.isAnyMigrationRunning() { return ErrExecutorMigrationAlreadyRunning } - parsed := sqlparser.BuildParsedQuery(sqlSelectReadyMigration, "_vt") - r, err := e.execQuery(ctx, parsed.Query) + r, err := e.execQuery(ctx, sqlSelectReadyMigration) if err != nil { 
return err } @@ -1187,30 +1941,179 @@ func (e *Executor) dropPTOSCMigrationTriggers(ctx context.Context, onlineDDL *sc return err } -// reviewRunningMigrations iterates migrations in 'running' state (there really should just be one that is -// actually running). -func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning int, runningNotByThisProcess []string, err error) { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() +// readVReplStream reads _vt.vreplication entries for given workflow +func (e *Executor) readVReplStream(ctx context.Context, uuid string, okIfMissing bool) (*VReplStream, error) { + query, err := sqlparser.ParseAndBind(sqlReadVReplStream, + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return nil, err + } + r, err := e.execQuery(ctx, query) + if err != nil { + return nil, err + } + if len(r.Rows) == 0 && okIfMissing { + return nil, nil + } + row := r.Named().Row() + if row == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Cannot find unique workflow for UUID: %+v", uuid) + } + s := &VReplStream{ + id: row.AsInt64("id", 0), + workflow: row.AsString("workflow", ""), + source: row.AsString("source", ""), + pos: row.AsString("pos", ""), + timeUpdated: row.AsInt64("time_updated", 0), + transactionTimestamp: row.AsInt64("transaction_timestamp", 0), + state: row.AsString("state", ""), + message: row.AsString("message", ""), + bls: &binlogdatapb.BinlogSource{}, + } + if err := proto.UnmarshalText(s.source, s.bls); err != nil { + return nil, err + } + return s, nil +} - parsed := sqlparser.BuildParsedQuery(sqlSelectRunningMigrations, "_vt", ":strategy") - bindVars := map[string]*querypb.BindVariable{ - "strategy": sqltypes.StringBindVariable(string(schema.DDLStrategyPTOSC)), +// isVReplMigrationReadyToCutOver sees if the vreplication migration has completed the row copy +// and is up to date with the binlogs. 
+func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, s *VReplStream) (isReady bool, err error) { + // Check all the cases where migration is still running: + { + // when ready to cut-over, pos must have some value + if s.pos == "" { + return false, nil + } } - bound, err := parsed.GenerateQuery(bindVars, nil) + { + // Both time_updated and transaction_timestamp must be in close priximity to each + // other and to the time now, otherwise that means we're lagging and it's not a good time + // to cut-over + durationDiff := func(t1, t2 time.Time) time.Duration { + diff := t1.Sub(t2) + if diff < 0 { + diff = -diff + } + return diff + } + timeNow := time.Now() + timeUpdated := time.Unix(s.timeUpdated, 0) + if durationDiff(timeNow, timeUpdated) > cutOverThreshold { + return false, nil + } + // Let's look at transaction timestamp. This gets written by any ongoing + // writes on the server (whether on this table or any other table) + transactionTimestamp := time.Unix(s.transactionTimestamp, 0) + if durationDiff(timeNow, transactionTimestamp) > cutOverThreshold { + return false, nil + } + } + { + // copy_state must have no entries for this vreplication id: if entries are + // present that means copy is still in progress + query, err := sqlparser.ParseAndBind(sqlReadCountCopyState, + sqltypes.Int64BindVariable(s.id), + ) + if err != nil { + return false, err + } + r, err := e.execQuery(ctx, query) + if err != nil { + return false, err + } + csRow := r.Named().Row() + if csRow == nil { + return false, err + } + count := csRow.AsInt64("cnt", 0) + if count > 0 { + // Still copying + return false, nil + } + } + return true, nil +} + +// isVReplMigrationRunning sees if there is a VReplication migration actively running +func (e *Executor) isVReplMigrationRunning(ctx context.Context, uuid string) (isRunning bool, s *VReplStream, err error) { + s, err = e.readVReplStream(ctx, uuid, true) if err != nil { - return countRunnning, runningNotByThisProcess, err + return 
false, s, err } - r, err := e.execQuery(ctx, bound) + if s == nil { + return false, s, nil + } + if strings.Contains(strings.ToLower(s.message), "error") { + return false, s, nil + } + switch s.state { + case binlogplayer.VReplicationInit, binlogplayer.VReplicationCopying, binlogplayer.BlpRunning: + return true, s, nil + } + return false, s, nil +} + +// reviewRunningMigrations iterates migrations in 'running' state. Normally there's only one running, which was +// spawned by this tablet; but vreplication migrations could also resume from failure. +func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning int, cancellable []string, err error) { + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + + r, err := e.execQuery(ctx, sqlSelectRunningMigrations) if err != nil { - return countRunnning, runningNotByThisProcess, err + return countRunnning, cancellable, err } + // we identify running vreplication migrations in this function + atomic.StoreInt64(&e.vreplMigrationRunning, 0) for _, row := range r.Named().Rows { uuid := row["migration_uuid"].ToString() - // Since pt-osc doesn't have a "liveness" plugin entry point, we do it externally: - // if the process is alive, we update the `liveness_timestamp` for this migration. - if running, _, _ := e.isPTOSCMigrationRunning(ctx, uuid); running { - _ = e.updateMigrationTimestamp(ctx, "liveness_timestamp", uuid) + strategy := schema.DDLStrategy(row["strategy"].ToString()) + switch strategy { + case schema.DDLStrategyOnline: + { + // We check the _vt.vreplication table + running, s, err := e.isVReplMigrationRunning(ctx, uuid) + if err != nil { + return countRunnning, cancellable, err + } + if running { + // This VRepl migration may have started from outside this tablet, so + // vreplMigrationRunning could be zero. 
Whatever the case is, we're under + // migrationMutex lock and it's now safe to ensure vreplMigrationRunning is 1 + atomic.StoreInt64(&e.vreplMigrationRunning, 1) + _ = e.updateMigrationTimestamp(ctx, "liveness_timestamp", uuid) + isReady, err := e.isVReplMigrationReadyToCutOver(ctx, s) + if err != nil { + return countRunnning, cancellable, err + } + if isReady { + if err := e.cutOverVReplMigration(ctx, s); err != nil { + return countRunnning, cancellable, err + } + } + } + } + case schema.DDLStrategyPTOSC: + { + // Since pt-osc doesn't have a "liveness" plugin entry point, we do it externally: + // if the process is alive, we update the `liveness_timestamp` for this migration. + running, _, err := e.isPTOSCMigrationRunning(ctx, uuid) + if err != nil { + return countRunnning, cancellable, err + } + if running { + _ = e.updateMigrationTimestamp(ctx, "liveness_timestamp", uuid) + } + if uuid != e.lastMigrationUUID { + // This executor can only spawn one migration at a time. And that + // migration is identified by e.lastMigrationUUID. + // If we find a _running_ migration that does not have this UUID, it _must_ + // mean the migration was started by a former vttablet (ie vttablet crashed and restarted) + cancellable = append(cancellable, uuid) + } + } } countRunnning++ @@ -1219,10 +2122,10 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i // migration is identified by e.lastMigrationUUID. 
// If we find a _running_ migration that does not have this UUID, it _must_ // mean the migration was started by a former vttablet (ie vttablet crashed and restarted) - runningNotByThisProcess = append(runningNotByThisProcess, uuid) + cancellable = append(cancellable, uuid) } } - return countRunnning, runningNotByThisProcess, err + return countRunnning, cancellable, err } // reviewStaleMigrations marks as 'failed' migrations whose status is 'running' but which have @@ -1231,7 +2134,7 @@ func (e *Executor) reviewStaleMigrations(ctx context.Context) error { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - parsed := sqlparser.BuildParsedQuery(sqlSelectStaleMigrations, "_vt", ":minutes") + parsed := sqlparser.BuildParsedQuery(sqlSelectStaleMigrations, ":minutes") bindVars := map[string]*querypb.BindVariable{ "minutes": sqltypes.Int64BindVariable(staleMigrationMinutes), } @@ -1246,7 +2149,7 @@ func (e *Executor) reviewStaleMigrations(ctx context.Context) error { for _, row := range r.Named().Rows { uuid := row["migration_uuid"].ToString() - onlineDDL, err := e.readMigration(ctx, uuid) + onlineDDL, _, err := e.readMigration(ctx, uuid) if err != nil { return err } @@ -1266,6 +2169,7 @@ func (e *Executor) reviewStaleMigrations(ctx context.Context) error { if err := e.updateMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusFailed); err != nil { return err } + _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, "stale migration") } return nil @@ -1274,11 +2178,11 @@ func (e *Executor) reviewStaleMigrations(ctx context.Context) error { // retryTabletFailureMigrations looks for migrations failed by tablet failure (e.g. 
by failover) // and retry them (put them back in the queue) func (e *Executor) retryTabletFailureMigrations(ctx context.Context) error { - _, err := e.retryMigration(ctx, sqlWhereTabletFailure) + _, err := e.retryMigrationWhere(ctx, sqlWhereTabletFailure) return err } -// gcArtifacts garbage-collects migration artifacts from completed/failed migrations +// gcArtifactTable garbage-collects a single table func (e *Executor) gcArtifactTable(ctx context.Context, artifactTable, uuid string) error { tableExists, err := e.tableExists(ctx, artifactTable) if err != nil { @@ -1287,6 +2191,8 @@ func (e *Executor) gcArtifactTable(ctx context.Context, artifactTable, uuid stri if !tableExists { return nil } + // We've already concluded in gcArtifacts() that this table was held for long enough. + // We therefore move it into PURGE state. renameStatement, _, err := schema.GenerateRenameStatementWithUUID(artifactTable, schema.PurgeTableGCState, schema.OnlineDDLToGCUUID(uuid), time.Now().UTC()) if err != nil { return err @@ -1306,8 +2212,13 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - parsed := sqlparser.BuildParsedQuery(sqlSelectUncollectedArtifacts, "_vt") - r, err := e.execQuery(ctx, parsed.Query) + query, err := sqlparser.ParseAndBind(sqlSelectUncollectedArtifacts, + sqltypes.Int64BindVariable(int64((*retainOnlineDDLTables).Seconds())), + ) + if err != nil { + return err + } + r, err := e.execQuery(ctx, query) if err != nil { return err } @@ -1359,6 +2270,7 @@ func (e *Executor) onMigrationCheckTick() { log.Error(err) return } + if err := e.retryTabletFailureMigrations(ctx); err != nil { log.Error(err) } @@ -1368,9 +2280,9 @@ func (e *Executor) onMigrationCheckTick() { if err := e.runNextMigration(ctx); err != nil { log.Error(err) } - if _, runningNotByThisProcess, err := e.reviewRunningMigrations(ctx); err != nil { + if _, cancellable, err := e.reviewRunningMigrations(ctx); err != nil { log.Error(err) - 
} else if err := e.cancelMigrations(ctx, runningNotByThisProcess); err != nil { + } else if err := e.cancelMigrations(ctx, cancellable, "auto cancel"); err != nil { log.Error(err) } if err := e.reviewStaleMigrations(ctx); err != nil { @@ -1382,7 +2294,7 @@ func (e *Executor) onMigrationCheckTick() { } func (e *Executor) updateMigrationStartedTimestamp(ctx context.Context, uuid string) error { - parsed := sqlparser.BuildParsedQuery(sqlUpdateMigrationStartedTimestamp, "_vt", + parsed := sqlparser.BuildParsedQuery(sqlUpdateMigrationStartedTimestamp, ":migration_uuid", ) bindVars := map[string]*querypb.BindVariable{ @@ -1397,7 +2309,7 @@ func (e *Executor) updateMigrationStartedTimestamp(ctx context.Context, uuid str } func (e *Executor) updateMigrationTimestamp(ctx context.Context, timestampColumn string, uuid string) error { - parsed := sqlparser.BuildParsedQuery(sqlUpdateMigrationTimestamp, "_vt", timestampColumn, + parsed := sqlparser.BuildParsedQuery(sqlUpdateMigrationTimestamp, timestampColumn, ":migration_uuid", ) bindVars := map[string]*querypb.BindVariable{ @@ -1413,43 +2325,44 @@ func (e *Executor) updateMigrationTimestamp(ctx context.Context, timestampColumn func (e *Executor) updateMigrationLogPath(ctx context.Context, uuid string, hostname, path string) error { logPath := fmt.Sprintf("%s:%s", hostname, path) - parsed := sqlparser.BuildParsedQuery(sqlUpdateMigrationLogPath, "_vt", - ":log_path", - ":migration_uuid", + query, err := sqlparser.ParseAndBind(sqlUpdateMigrationLogPath, + sqltypes.StringBindVariable(logPath), + sqltypes.StringBindVariable(uuid), ) - bindVars := map[string]*querypb.BindVariable{ - "log_path": sqltypes.StringBindVariable(logPath), - "migration_uuid": sqltypes.StringBindVariable(uuid), - } - bound, err := parsed.GenerateQuery(bindVars, nil) if err != nil { return err } - _, err = e.execQuery(ctx, bound) + _, err = e.execQuery(ctx, query) return err } func (e *Executor) updateArtifacts(ctx context.Context, uuid string, artifacts 
...string) error { bindArtifacts := strings.Join(artifacts, ",") - parsed := sqlparser.BuildParsedQuery(sqlUpdateArtifacts, "_vt", - ":artifacts", - ":migration_uuid", + query, err := sqlparser.ParseAndBind(sqlUpdateArtifacts, + sqltypes.StringBindVariable(bindArtifacts), + sqltypes.StringBindVariable(uuid), ) - bindVars := map[string]*querypb.BindVariable{ - "artifacts": sqltypes.StringBindVariable(bindArtifacts), - "migration_uuid": sqltypes.StringBindVariable(uuid), + if err != nil { + return err } - bound, err := parsed.GenerateQuery(bindVars, nil) + _, err = e.execQuery(ctx, query) + return err +} + +func (e *Executor) clearArtifacts(ctx context.Context, uuid string) error { + query, err := sqlparser.ParseAndBind(sqlClearArtifacts, + sqltypes.StringBindVariable(uuid), + ) if err != nil { return err } - _, err = e.execQuery(ctx, bound) + _, err = e.execQuery(ctx, query) return err } // updateTabletFailure marks a given migration as "tablet_failed" func (e *Executor) updateTabletFailure(ctx context.Context, uuid string) error { - parsed := sqlparser.BuildParsedQuery(sqlUpdateTabletFailure, "_vt", + parsed := sqlparser.BuildParsedQuery(sqlUpdateTabletFailure, ":migration_uuid", ) bindVars := map[string]*querypb.BindVariable{ @@ -1464,19 +2377,62 @@ func (e *Executor) updateTabletFailure(ctx context.Context, uuid string) error { } func (e *Executor) updateMigrationStatus(ctx context.Context, uuid string, status schema.OnlineDDLStatus) error { - parsed := sqlparser.BuildParsedQuery(sqlUpdateMigrationStatus, "_vt", - ":migration_status", - ":migration_uuid", + query, err := sqlparser.ParseAndBind(sqlUpdateMigrationStatus, + sqltypes.StringBindVariable(string(status)), + sqltypes.StringBindVariable(uuid), ) - bindVars := map[string]*querypb.BindVariable{ - "migration_status": sqltypes.StringBindVariable(string(status)), - "migration_uuid": sqltypes.StringBindVariable(uuid), + if err != nil { + return err } - bound, err := parsed.GenerateQuery(bindVars, nil) + _, err 
= e.execQuery(ctx, query) + return err +} + +func (e *Executor) updateDDLAction(ctx context.Context, uuid string, actionStr string) error { + query, err := sqlparser.ParseAndBind(sqlUpdateDDLAction, + sqltypes.StringBindVariable(actionStr), + sqltypes.StringBindVariable(uuid), + ) if err != nil { return err } - _, err = e.execQuery(ctx, bound) + _, err = e.execQuery(ctx, query) + return err +} + +func (e *Executor) updateMigrationMessage(ctx context.Context, uuid string, message string) error { + query, err := sqlparser.ParseAndBind(sqlUpdateMessage, + sqltypes.StringBindVariable(message), + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return err + } + _, err = e.execQuery(ctx, query) + return err +} + +func (e *Executor) updateMySQLTable(ctx context.Context, uuid string, tableName string) error { + query, err := sqlparser.ParseAndBind(sqlUpdateMySQLTable, + sqltypes.StringBindVariable(tableName), + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return err + } + _, err = e.execQuery(ctx, query) + return err +} + +func (e *Executor) updateETASeconds(ctx context.Context, uuid string, etaSeconds int64) error { + query, err := sqlparser.ParseAndBind(sqlUpdateMigrationETASeconds, + sqltypes.Int64BindVariable(etaSeconds), + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return err + } + _, err = e.execQuery(ctx, query) return err } @@ -1487,36 +2443,53 @@ func (e *Executor) updateMigrationProgress(ctx context.Context, uuid string, pro // In both cases there's nothing to update return nil } - parsed := sqlparser.BuildParsedQuery(sqlUpdateMigrationProgress, "_vt", - ":migration_progress", - ":migration_uuid", + query, err := sqlparser.ParseAndBind(sqlUpdateMigrationProgress, + sqltypes.Float64BindVariable(progress), + sqltypes.StringBindVariable(uuid), ) - bindVars := map[string]*querypb.BindVariable{ - "migration_progress": sqltypes.Float64BindVariable(progress), - "migration_uuid": sqltypes.StringBindVariable(uuid), - } - bound, err 
:= parsed.GenerateQuery(bindVars, nil) if err != nil { return err } - _, err = e.execQuery(ctx, bound) + _, err = e.execQuery(ctx, query) return err } -func (e *Executor) retryMigration(ctx context.Context, whereExpr string) (result *sqltypes.Result, err error) { +// retryMigrationWhere retries a migration based on a given WHERE clause +func (e *Executor) retryMigrationWhere(ctx context.Context, whereExpr string) (result *sqltypes.Result, err error) { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - parsed := sqlparser.BuildParsedQuery(sqlRetryMigration, "_vt", ":tablet", whereExpr) + parsed := sqlparser.BuildParsedQuery(sqlRetryMigrationWhere, ":tablet", whereExpr) bindVars := map[string]*querypb.BindVariable{ "tablet": sqltypes.StringBindVariable(e.TabletAliasString()), } bound, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } result, err = e.execQuery(ctx, bound) return result, err } +// RetryMigration marks given migration for retry +func (e *Executor) RetryMigration(ctx context.Context, uuid string) (result *sqltypes.Result, err error) { + if !schema.IsOnlineDDLUUID(uuid) { + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in RETRY: %s", uuid) + } + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + + query, err := sqlparser.ParseAndBind(sqlRetryMigration, + sqltypes.StringBindVariable(e.TabletAliasString()), + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return nil, err + } + return e.execQuery(ctx, query) +} + // onSchemaMigrationStatus is called when a status is set/changed for a running migration -func (e *Executor) onSchemaMigrationStatus(ctx context.Context, uuid string, status schema.OnlineDDLStatus, dryRun bool, progressPct float64) (err error) { +func (e *Executor) onSchemaMigrationStatus(ctx context.Context, uuid string, status schema.OnlineDDLStatus, dryRun bool, progressPct float64, etaSeconds int64) (err error) { if dryRun && status != 
schema.OnlineDDLStatusFailed { // We don't consider dry-run reports unless there's a failure return nil @@ -1552,6 +2525,9 @@ func (e *Executor) onSchemaMigrationStatus(ctx context.Context, uuid string, sta if err = e.updateMigrationProgress(ctx, uuid, progressPct); err != nil { return err } + if err = e.updateETASeconds(ctx, uuid, etaSeconds); err != nil { + return err + } if !dryRun { switch status { @@ -1564,18 +2540,23 @@ func (e *Executor) onSchemaMigrationStatus(ctx context.Context, uuid string, sta } // OnSchemaMigrationStatus is called by TabletServer's API, which is invoked by a running gh-ost migration's hooks. -func (e *Executor) OnSchemaMigrationStatus(ctx context.Context, uuidParam, statusParam, dryrunParam, progressParam string) (err error) { +func (e *Executor) OnSchemaMigrationStatus(ctx context.Context, uuidParam, statusParam, dryrunParam, progressParam, etaParam string) (err error) { status := schema.OnlineDDLStatus(statusParam) dryRun := (dryrunParam == "true") var progressPct float64 - if pct, err := strconv.ParseFloat(progressParam, 32); err == nil { + if pct, err := strconv.ParseFloat(progressParam, 64); err == nil { progressPct = pct } + var etaSeconds int64 = etaSecondsUnknown + if eta, err := strconv.ParseInt(etaParam, 10, 64); err == nil { + etaSeconds = eta + } - return e.onSchemaMigrationStatus(ctx, uuidParam, status, dryRun, progressPct) + return e.onSchemaMigrationStatus(ctx, uuidParam, status, dryRun, progressPct, etaSeconds) } // VExec is called by a VExec invocation +// Implements vitess.io/vitess/go/vt/vttablet/vexec.Executor interface func (e *Executor) VExec(ctx context.Context, vx *vexec.TabletVExec) (qr *querypb.QueryResult, err error) { response := func(result *sqltypes.Result, err error) (*querypb.QueryResult, error) { if err != nil { @@ -1630,7 +2611,7 @@ func (e *Executor) VExec(ctx context.Context, vx *vexec.TabletVExec) (qr *queryp } switch statusVal { case retryMigrationHint: - return response(e.retryMigration(ctx, 
sqlparser.String(stmt.Where.Expr))) + return response(e.retryMigrationWhere(ctx, sqlparser.String(stmt.Where.Expr))) case cancelMigrationHint: uuid, err := vx.ColumnStringVal(vx.WhereCols, "migration_uuid") if err != nil { @@ -1639,13 +2620,13 @@ func (e *Executor) VExec(ctx context.Context, vx *vexec.TabletVExec) (qr *queryp if !schema.IsOnlineDDLUUID(uuid) { return nil, fmt.Errorf("Not an Online DDL UUID: %s", uuid) } - return response(e.cancelMigration(ctx, uuid, true)) + return response(e.CancelMigration(ctx, uuid, true, "cancel by user")) case cancelAllMigrationHint: uuid, _ := vx.ColumnStringVal(vx.WhereCols, "migration_uuid") if uuid != "" { return nil, fmt.Errorf("Unexpetced UUID: %s", uuid) } - return response(e.cancelPendingMigrations(ctx)) + return response(e.CancelPendingMigrations(ctx, "cancel-all by user")) default: return nil, fmt.Errorf("Unexpected value for migration_status: %v. Supported values are: %s, %s", statusVal, retryMigrationHint, cancelMigrationHint) diff --git a/go/vt/sqlparser/fuzz.go b/go/vt/vttablet/onlineddl/executor_test.go similarity index 77% rename from go/vt/sqlparser/fuzz.go rename to go/vt/vttablet/onlineddl/executor_test.go index 35699150592..3d521ff4f59 100644 --- a/go/vt/sqlparser/fuzz.go +++ b/go/vt/vttablet/onlineddl/executor_test.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package sqlparser +/* +Functionality of this Executor is tested in go/test/endtoend/onlineddl/... 
+*/ -func Fuzz(data []byte) int { - _, err := Parse(string(data)) - if err != nil { - return 0 - } - return 1 -} +package onlineddl diff --git a/go/vt/vttablet/onlineddl/ghost.go b/go/vt/vttablet/onlineddl/ghost.go index 40234c87f9b..5dd19a81a3f 100644 --- a/go/vt/vttablet/onlineddl/ghost.go +++ b/go/vt/vttablet/onlineddl/ghost.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2020 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,18 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package heartbeat contains a writer and reader of heartbeats for a master-replica cluster. -// This is similar to Percona's pt-heartbeat, and is meant to supplement the information -// returned from SHOW SLAVE STATUS. In some circumstances, lag returned from SHOW SLAVE STATUS -// is incorrect and is at best only at 1 second resolution. The heartbeat package directly -// tests replication by writing a record with a timestamp on the master, and comparing that -// timestamp after reading it on the replica. This happens at the interval defined by heartbeat_interval. -// Note: the lag reported will be affected by clock drift, so it is recommended to run ntpd or similar. -// -// The data collected by the heartbeat package is made available in /debug/vars in counters prefixed by Heartbeat*. -// It's additionally used as a source for healthchecks and will impact the serving state of a tablet, if enabled. -// The heartbeat interval is purposefully kept distinct from the health check interval because lag measurement -// requires more frequent polling that the healthcheck typically is configured for. 
package onlineddl import ( diff --git a/go/vt/vttablet/onlineddl/schema.go b/go/vt/vttablet/onlineddl/schema.go index 322b5cf8b8c..f96a75c505f 100644 --- a/go/vt/vttablet/onlineddl/schema.go +++ b/go/vt/vttablet/onlineddl/schema.go @@ -16,15 +16,10 @@ limitations under the License. package onlineddl -import ( - "fmt" -) - const ( // SchemaMigrationsTableName is used by VExec interceptor to call the correct handler - SchemaMigrationsTableName = "schema_migrations" - sqlCreateSidecarDB = "create database if not exists %s" - sqlCreateSchemaMigrationsTable = `CREATE TABLE IF NOT EXISTS %s.schema_migrations ( + sqlCreateSidecarDB = "create database if not exists _vt" + sqlCreateSchemaMigrationsTable = `CREATE TABLE IF NOT EXISTS _vt.schema_migrations ( id bigint(20) unsigned NOT NULL AUTO_INCREMENT, migration_uuid varchar(64) NOT NULL, keyspace varchar(256) NOT NULL, @@ -50,16 +45,19 @@ const ( KEY status_idx (migration_status, liveness_timestamp), KEY cleanup_status_idx (cleanup_timestamp, migration_status) ) engine=InnoDB DEFAULT CHARSET=utf8mb4` - alterSchemaMigrationsTableRetries = "ALTER TABLE %s.schema_migrations add column retries int unsigned NOT NULL DEFAULT 0" - alterSchemaMigrationsTableTablet = "ALTER TABLE %s.schema_migrations add column tablet varchar(128) NOT NULL DEFAULT ''" - alterSchemaMigrationsTableArtifacts = "ALTER TABLE %s.schema_migrations modify artifacts TEXT NOT NULL" - alterSchemaMigrationsTableTabletFailure = "ALTER TABLE %s.schema_migrations add column tablet_failure tinyint unsigned NOT NULL DEFAULT 0" - alterSchemaMigrationsTableTabletFailureIndex = "ALTER TABLE %s.schema_migrations add KEY tablet_failure_idx (tablet_failure, migration_status, retries)" - alterSchemaMigrationsTableProgress = "ALTER TABLE %s.schema_migrations add column progress float NOT NULL DEFAULT 0" - alterSchemaMigrationsTableContext = "ALTER TABLE %s.schema_migrations add column migration_context varchar(1024) NOT NULL DEFAULT ''" - 
alterSchemaMigrationsTableDDLAction = "ALTER TABLE %s.schema_migrations add column ddl_action varchar(16) NOT NULL DEFAULT ''" + alterSchemaMigrationsTableRetries = "ALTER TABLE _vt.schema_migrations add column retries int unsigned NOT NULL DEFAULT 0" + alterSchemaMigrationsTableTablet = "ALTER TABLE _vt.schema_migrations add column tablet varchar(128) NOT NULL DEFAULT ''" + alterSchemaMigrationsTableArtifacts = "ALTER TABLE _vt.schema_migrations modify artifacts TEXT NOT NULL" + alterSchemaMigrationsTableTabletFailure = "ALTER TABLE _vt.schema_migrations add column tablet_failure tinyint unsigned NOT NULL DEFAULT 0" + alterSchemaMigrationsTableTabletFailureIndex = "ALTER TABLE _vt.schema_migrations add KEY tablet_failure_idx (tablet_failure, migration_status, retries)" + alterSchemaMigrationsTableProgress = "ALTER TABLE _vt.schema_migrations add column progress float NOT NULL DEFAULT 0" + alterSchemaMigrationsTableContext = "ALTER TABLE _vt.schema_migrations add column migration_context varchar(1024) NOT NULL DEFAULT ''" + alterSchemaMigrationsTableDDLAction = "ALTER TABLE _vt.schema_migrations add column ddl_action varchar(16) NOT NULL DEFAULT ''" + alterSchemaMigrationsTableMessage = "ALTER TABLE _vt.schema_migrations add column message TEXT NOT NULL" + alterSchemaMigrationsTableTableCompleteIndex = "ALTER TABLE _vt.schema_migrations add KEY table_complete_idx (migration_status, keyspace(64), mysql_table(64), completed_timestamp)" + alterSchemaMigrationsTableETASeconds = "ALTER TABLE _vt.schema_migrations add column eta_seconds bigint NOT NULL DEFAULT -1" - sqlScheduleSingleMigration = `UPDATE %s.schema_migrations + sqlScheduleSingleMigration = `UPDATE _vt.schema_migrations SET migration_status='ready', ready_timestamp=NOW() @@ -69,42 +67,67 @@ const ( requested_timestamp ASC LIMIT 1 ` - sqlUpdateMigrationStatus = `UPDATE %s.schema_migrations + sqlUpdateMySQLTable = `UPDATE _vt.schema_migrations + SET mysql_table=%a + WHERE + migration_uuid=%a + ` + 
sqlUpdateMigrationStatus = `UPDATE _vt.schema_migrations SET migration_status=%a WHERE migration_uuid=%a ` - sqlUpdateMigrationProgress = `UPDATE %s.schema_migrations + sqlUpdateMigrationProgress = `UPDATE _vt.schema_migrations SET progress=%a WHERE migration_uuid=%a ` - sqlUpdateMigrationStartedTimestamp = `UPDATE %s.schema_migrations + sqlUpdateMigrationETASeconds = `UPDATE _vt.schema_migrations + SET eta_seconds=%a + WHERE + migration_uuid=%a + ` + sqlUpdateMigrationStartedTimestamp = `UPDATE _vt.schema_migrations SET started_timestamp=IFNULL(started_timestamp, NOW()) WHERE migration_uuid=%a ` - sqlUpdateMigrationTimestamp = `UPDATE %s.schema_migrations + sqlUpdateMigrationTimestamp = `UPDATE _vt.schema_migrations SET %s=NOW() WHERE migration_uuid=%a ` - sqlUpdateMigrationLogPath = `UPDATE %s.schema_migrations + sqlUpdateMigrationLogPath = `UPDATE _vt.schema_migrations SET log_path=%a WHERE migration_uuid=%a ` - sqlUpdateArtifacts = `UPDATE %s.schema_migrations + sqlUpdateArtifacts = `UPDATE _vt.schema_migrations SET artifacts=concat(%a, ',', artifacts) WHERE migration_uuid=%a ` - sqlUpdateTabletFailure = `UPDATE %s.schema_migrations + sqlClearArtifacts = `UPDATE _vt.schema_migrations + SET artifacts='' + WHERE + migration_uuid=%a + ` + sqlUpdateTabletFailure = `UPDATE _vt.schema_migrations SET tablet_failure=1 WHERE migration_uuid=%a ` - sqlRetryMigration = `UPDATE %s.schema_migrations + sqlUpdateDDLAction = `UPDATE _vt.schema_migrations + SET ddl_action=%a + WHERE + migration_uuid=%a + ` + sqlUpdateMessage = `UPDATE _vt.schema_migrations + SET message=%a + WHERE + migration_uuid=%a + ` + sqlRetryMigrationWhere = `UPDATE _vt.schema_migrations SET migration_status='queued', tablet=%a, @@ -120,44 +143,72 @@ const ( AND (%s) LIMIT 1 ` + sqlRetryMigration = `UPDATE _vt.schema_migrations + SET + migration_status='queued', + tablet=%a, + retries=retries + 1, + tablet_failure=0, + ready_timestamp=NULL, + started_timestamp=NULL, + liveness_timestamp=NULL, + 
completed_timestamp=NULL, + cleanup_timestamp=NULL + WHERE + migration_status IN ('failed', 'cancelled') + AND migration_uuid=%a + ` sqlWhereTabletFailure = ` tablet_failure=1 AND migration_status='failed' AND retries=0 ` sqlSelectRunningMigrations = `SELECT - migration_uuid - FROM %s.schema_migrations + migration_uuid, + strategy + FROM _vt.schema_migrations WHERE migration_status='running' - AND strategy=%a + ` + sqlSelectCompleteMigrationsOnTable = `SELECT + migration_uuid, + strategy + FROM _vt.schema_migrations + WHERE + migration_status='complete' + AND keyspace=%a + AND mysql_table=%a + ORDER BY + completed_timestamp DESC + LIMIT 1 ` sqlSelectCountReadyMigrations = `SELECT count(*) as count_ready - FROM %s.schema_migrations + FROM _vt.schema_migrations WHERE migration_status='ready' ` sqlSelectStaleMigrations = `SELECT migration_uuid - FROM %s.schema_migrations + FROM _vt.schema_migrations WHERE migration_status='running' AND liveness_timestamp < NOW() - INTERVAL %a MINUTE ` sqlSelectPendingMigrations = `SELECT migration_uuid - FROM %s.schema_migrations + FROM _vt.schema_migrations WHERE migration_status IN ('queued', 'ready', 'running') ` sqlSelectUncollectedArtifacts = `SELECT migration_uuid, artifacts - FROM %s.schema_migrations + FROM _vt.schema_migrations WHERE migration_status IN ('complete', 'failed') AND cleanup_timestamp IS NULL + AND completed_timestamp <= NOW() - INTERVAL %a SECOND ` sqlSelectMigration = `SELECT id, @@ -177,8 +228,10 @@ const ( migration_status, log_path, retries, + ddl_action, + artifacts, tablet - FROM %s.schema_migrations + FROM _vt.schema_migrations WHERE migration_uuid=%a ` @@ -200,8 +253,10 @@ const ( migration_status, log_path, retries, + ddl_action, + artifacts, tablet - FROM %s.schema_migrations + FROM _vt.schema_migrations WHERE migration_status='ready' LIMIT 1 @@ -216,8 +271,37 @@ const ( AND ACTION_TIMING='AFTER' AND LEFT(TRIGGER_NAME, 7)='pt_osc_' ` - sqlDropTrigger = "DROP TRIGGER IF EXISTS `%a`.`%a`" - 
sqlShowTablesLike = "SHOW TABLES LIKE '%a'" + sqlDropTrigger = "DROP TRIGGER IF EXISTS `%a`.`%a`" + sqlShowTablesLike = "SHOW TABLES LIKE '%a'" + sqlCreateTableLike = "CREATE TABLE `%a` LIKE `%a`" + sqlDropTable = "DROP TABLE `%a`" + sqlAlterTableOptions = "ALTER TABLE `%a` %s" + sqlShowColumnsFrom = "SHOW COLUMNS FROM `%a`" + sqlStartVReplStream = "UPDATE _vt.vreplication set state='Running' where db_name=%a and workflow=%a" + sqlStopVReplStream = "UPDATE _vt.vreplication set state='Stopped' where db_name=%a and workflow=%a" + sqlDeleteVReplStream = "DELETE FROM _vt.vreplication where db_name=%a and workflow=%a" + sqlReadVReplStream = `SELECT + id, + workflow, + source, + pos, + time_updated, + transaction_timestamp, + state, + message + FROM _vt.vreplication + WHERE + workflow=%a + + ` + sqlReadCountCopyState = `SELECT + count(*) as cnt + FROM + _vt.copy_state + WHERE vrepl_id=%a + ` + sqlSwapTables = "RENAME TABLE `%a` TO `%a`, `%a` TO `%a`, `%a` TO `%a`" + sqlRenameTable = "RENAME TABLE `%a` TO `%a`" ) const ( @@ -239,14 +323,17 @@ var ( ) var applyDDL = []string{ - fmt.Sprintf(sqlCreateSidecarDB, "_vt"), - fmt.Sprintf(sqlCreateSchemaMigrationsTable, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableRetries, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableTablet, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableArtifacts, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableTabletFailure, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableTabletFailureIndex, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableProgress, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableContext, "_vt"), - fmt.Sprintf(alterSchemaMigrationsTableDDLAction, "_vt"), + sqlCreateSidecarDB, + sqlCreateSchemaMigrationsTable, + alterSchemaMigrationsTableRetries, + alterSchemaMigrationsTableTablet, + alterSchemaMigrationsTableArtifacts, + alterSchemaMigrationsTableTabletFailure, + alterSchemaMigrationsTableTabletFailureIndex, + alterSchemaMigrationsTableProgress, + alterSchemaMigrationsTableContext, + 
alterSchemaMigrationsTableDDLAction, + alterSchemaMigrationsTableMessage, + alterSchemaMigrationsTableTableCompleteIndex, + alterSchemaMigrationsTableETASeconds, } diff --git a/go/vt/vttablet/onlineddl/util.go b/go/vt/vttablet/onlineddl/util.go index e901fe7dabd..00c7f3fd844 100644 --- a/go/vt/vttablet/onlineddl/util.go +++ b/go/vt/vttablet/onlineddl/util.go @@ -25,6 +25,7 @@ import ( "io/ioutil" "os/exec" "path/filepath" + "strings" "time" "vitess.io/vitess/go/vt/log" @@ -55,7 +56,7 @@ func execCmd(name string, args, env []string, dir string, input io.Reader, outpu } err = cmd.Run() if err != nil { - err = fmt.Errorf("execCmd failed: %v, %v", name, err) + err = fmt.Errorf("failed running command: %v %s; error=%v", name, strings.Join(args, " "), err) log.Errorf(err.Error()) } log.Infof("execCmd success: %v", name) diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go new file mode 100644 index 00000000000..4b300e61236 --- /dev/null +++ b/go/vt/vttablet/onlineddl/vrepl.go @@ -0,0 +1,370 @@ +/* + Original copyright by GitHub as follows. Additions by the Vitess authors as follows. +*/ +/* + Copyright 2016 GitHub Inc. + See https://github.com/github/gh-ost/blob/master/LICENSE +*/ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package onlineddl + +import ( + "context" + "fmt" + "math" + "strconv" + "strings" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/dbconnpool" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/onlineddl/vrepl" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" +) + +// VReplStream represents a row in _vt.vreplication table +type VReplStream struct { + id int64 + workflow string + source string + pos string + timeUpdated int64 + transactionTimestamp int64 + state string + message string + bls *binlogdatapb.BinlogSource +} + +// VRepl is an online DDL helper for VReplication based migrations (ddl_strategy="online") +type VRepl struct { + workflow string + keyspace string + shard string + dbName string + sourceTable string + targetTable string + pos string + alterOptions string + + sharedPKColumns *vrepl.ColumnList + + sourceSharedColumns *vrepl.ColumnList + targetSharedColumns *vrepl.ColumnList + sharedColumnsMap map[string]string + + filterQuery string + bls *binlogdatapb.BinlogSource + + parser *vrepl.AlterTableParser +} + +// NewVRepl creates a VReplication handler for Online DDL +func NewVRepl(workflow, keyspace, shard, dbName, sourceTable, targetTable, alterOptions string) *VRepl { + return &VRepl{ + workflow: workflow, + keyspace: keyspace, + shard: shard, + dbName: dbName, + sourceTable: sourceTable, + targetTable: targetTable, + alterOptions: alterOptions, + parser: vrepl.NewAlterTableParser(), + } +} + +// getCandidateUniqueKeys investigates a table and returns the list of unique keys +// candidate for chunking +func (v *VRepl) getCandidateUniqueKeys(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) (uniqueKeys [](*vrepl.UniqueKey), err error) { + + query, err := 
sqlparser.ParseAndBind(sqlShowColumnsFrom, + sqltypes.StringBindVariable(v.dbName), + sqltypes.StringBindVariable(tableName), + sqltypes.StringBindVariable(v.dbName), + sqltypes.StringBindVariable(tableName), + ) + if err != nil { + return uniqueKeys, err + } + + rs, err := conn.ExecuteFetch(query, math.MaxInt64, true) + if err != nil { + return nil, err + } + for _, row := range rs.Named().Rows { + uniqueKey := &vrepl.UniqueKey{ + Name: row.AsString("INDEX_NAME", ""), + Columns: *vrepl.ParseColumnList(row.AsString("COLUMN_NAMES", "")), + HasNullable: row.AsBool("has_nullable", false), + IsAutoIncrement: row.AsBool("is_auto_increment", false), + } + uniqueKeys = append(uniqueKeys, uniqueKey) + } + return uniqueKeys, nil +} + +// readTableColumns reads column list from given table +func (v *VRepl) readTableColumns(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) (columns *vrepl.ColumnList, virtualColumns *vrepl.ColumnList, pkColumns *vrepl.ColumnList, err error) { + parsed := sqlparser.BuildParsedQuery(sqlShowColumnsFrom, tableName) + rs, err := conn.ExecuteFetch(parsed.Query, math.MaxInt64, true) + if err != nil { + return nil, nil, nil, err + } + columnNames := []string{} + virtualColumnNames := []string{} + pkColumnNames := []string{} + for _, row := range rs.Named().Rows { + columnName := row.AsString("Field", "") + columnNames = append(columnNames, columnName) + + extra := row.AsString("Extra", "") + if strings.Contains(extra, " GENERATED") { + virtualColumnNames = append(virtualColumnNames, columnName) + } + + key := row.AsString("Key", "") + if key == "PRI" { + pkColumnNames = append(pkColumnNames, columnName) + } + } + if len(columnNames) == 0 { + return nil, nil, nil, fmt.Errorf("Found 0 columns on `%s`", tableName) + } + return vrepl.NewColumnList(columnNames), vrepl.NewColumnList(virtualColumnNames), vrepl.NewColumnList(pkColumnNames), nil +} + +// getSharedColumns returns the intersection of two lists of columns in same order as the 
first list +func (v *VRepl) getSharedColumns(sourceColumns, targetColumns *vrepl.ColumnList, sourceVirtualColumns, targetVirtualColumns *vrepl.ColumnList, columnRenameMap map[string]string) ( + sourceSharedColumns *vrepl.ColumnList, targetSharedColumns *vrepl.ColumnList, sharedColumnsMap map[string]string, +) { + sharedColumnNames := []string{} + for _, sourceColumn := range sourceColumns.Names() { + isSharedColumn := false + for _, targetColumn := range targetColumns.Names() { + if strings.EqualFold(sourceColumn, targetColumn) { + // both tables have this column. Good start. + isSharedColumn = true + break + } + if strings.EqualFold(columnRenameMap[sourceColumn], targetColumn) { + // column in source is renamed in target + isSharedColumn = true + break + } + } + for _, virtualColumn := range sourceVirtualColumns.Names() { + // virtual/generated columns on source are silently skipped + if strings.EqualFold(sourceColumn, virtualColumn) { + isSharedColumn = false + } + } + for _, virtualColumn := range targetVirtualColumns.Names() { + // virtual/generated columns on target are silently skipped + if strings.EqualFold(sourceColumn, virtualColumn) { + isSharedColumn = false + } + } + if isSharedColumn { + sharedColumnNames = append(sharedColumnNames, sourceColumn) + } + } + sharedColumnsMap = map[string]string{} + for _, columnName := range sharedColumnNames { + if mapped, ok := columnRenameMap[columnName]; ok { + sharedColumnsMap[columnName] = mapped + } else { + sharedColumnsMap[columnName] = columnName + } + } + mappedSharedColumnNames := []string{} + for _, columnName := range sharedColumnNames { + mappedSharedColumnNames = append(mappedSharedColumnNames, sharedColumnsMap[columnName]) + } + return vrepl.NewColumnList(sharedColumnNames), vrepl.NewColumnList(mappedSharedColumnNames), sharedColumnsMap +} + +// getSharedPKColumns returns the intersection of PRIMARY KEY columns (taking renaming into consideration) between source and target tables +func (v *VRepl) 
getSharedPKColumns(sourcePKColumns, targetPKColumns *vrepl.ColumnList, columnRenameMap map[string]string) ( + sharedPKColumns *vrepl.ColumnList, +) { + sharedColumnNames := []string{} + for _, sourceColumn := range sourcePKColumns.Names() { + isSharedColumn := false + for _, targetColumn := range targetPKColumns.Names() { + if strings.EqualFold(sourceColumn, targetColumn) { + // both tables have this column. Good start. + isSharedColumn = true + break + } + if strings.EqualFold(columnRenameMap[sourceColumn], targetColumn) { + // column in source is renamed in target + isSharedColumn = true + break + } + } + if isSharedColumn { + sharedColumnNames = append(sharedColumnNames, sourceColumn) + } + } + return vrepl.NewColumnList(sharedColumnNames) +} + +// getSharedUniqueKeys returns the intersection of two given unique keys, +// testing by list of columns +func (v *VRepl) getSharedUniqueKeys(sourceUniqueKeys, targetUniqueKeys [](*vrepl.UniqueKey)) (uniqueKeys [](*vrepl.UniqueKey), err error) { + // We actually do NOT rely on key name, just on the set of columns. This is because maybe + // the ALTER is on the name itself... 
+	for _, sourceUniqueKey := range sourceUniqueKeys {
+		for _, targetUniqueKey := range targetUniqueKeys {
+			if sourceUniqueKey.Columns.EqualsByNames(&targetUniqueKey.Columns) {
+				uniqueKeys = append(uniqueKeys, sourceUniqueKey)
+			}
+		}
+	}
+	return uniqueKeys, nil
+}
+
+// analyzeAlter parses the ALTER TABLE options and rejects a table RENAME,
+// which this migration flow does not support.
+func (v *VRepl) analyzeAlter(ctx context.Context) error {
+	if err := v.parser.ParseAlterStatement(v.alterOptions); err != nil {
+		return err
+	}
+	if v.parser.IsRenameTable() {
+		// fixed typo in user-facing error message ("aupported" -> "supported")
+		return fmt.Errorf("Renaming the table is not supported in ALTER TABLE: %s", v.alterOptions)
+	}
+	return nil
+}
+
+// analyzeTables reads the columns of the source and target tables and evaluates
+// their shared columns and shared PRIMARY KEY columns, taking column renames
+// into account. It fails when the two tables share no PRIMARY KEY columns.
+func (v *VRepl) analyzeTables(ctx context.Context, conn *dbconnpool.DBConnection) error {
+	// columns:
+	sourceColumns, sourceVirtualColumns, sourcePKColumns, err := v.readTableColumns(ctx, conn, v.sourceTable)
+	if err != nil {
+		return err
+	}
+	targetColumns, targetVirtualColumns, targetPKColumns, err := v.readTableColumns(ctx, conn, v.targetTable)
+	if err != nil {
+		return err
+	}
+	v.sourceSharedColumns, v.targetSharedColumns, v.sharedColumnsMap = v.getSharedColumns(sourceColumns, targetColumns, sourceVirtualColumns, targetVirtualColumns, v.parser.ColumnRenameMap())
+
+	v.sharedPKColumns = v.getSharedPKColumns(sourcePKColumns, targetPKColumns, v.parser.ColumnRenameMap())
+	if v.sharedPKColumns.Len() == 0 {
+		// TODO(shlomi): need to carefully examine what happens when we extend/reduce a PRIMARY KEY
+		// is a column subset OK?
+		return fmt.Errorf("Found no shared PRIMARY KEY columns between `%s` and `%s`", v.sourceTable, v.targetTable)
+	}
+
+	return nil
+}
+
+// generateFilterQuery creates a SELECT query used by vreplication as a filter. It SELECTs all
+// non-generated columns between source & target tables, and takes care of column renames.
+func (v *VRepl) generateFilterQuery(ctx context.Context) error {
+	if v.sourceSharedColumns.Len() == 0 {
+		return fmt.Errorf("Empty column list")
+	}
+	var sb strings.Builder
+	sb.WriteString("select ")
+	for i, name := range v.sourceSharedColumns.Names() {
+		targetName := v.sharedColumnsMap[name]
+
+		if i > 0 {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(escapeName(name))
+		sb.WriteString(" as ")
+		sb.WriteString(escapeName(targetName))
+	}
+	sb.WriteString(" from ")
+	sb.WriteString(escapeName(v.sourceTable))
+
+	v.filterQuery = sb.String()
+	return nil
+}
+
+// analyzeBinlogSource constructs the BinlogSource definition (keyspace, shard,
+// and a single filter rule mapping the target table to the filter query)
+// consumed by vreplication.
+func (v *VRepl) analyzeBinlogSource(ctx context.Context) {
+	bls := &binlogdatapb.BinlogSource{
+		Keyspace:      v.keyspace,
+		Shard:         v.shard,
+		Filter:        &binlogdatapb.Filter{},
+		StopAfterCopy: false,
+	}
+	rule := &binlogdatapb.Rule{
+		Match:  v.targetTable,
+		Filter: v.filterQuery,
+	}
+	bls.Filter.Rules = append(bls.Filter.Rules, rule)
+	v.bls = bls
+}
+
+// analyze runs the complete analysis flow: ALTER statement parsing, table and
+// column analysis, filter query generation, and binlog source construction.
+func (v *VRepl) analyze(ctx context.Context, conn *dbconnpool.DBConnection) error {
+	if err := v.analyzeAlter(ctx); err != nil {
+		return err
+	}
+	if err := v.analyzeTables(ctx, conn); err != nil {
+		return err
+	}
+	if err := v.generateFilterQuery(ctx); err != nil {
+		return err
+	}
+	v.analyzeBinlogSource(ctx)
+	return nil
+}
+
+// generateInsertStatement generates the INSERT INTO _vt.replication statement that creates the vreplication workflow
+func (v *VRepl) generateInsertStatement(ctx context.Context) (string, error) {
+	ig := vreplication.NewInsertGenerator(binlogplayer.BlpStopped, v.dbName)
+	ig.AddRow(v.workflow, v.bls, v.pos, "", "MASTER")
+
+	return ig.String(), nil
+}
+
+// generateStartStatement generates the statement to start VReplication running on the workflow
+func (v *VRepl) generateStartStatement(ctx context.Context) (string, error) {
+	return sqlparser.ParseAndBind(sqlStartVReplStream,
+		sqltypes.StringBindVariable(v.dbName),
+		sqltypes.StringBindVariable(v.workflow),
+	)
+}
+
+// getVreplTable extracts the table name targeted by the given vreplication stream.
+func getVreplTable(ctx context.Context, s *VReplStream)
(string, error) { + // sanity checks: + if s == nil { + return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No vreplication stream migration %s", s.workflow) + } + if s.bls.Filter == nil { + return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No binlog source filter for migration %s", s.workflow) + } + if len(s.bls.Filter.Rules) != 1 { + return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Cannot detect filter rules for migration/vreplication %+v", s.workflow) + } + vreplTable := s.bls.Filter.Rules[0].Match + return vreplTable, nil +} + +// escapeName will escape a db/table/column/... name by wrapping with backticks. +// It is not fool proof. I'm just trying to do the right thing here, not solving +// SQL injection issues, which should be irrelevant for this tool. +func escapeName(name string) string { + if unquoted, err := strconv.Unquote(name); err == nil { + name = unquoted + } + return fmt.Sprintf("`%s`", name) +} diff --git a/go/vt/vttablet/onlineddl/vrepl/encoding.go b/go/vt/vttablet/onlineddl/vrepl/encoding.go new file mode 100644 index 00000000000..713c6925878 --- /dev/null +++ b/go/vt/vttablet/onlineddl/vrepl/encoding.go @@ -0,0 +1,23 @@ +/* + Copyright 2016 GitHub Inc. + See https://github.com/github/gh-ost/blob/master/LICENSE +*/ + +package vrepl + +import ( + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/simplifiedchinese" +) + +type charsetEncoding map[string]encoding.Encoding + +var charsetEncodingMap charsetEncoding + +func init() { + charsetEncodingMap = make(map[string]encoding.Encoding) + // Begin mappings + charsetEncodingMap["latin1"] = charmap.Windows1252 + charsetEncodingMap["gbk"] = simplifiedchinese.GBK +} diff --git a/go/vt/vttablet/onlineddl/vrepl/parser.go b/go/vt/vttablet/onlineddl/vrepl/parser.go new file mode 100644 index 00000000000..57689d64498 --- /dev/null +++ b/go/vt/vttablet/onlineddl/vrepl/parser.go @@ -0,0 +1,228 @@ +/* + Copyright 2016 GitHub Inc. 
+ See https://github.com/github/gh-ost/blob/master/LICENSE +*/ + +package vrepl + +import ( + "regexp" + "strconv" + "strings" +) + +var ( + sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')") + renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`) + dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`) + renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`) + autoIncrementRegexp = regexp.MustCompile(`(?i)\bauto_increment[\s]*=[\s]*([0-9]+)`) + alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{ + // ALTER TABLE `scm`.`tbl` something + regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`), + // ALTER TABLE `scm`.tbl something + regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]([\S]+)\s+(.*$)`), + // ALTER TABLE scm.`tbl` something + regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`), + // ALTER TABLE scm.tbl something + regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]([\S]+)\s+(.*$)`), + } + alterTableExplicitTableRegexps = []*regexp.Regexp{ + // ALTER TABLE `tbl` something + regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`), + // ALTER TABLE tbl something + regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`), + } +) + +// AlterTableParser is a parser tool for ALTER TABLE statements +// This is imported from gh-ost. In the future, we should replace that with Vitess parsing. 
+type AlterTableParser struct { + columnRenameMap map[string]string + droppedColumns map[string]bool + isRenameTable bool + isAutoIncrementDefined bool + + alterStatementOptions string + alterTokens []string + + explicitSchema string + explicitTable string +} + +// NewAlterTableParser creates a new parser +func NewAlterTableParser() *AlterTableParser { + return &AlterTableParser{ + columnRenameMap: make(map[string]string), + droppedColumns: make(map[string]bool), + } +} + +// NewParserFromAlterStatement creates a new parser with a ALTER TABLE statement +func NewParserFromAlterStatement(alterStatement string) *AlterTableParser { + parser := NewAlterTableParser() + parser.ParseAlterStatement(alterStatement) + return parser +} + +// tokenizeAlterStatement +func (p *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string, err error) { + terminatingQuote := rune(0) + f := func(c rune) bool { + switch { + case c == terminatingQuote: + terminatingQuote = rune(0) + return false + case terminatingQuote != rune(0): + return false + case c == '\'': + terminatingQuote = c + return false + case c == '(': + terminatingQuote = ')' + return false + default: + return c == ',' + } + } + + tokens = strings.FieldsFunc(alterStatement, f) + for i := range tokens { + tokens[i] = strings.TrimSpace(tokens[i]) + } + return tokens, nil +} + +func (p *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) { + strippedStatement = alterStatement + strippedStatement = sanitizeQuotesRegexp.ReplaceAllString(strippedStatement, "''") + return strippedStatement +} + +// parseAlterToken parses a single ALTER option (e.g. 
a DROP COLUMN)
+func (p *AlterTableParser) parseAlterToken(alterToken string) (err error) {
+	{
+		// rename
+		allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterToken, -1)
+		for _, submatch := range allStringSubmatch {
+			if unquoted, err := strconv.Unquote(submatch[2]); err == nil {
+				submatch[2] = unquoted
+			}
+			if unquoted, err := strconv.Unquote(submatch[3]); err == nil {
+				submatch[3] = unquoted
+			}
+			p.columnRenameMap[submatch[2]] = submatch[3]
+		}
+	}
+	{
+		// drop
+		allStringSubmatch := dropColumnRegexp.FindAllStringSubmatch(alterToken, -1)
+		for _, submatch := range allStringSubmatch {
+			if unquoted, err := strconv.Unquote(submatch[2]); err == nil {
+				submatch[2] = unquoted
+			}
+			p.droppedColumns[submatch[2]] = true
+		}
+	}
+	{
+		// rename table
+		if renameTableRegexp.MatchString(alterToken) {
+			p.isRenameTable = true
+		}
+	}
+	{
+		// auto_increment
+		if autoIncrementRegexp.MatchString(alterToken) {
+			p.isAutoIncrementDefined = true
+		}
+	}
+	return nil
+}
+
+// ParseAlterStatement is the main function of the parser, and parses an ALTER TABLE statement
+func (p *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) {
+	p.alterStatementOptions = alterStatement
+	// First strip an explicit schema-qualified "ALTER TABLE scm.tbl" prefix, then
+	// a table-only "ALTER TABLE tbl" prefix, leaving just the options text.
+	for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps {
+		if submatch := alterTableRegexp.FindStringSubmatch(p.alterStatementOptions); len(submatch) > 0 {
+			p.explicitSchema = submatch[1]
+			p.explicitTable = submatch[2]
+			p.alterStatementOptions = submatch[3]
+			break
+		}
+	}
+	for _, alterTableRegexp := range alterTableExplicitTableRegexps {
+		if submatch := alterTableRegexp.FindStringSubmatch(p.alterStatementOptions); len(submatch) > 0 {
+			p.explicitTable = submatch[1]
+			p.alterStatementOptions = submatch[2]
+			break
+		}
+	}
+	alterTokens, _ := p.tokenizeAlterStatement(p.alterStatementOptions)
+	for _, alterToken := range alterTokens {
+		alterToken = p.sanitizeQuotesFromAlterStatement(alterToken)
+		p.parseAlterToken(alterToken)
+		p.alterTokens =
append(p.alterTokens, alterToken)
+	}
+	return nil
+}
+
+// GetNonTrivialRenames gets a list of renamed column
+func (p *AlterTableParser) GetNonTrivialRenames() map[string]string {
+	result := make(map[string]string)
+	for column, renamed := range p.columnRenameMap {
+		if column != renamed {
+			result[column] = renamed
+		}
+	}
+	return result
+}
+
+// HasNonTrivialRenames is true when columns have been renamed
+func (p *AlterTableParser) HasNonTrivialRenames() bool {
+	return len(p.GetNonTrivialRenames()) > 0
+}
+
+// DroppedColumnsMap returns list of dropped columns
+func (p *AlterTableParser) DroppedColumnsMap() map[string]bool {
+	return p.droppedColumns
+}
+
+// IsRenameTable returns true when the ALTER TABLE statement includes renaming the table
+func (p *AlterTableParser) IsRenameTable() bool {
+	return p.isRenameTable
+}
+
+// IsAutoIncrementDefined returns true when alter options include an explicit AUTO_INCREMENT value
+func (p *AlterTableParser) IsAutoIncrementDefined() bool {
+	return p.isAutoIncrementDefined
+}
+
+// GetExplicitSchema returns the explicit schema, if defined
+func (p *AlterTableParser) GetExplicitSchema() string {
+	return p.explicitSchema
+}
+
+// HasExplicitSchema returns true when the ALTER TABLE statement includes the schema qualifier
+func (p *AlterTableParser) HasExplicitSchema() bool {
+	return p.GetExplicitSchema() != ""
+}
+
+// GetExplicitTable returns the table name
+func (p *AlterTableParser) GetExplicitTable() string {
+	return p.explicitTable
+}
+
+// HasExplicitTable checks if the ALTER TABLE statement has an explicit table name
+func (p *AlterTableParser) HasExplicitTable() bool {
+	return p.GetExplicitTable() != ""
+}
+
+// GetAlterStatementOptions returns the options section in the ALTER TABLE statement
+func (p *AlterTableParser) GetAlterStatementOptions() string {
+	return p.alterStatementOptions
+}
+
+// ColumnRenameMap returns the renamed column mapping
+func (p *AlterTableParser) ColumnRenameMap() map[string]string
{ + return p.columnRenameMap +} diff --git a/go/vt/vttablet/onlineddl/vrepl/parser_test.go b/go/vt/vttablet/onlineddl/vrepl/parser_test.go new file mode 100644 index 00000000000..457ad062e6a --- /dev/null +++ b/go/vt/vttablet/onlineddl/vrepl/parser_test.go @@ -0,0 +1,319 @@ +/* + Copyright 2016 GitHub Inc. + See https://github.com/github/gh-ost/blob/master/LICENSE +*/ + +package vrepl + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseAlterStatement(t *testing.T) { + statement := "add column t int, engine=innodb" + parser := NewAlterTableParser() + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.alterStatementOptions, statement) + assert.False(t, parser.HasNonTrivialRenames()) + assert.False(t, parser.IsAutoIncrementDefined()) +} + +func TestParseAlterStatementTrivialRename(t *testing.T) { + statement := "add column t int, change ts ts timestamp, engine=innodb" + parser := NewAlterTableParser() + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.alterStatementOptions, statement) + assert.False(t, parser.HasNonTrivialRenames()) + assert.False(t, parser.IsAutoIncrementDefined()) + assert.Equal(t, len(parser.columnRenameMap), 1) + assert.Equal(t, parser.columnRenameMap["ts"], "ts") +} + +func TestParseAlterStatementWithAutoIncrement(t *testing.T) { + + statements := []string{ + "auto_increment=7", + "auto_increment = 7", + "AUTO_INCREMENT = 71", + "add column t int, change ts ts timestamp, auto_increment=7 engine=innodb", + "add column t int, change ts ts timestamp, auto_increment =7 engine=innodb", + "add column t int, change ts ts timestamp, AUTO_INCREMENT = 7 engine=innodb", + "add column t int, change ts ts timestamp, engine=innodb auto_increment=73425", + } + for _, statement := range statements { + parser := NewAlterTableParser() + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + 
assert.Equal(t, parser.alterStatementOptions, statement) + assert.True(t, parser.IsAutoIncrementDefined()) + } +} + +func TestParseAlterStatementTrivialRenames(t *testing.T) { + statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb" + parser := NewAlterTableParser() + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.alterStatementOptions, statement) + assert.False(t, parser.HasNonTrivialRenames()) + assert.False(t, parser.IsAutoIncrementDefined()) + assert.Equal(t, len(parser.columnRenameMap), 2) + assert.Equal(t, parser.columnRenameMap["ts"], "ts") + assert.Equal(t, parser.columnRenameMap["f"], "f") +} + +func TestParseAlterStatementNonTrivial(t *testing.T) { + statements := []string{ + `add column b bigint, change f fl float, change i count int, engine=innodb`, + "add column b bigint, change column `f` fl float, change `i` `count` int, engine=innodb", + "add column b bigint, change column `f` fl float, change `i` `count` int, change ts ts timestamp, engine=innodb", + `change + f fl float, + CHANGE COLUMN i + count int, engine=innodb`, + } + + for _, statement := range statements { + parser := NewAlterTableParser() + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.False(t, parser.IsAutoIncrementDefined()) + assert.Equal(t, parser.alterStatementOptions, statement) + renames := parser.GetNonTrivialRenames() + assert.Equal(t, len(renames), 2) + assert.Equal(t, renames["i"], "count") + assert.Equal(t, renames["f"], "fl") + } +} + +func TestTokenizeAlterStatement(t *testing.T) { + parser := NewAlterTableParser() + { + alterStatement := "add column t int" + tokens, _ := parser.tokenizeAlterStatement(alterStatement) + assert.True(t, reflect.DeepEqual(tokens, []string{"add column t int"})) + } + { + alterStatement := "add column t int, change column i int" + tokens, _ := parser.tokenizeAlterStatement(alterStatement) + assert.True(t, reflect.DeepEqual(tokens, 
[]string{"add column t int", "change column i int"})) + } + { + alterStatement := "add column t int, change column i int 'some comment'" + tokens, _ := parser.tokenizeAlterStatement(alterStatement) + assert.True(t, reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment'"})) + } + { + alterStatement := "add column t int, change column i int 'some comment, with comma'" + tokens, _ := parser.tokenizeAlterStatement(alterStatement) + assert.True(t, reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment, with comma'"})) + } + { + alterStatement := "add column t int, add column d decimal(10,2)" + tokens, _ := parser.tokenizeAlterStatement(alterStatement) + assert.True(t, reflect.DeepEqual(tokens, []string{"add column t int", "add column d decimal(10,2)"})) + } + { + alterStatement := "add column t int, add column e enum('a','b','c')" + tokens, _ := parser.tokenizeAlterStatement(alterStatement) + assert.True(t, reflect.DeepEqual(tokens, []string{"add column t int", "add column e enum('a','b','c')"})) + } + { + alterStatement := "add column t int(11), add column e enum('a','b','c')" + tokens, _ := parser.tokenizeAlterStatement(alterStatement) + assert.True(t, reflect.DeepEqual(tokens, []string{"add column t int(11)", "add column e enum('a','b','c')"})) + } +} + +func TestSanitizeQuotesFromAlterStatement(t *testing.T) { + parser := NewAlterTableParser() + { + alterStatement := "add column e enum('a','b','c')" + strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement) + assert.Equal(t, strippedStatement, "add column e enum('','','')") + } + { + alterStatement := "change column i int 'some comment, with comma'" + strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement) + assert.Equal(t, strippedStatement, "change column i int ''") + } +} + +func TestParseAlterStatementDroppedColumns(t *testing.T) { + + { + parser := NewAlterTableParser() + statement := "drop column b" 
+ err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, len(parser.droppedColumns), 1) + assert.True(t, parser.droppedColumns["b"]) + } + { + parser := NewAlterTableParser() + statement := "drop column b, drop key c_idx, drop column `d`" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.alterStatementOptions, statement) + assert.Equal(t, len(parser.droppedColumns), 2) + assert.True(t, parser.droppedColumns["b"]) + assert.True(t, parser.droppedColumns["d"]) + } + { + parser := NewAlterTableParser() + statement := "drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, len(parser.droppedColumns), 3) + assert.True(t, parser.droppedColumns["b"]) + assert.True(t, parser.droppedColumns["d"]) + assert.True(t, parser.droppedColumns["e"]) + } + { + parser := NewAlterTableParser() + statement := "drop column b, drop bad statement, add column i int" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, len(parser.droppedColumns), 1) + assert.True(t, parser.droppedColumns["b"]) + } +} + +func TestParseAlterStatementRenameTable(t *testing.T) { + + { + parser := NewAlterTableParser() + statement := "drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.False(t, parser.isRenameTable) + } + { + parser := NewAlterTableParser() + statement := "rename as something_else" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.True(t, parser.isRenameTable) + } + { + parser := NewAlterTableParser() + statement := "drop column b, rename as something_else" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.alterStatementOptions, statement) + assert.True(t, parser.isRenameTable) + } + { + parser := NewAlterTableParser() + statement 
:= "engine=innodb rename as something_else" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.True(t, parser.isRenameTable) + } + { + parser := NewAlterTableParser() + statement := "rename as something_else, engine=innodb" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.True(t, parser.isRenameTable) + } +} + +func TestParseAlterStatementExplicitTable(t *testing.T) { + + { + parser := NewAlterTableParser() + statement := "drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "") + assert.Equal(t, parser.explicitTable, "") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table tbl drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "") + assert.Equal(t, parser.explicitTable, "tbl") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table `tbl` drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "") + assert.Equal(t, parser.explicitTable, "tbl") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table `scm with spaces`.`tbl` drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "scm with spaces") + assert.Equal(t, parser.explicitTable, "tbl") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, 
reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table `scm`.`tbl with spaces` drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "scm") + assert.Equal(t, parser.explicitTable, "tbl with spaces") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table `scm`.tbl drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "scm") + assert.Equal(t, parser.explicitTable, "tbl") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table scm.`tbl` drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "scm") + assert.Equal(t, parser.explicitTable, "tbl") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table scm.tbl drop column b" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "scm") + assert.Equal(t, parser.explicitTable, "tbl") + assert.Equal(t, parser.alterStatementOptions, "drop column b") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b"})) + } + { + parser := NewAlterTableParser() + statement := "alter table scm.tbl drop column b, add index idx(i)" + err := parser.ParseAlterStatement(statement) + assert.NoError(t, err) + assert.Equal(t, parser.explicitSchema, "scm") + assert.Equal(t, 
parser.explicitTable, "tbl") + assert.Equal(t, parser.alterStatementOptions, "drop column b, add index idx(i)") + assert.True(t, reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"})) + } +} diff --git a/go/vt/vttablet/onlineddl/vrepl/types.go b/go/vt/vttablet/onlineddl/vrepl/types.go new file mode 100644 index 00000000000..fe7bcbcbd29 --- /dev/null +++ b/go/vt/vttablet/onlineddl/vrepl/types.go @@ -0,0 +1,277 @@ +/* + Original copyright by GitHub as follows. Additions by the Vitess authors as follows. +*/ +/* + Copyright 2016 GitHub Inc. + See https://github.com/github/gh-ost/blob/master/LICENSE +*/ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vrepl + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +// ColumnType enumerates some important column types +type ColumnType int + +const ( + UnknownColumnType ColumnType = iota + TimestampColumnType + DateTimeColumnType + EnumColumnType + MediumIntColumnType + JSONColumnType + FloatColumnType +) + +const maxMediumintUnsigned int32 = 16777215 + +// TimezoneConversion indicates how to convert a timezone value +type TimezoneConversion struct { + ToTimezone string +} + +// Column represents a table column +type Column struct { + Name string + IsUnsigned bool + Charset string + Type ColumnType + timezoneConversion *TimezoneConversion +} + +func (c *Column) convertArg(arg interface{}) interface{} { + if s, ok := arg.(string); ok { + // string, charset conversion + if encoding, ok := charsetEncodingMap[c.Charset]; ok { + arg, _ = encoding.NewDecoder().String(s) + } + return arg + } + + if c.IsUnsigned { + if i, ok := arg.(int8); ok { + return uint8(i) + } + if i, ok := arg.(int16); ok { + return uint16(i) + } + if i, ok := arg.(int32); ok { + if c.Type == MediumIntColumnType { + // problem with mediumint is that it's a 3-byte type. There is no compatible golang type to match that. 
+			// So to convert from negative to positive we'd need to convert the value manually
+			if i >= 0 {
+				return i
+			}
+			return uint32(maxMediumintUnsigned + i + 1)
+		}
+		return uint32(i)
+	}
+	if i, ok := arg.(int64); ok {
+		// NOTE(review): unlike the other branches this returns a string, not a
+		// uint64 — presumably to avoid driver issues with values above
+		// math.MaxInt64; confirm callers accept a string here.
+		return strconv.FormatUint(uint64(i), 10)
+	}
+	if i, ok := arg.(int); ok {
+		return uint(i)
+	}
+	}
+	return arg
+}
+
+// NewColumns creates a new column array from non empty names
+func NewColumns(names []string) []Column {
+	result := []Column{}
+	for _, name := range names {
+		if name == "" {
+			continue
+		}
+		result = append(result, Column{Name: name})
+	}
+	return result
+}
+
+// ParseColumns creates a new column array by parsing comma delimited names list
+func ParseColumns(names string) []Column {
+	namesArray := strings.Split(names, ",")
+	return NewColumns(namesArray)
+}
+
+// ColumnsMap maps a column name onto its ordinal position
+type ColumnsMap map[string]int
+
+// NewEmptyColumnsMap creates an empty map
+func NewEmptyColumnsMap() ColumnsMap {
+	columnsMap := make(map[string]int)
+	return ColumnsMap(columnsMap)
+}
+
+// NewColumnsMap creates a column map based on ordered list of columns
+func NewColumnsMap(orderedColumns []Column) ColumnsMap {
+	columnsMap := NewEmptyColumnsMap()
+	for i, column := range orderedColumns {
+		columnsMap[column.Name] = i
+	}
+	return columnsMap
+}
+
+// ColumnList makes for a named list of columns
+type ColumnList struct {
+	columns  []Column
+	Ordinals ColumnsMap
+}
+
+// NewColumnList creates an object given ordered list of column names
+func NewColumnList(names []string) *ColumnList {
+	result := &ColumnList{
+		columns: NewColumns(names),
+	}
+	result.Ordinals = NewColumnsMap(result.columns)
+	return result
+}
+
+// ParseColumnList parses a comma delimited list of column names
+func ParseColumnList(names string) *ColumnList {
+	result := &ColumnList{
+		columns: ParseColumns(names),
+	}
+	result.Ordinals = NewColumnsMap(result.columns)
+	return result
+}
+
+// Columns returns the list of columns
+func (l
*ColumnList) Columns() []Column { + return l.columns +} + +// Names returns list of column names +func (l *ColumnList) Names() []string { + names := make([]string, len(l.columns)) + for i := range l.columns { + names[i] = l.columns[i].Name + } + return names +} + +// GetColumn gets a column by name +func (l *ColumnList) GetColumn(columnName string) *Column { + if ordinal, ok := l.Ordinals[columnName]; ok { + return &l.columns[ordinal] + } + return nil +} + +// SetUnsigned toggles on the unsigned property +func (l *ColumnList) SetUnsigned(columnName string) { + l.GetColumn(columnName).IsUnsigned = true +} + +// IsUnsigned returns true when the column is an unsigned numeral +func (l *ColumnList) IsUnsigned(columnName string) bool { + return l.GetColumn(columnName).IsUnsigned +} + +// SetCharset sets the charset property +func (l *ColumnList) SetCharset(columnName string, charset string) { + l.GetColumn(columnName).Charset = charset +} + +// GetCharset returns the hcarset property +func (l *ColumnList) GetCharset(columnName string) string { + return l.GetColumn(columnName).Charset +} + +// SetColumnType sets the type of the column (for interesting types) +func (l *ColumnList) SetColumnType(columnName string, columnType ColumnType) { + l.GetColumn(columnName).Type = columnType +} + +// GetColumnType gets type of column, for interesting types +func (l *ColumnList) GetColumnType(columnName string) ColumnType { + return l.GetColumn(columnName).Type +} + +// SetConvertDatetimeToTimestamp sets the timezone conversion +func (l *ColumnList) SetConvertDatetimeToTimestamp(columnName string, toTimezone string) { + l.GetColumn(columnName).timezoneConversion = &TimezoneConversion{ToTimezone: toTimezone} +} + +// HasTimezoneConversion sees if there's timezone conversion defined (only applicable to temporal values) +func (l *ColumnList) HasTimezoneConversion(columnName string) bool { + return l.GetColumn(columnName).timezoneConversion != nil +} + +// String returns a comma separated 
list of column names +func (l *ColumnList) String() string { + return strings.Join(l.Names(), ",") +} + +// Equals checks for complete (deep) identities of columns, in order. +func (l *ColumnList) Equals(other *ColumnList) bool { + return reflect.DeepEqual(l.Columns, other.Columns) +} + +// EqualsByNames chcks if the names in this list equals the names of another list, in order. Type is ignored. +func (l *ColumnList) EqualsByNames(other *ColumnList) bool { + return reflect.DeepEqual(l.Names(), other.Names()) +} + +// IsSubsetOf returns 'true' when column names of this list are a subset of +// another list, in arbitrary order (order agnostic) +func (l *ColumnList) IsSubsetOf(other *ColumnList) bool { + for _, column := range l.columns { + if _, exists := other.Ordinals[column.Name]; !exists { + return false + } + } + return true +} + +// Len returns the length of this list +func (l *ColumnList) Len() int { + return len(l.columns) +} + +// UniqueKey is the combination of a key's name and columns +type UniqueKey struct { + Name string + Columns ColumnList + HasNullable bool + IsAutoIncrement bool +} + +// IsPrimary checks if this unique key is primary +func (k *UniqueKey) IsPrimary() bool { + return k.Name == "PRIMARY" +} + +// Len returns the length of this list +func (k *UniqueKey) Len() int { + return k.Columns.Len() +} + +// String returns a visual representation of this key +func (k *UniqueKey) String() string { + description := k.Name + if k.IsAutoIncrement { + description = fmt.Sprintf("%s (auto_increment)", description) + } + return fmt.Sprintf("%s: %s; has nullable: %+v", description, k.Columns.Names(), k.HasNullable) +} diff --git a/go/vt/vttablet/onlineddl/vrepl/types_test.go b/go/vt/vttablet/onlineddl/vrepl/types_test.go new file mode 100644 index 00000000000..25663f9e918 --- /dev/null +++ b/go/vt/vttablet/onlineddl/vrepl/types_test.go @@ -0,0 +1,100 @@ +/* + Copyright 2016 GitHub Inc. 
+ See https://github.com/github/gh-ost/blob/master/LICENSE +*/ + +package vrepl + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseColumnList(t *testing.T) { + names := "id,category,max_len" + + columnList := ParseColumnList(names) + assert.Equal(t, columnList.Len(), 3) + assert.Equal(t, columnList.Names(), []string{"id", "category", "max_len"}) + assert.Equal(t, columnList.Ordinals["id"], 0) + assert.Equal(t, columnList.Ordinals["category"], 1) + assert.Equal(t, columnList.Ordinals["max_len"], 2) +} + +func TestGetColumn(t *testing.T) { + names := "id,category,max_len" + columnList := ParseColumnList(names) + { + column := columnList.GetColumn("category") + assert.NotNil(t, column) + assert.Equal(t, column.Name, "category") + } + { + column := columnList.GetColumn("no_such_column") + assert.True(t, column == nil) + } +} + +func TestIsSubsetOf(t *testing.T) { + tt := []struct { + columns1 *ColumnList + columns2 *ColumnList + expectSubset bool + }{ + { + columns1: ParseColumnList(""), + columns2: ParseColumnList("a,b,c"), + expectSubset: true, + }, + { + columns1: ParseColumnList("a,b,c"), + columns2: ParseColumnList("a,b,c"), + expectSubset: true, + }, + { + columns1: ParseColumnList("a,c"), + columns2: ParseColumnList("a,b,c"), + expectSubset: true, + }, + { + columns1: ParseColumnList("b,c"), + columns2: ParseColumnList("a,b,c"), + expectSubset: true, + }, + { + columns1: ParseColumnList("b"), + columns2: ParseColumnList("a,b,c"), + expectSubset: true, + }, + { + columns1: ParseColumnList(""), + columns2: ParseColumnList("a,b,c"), + expectSubset: true, + }, + { + columns1: ParseColumnList("a,d"), + columns2: ParseColumnList("a,b,c"), + expectSubset: false, + }, + { + columns1: ParseColumnList("a,b,c"), + columns2: ParseColumnList("a,b"), + expectSubset: false, + }, + { + columns1: ParseColumnList("a,b,c"), + columns2: ParseColumnList(""), + expectSubset: false, + }, + } + for _, tc := range tt { + name := 
fmt.Sprintf("%v:%v", tc.columns1.Names(), tc.columns2.Names()) + t.Run(name, func(t *testing.T) { + isSubset := tc.columns1.IsSubsetOf(tc.columns2) + assert.Equal(t, tc.expectSubset, isSubset) + }, + ) + } +} diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 912fd19f5b0..c0c7d740e27 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -21,6 +21,10 @@ package sandboxconn import ( "fmt" "sync" + "time" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "context" @@ -94,6 +98,7 @@ type SandboxConn struct { StartPos string VStreamEvents [][]*binlogdatapb.VEvent VStreamErrors []error + VStreamCh chan *binlogdatapb.VEvent // transaction id generator TransactionID sync2.AtomicInt64 @@ -101,6 +106,7 @@ type SandboxConn struct { // reserve id generator ReserveID sync2.AtomicInt64 + mapMu sync.Mutex //protects the map txIDToRID txIDToRID map[int64]int64 sExecMu sync.Mutex @@ -167,7 +173,9 @@ func (sbc *SandboxConn) Execute(ctx context.Context, target *querypb.Target, que if err := sbc.getError(); err != nil { return nil, err } - return sbc.getNextResult(), nil + + stmt, _ := sqlparser.Parse(query) // knowingly ignoring the error + return sbc.getNextResult(stmt), nil } // ExecuteBatch is part of the QueryService interface. 
@@ -183,7 +191,7 @@ func (sbc *SandboxConn) ExecuteBatch(ctx context.Context, target *querypb.Target sbc.Options = append(sbc.Options, options) result := make([]sqltypes.Result, 0, len(queries)) for range queries { - result = append(result, *(sbc.getNextResult())) + result = append(result, *(sbc.getNextResult(nil))) } return result, nil } @@ -206,14 +214,16 @@ func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Targe sbc.sExecMu.Unlock() return err } + parse, _ := sqlparser.Parse(query) + if sbc.results == nil { - nextRs := sbc.getNextResult() + nextRs := sbc.getNextResult(parse) sbc.sExecMu.Unlock() return callback(nextRs) } for len(sbc.results) > 0 { - nextRs := sbc.getNextResult() + nextRs := sbc.getNextResult(parse) sbc.sExecMu.Unlock() err := callback(nextRs) if err != nil { @@ -221,8 +231,8 @@ func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Targe } sbc.sExecMu.Lock() } - sbc.sExecMu.Unlock() + sbc.sExecMu.Unlock() return nil } @@ -254,7 +264,7 @@ func (sbc *SandboxConn) begin(ctx context.Context, target *querypb.Target, preQu // Commit is part of the QueryService interface. func (sbc *SandboxConn) Commit(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { sbc.CommitCount.Add(1) - reservedID := sbc.txIDToRID[transactionID] + reservedID := sbc.getTxReservedID(transactionID) if reservedID != 0 { reservedID = sbc.ReserveID.Add(1) } @@ -264,7 +274,7 @@ func (sbc *SandboxConn) Commit(ctx context.Context, target *querypb.Target, tran // Rollback is part of the QueryService interface. 
func (sbc *SandboxConn) Rollback(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { sbc.RollbackCount.Add(1) - reservedID := sbc.txIDToRID[transactionID] + reservedID := sbc.getTxReservedID(transactionID) if reservedID != 0 { reservedID = sbc.ReserveID.Add(1) } @@ -362,7 +372,7 @@ func (sbc *SandboxConn) ReadTransaction(ctx context.Context, target *querypb.Tar func (sbc *SandboxConn) BeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, query string, bindVars map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, int64, *topodatapb.TabletAlias, error) { transactionID, alias, err := sbc.begin(ctx, target, preQueries, reservedID, options) if transactionID != 0 { - sbc.txIDToRID[transactionID] = reservedID + sbc.setTxReservedID(transactionID, reservedID) } if err != nil { return nil, 0, nil, err @@ -386,7 +396,7 @@ func (sbc *SandboxConn) MessageStream(ctx context.Context, target *querypb.Targe if err := sbc.getError(); err != nil { return err } - r := sbc.getNextResult() + r := sbc.getNextResult(nil) if r == nil { return nil } @@ -422,18 +432,60 @@ func (sbc *SandboxConn) AddVStreamEvents(events []*binlogdatapb.VEvent, err erro // VStream is part of the QueryService interface. 
func (sbc *SandboxConn) VStream(ctx context.Context, target *querypb.Target, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { if sbc.StartPos != "" && sbc.StartPos != startPos { + log.Errorf("startPos(%v): %v, want %v", target, startPos, sbc.StartPos) return fmt.Errorf("startPos(%v): %v, want %v", target, startPos, sbc.StartPos) } - for len(sbc.VStreamEvents) != 0 { - ev := sbc.VStreamEvents[0] - err := sbc.VStreamErrors[0] - sbc.VStreamEvents = sbc.VStreamEvents[1:] - sbc.VStreamErrors = sbc.VStreamErrors[1:] - if ev == nil { - return err + done := false + // for testing the minimize stream skew feature (TestStreamSkew) we need the ability to send events in specific sequences from + // multiple streams. We introduce a channel in the sandbox that we listen on and vstream those events + // as we receive them. We also need to simulate vstreamer heartbeats since the skew detection logic depends on it + // in case of shards where there are no real events within a second + if sbc.VStreamCh != nil { + lastTimestamp := int64(0) + for !done { + timer := time.NewTimer(1 * time.Second) + select { + case <-timer.C: + events := []*binlogdatapb.VEvent{{ + Type: binlogdatapb.VEventType_HEARTBEAT, + Timestamp: lastTimestamp, + CurrentTime: lastTimestamp, + }, { + Type: binlogdatapb.VEventType_COMMIT, + Timestamp: lastTimestamp, + CurrentTime: lastTimestamp, + }} + + if err := send(events); err != nil { + log.Infof("error sending event in test sandbox %s", err.Error()) + return err + } + lastTimestamp++ + + case ev := <-sbc.VStreamCh: + if ev == nil { + done = true + } + if err := send([]*binlogdatapb.VEvent{ev}); err != nil { + log.Infof("error sending event in test sandbox %s", err.Error()) + return err + } + lastTimestamp = ev.Timestamp + } } - if err := send(ev); err != nil { - return err + } else { + // this path is followed for all vstream tests other than the skew tests + for 
len(sbc.VStreamEvents) != 0 { + ev := sbc.VStreamEvents[0] + err := sbc.VStreamErrors[0] + sbc.VStreamEvents = sbc.VStreamEvents[1:] + sbc.VStreamErrors = sbc.VStreamErrors[1:] + if ev == nil { + return err + } + if err := send(ev); err != nil { + return err + } } } // Don't return till context is canceled. @@ -465,7 +517,7 @@ func (sbc *SandboxConn) ReserveBeginExecute(ctx context.Context, target *querypb reservedID := sbc.reserve(ctx, target, preQueries, bindVariables, 0, options) result, transactionID, alias, err := sbc.BeginExecute(ctx, target, nil, sql, bindVariables, reservedID, options) if transactionID != 0 { - sbc.txIDToRID[transactionID] = reservedID + sbc.setTxReservedID(transactionID, reservedID) } if err != nil { return nil, transactionID, reservedID, alias, err @@ -478,7 +530,7 @@ func (sbc *SandboxConn) ReserveExecute(ctx context.Context, target *querypb.Targ reservedID := sbc.reserve(ctx, target, preQueries, bindVariables, transactionID, options) result, err := sbc.Execute(ctx, target, sql, bindVariables, transactionID, reservedID, options) if transactionID != 0 { - sbc.txIDToRID[transactionID] = reservedID + sbc.setTxReservedID(transactionID, reservedID) } if err != nil { return nil, 0, nil, err @@ -513,13 +565,54 @@ func (sbc *SandboxConn) Tablet() *topodatapb.Tablet { return sbc.tablet } -func (sbc *SandboxConn) getNextResult() *sqltypes.Result { +// ChangeTabletType changes the tablet type. 
+func (sbc *SandboxConn) ChangeTabletType(typ topodatapb.TabletType) { + sbc.tablet.Type = typ +} + +func (sbc *SandboxConn) getNextResult(stmt sqlparser.Statement) *sqltypes.Result { if len(sbc.results) != 0 { r := sbc.results[0] sbc.results = sbc.results[1:] return r } - return SingleRowResult + if stmt == nil { + // if we didn't get a valid query, we'll assume we need a SELECT + return getSingleRowResult() + } + switch stmt.(type) { + case *sqlparser.Select, + *sqlparser.Union, + *sqlparser.Show, + sqlparser.Explain, + *sqlparser.OtherRead: + return getSingleRowResult() + case *sqlparser.Set, + sqlparser.DDLStatement, + *sqlparser.AlterVschema, + *sqlparser.Use, + *sqlparser.OtherAdmin, + *sqlparser.SetTransaction, + *sqlparser.Savepoint, + *sqlparser.SRollback, + *sqlparser.Release: + return &sqltypes.Result{} + } + + // for everything else we fake a single row being affected + return &sqltypes.Result{RowsAffected: 1} +} + +func (sbc *SandboxConn) setTxReservedID(transactionID int64, reservedID int64) { + sbc.mapMu.Lock() + defer sbc.mapMu.Unlock() + sbc.txIDToRID[transactionID] = reservedID +} + +func (sbc *SandboxConn) getTxReservedID(txID int64) int64 { + sbc.mapMu.Lock() + defer sbc.mapMu.Unlock() + return sbc.txIDToRID[txID] } //StringQueries returns the queries executed as a slice of strings @@ -531,18 +624,37 @@ func (sbc *SandboxConn) StringQueries() []string { return result } +// getSingleRowResult is used to get a SingleRowResult but it creates separate fields because some tests change the fields +// If these fields are not created separately then the constants value also changes which leads to some other tests failing later +func getSingleRowResult() *sqltypes.Result { + singleRowResult := &sqltypes.Result{ + InsertID: SingleRowResult.InsertID, + StatusFlags: SingleRowResult.StatusFlags, + Rows: SingleRowResult.Rows, + } + + for _, field := range SingleRowResult.Fields { + singleRowResult.Fields = append(singleRowResult.Fields, &querypb.Field{ + 
Name: field.Name, + Type: field.Type, + }) + } + + return singleRowResult +} + // SingleRowResult is returned when there is no pre-stored result. var SingleRowResult = &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32}, {Name: "value", Type: sqltypes.VarChar}, }, - RowsAffected: 1, - InsertID: 0, + InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewVarChar("foo"), }}, + StatusFlags: sqltypes.ServerStatusAutocommit, } // StreamRowResult is SingleRowResult with RowsAffected set to 0. @@ -551,8 +663,6 @@ var StreamRowResult = &sqltypes.Result{ {Name: "id", Type: sqltypes.Int32}, {Name: "value", Type: sqltypes.VarChar}, }, - RowsAffected: 0, - InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewVarChar("foo"), diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index ec5cc103efd..9fb15d053d5 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -39,7 +39,7 @@ import ( // FakeQueryService implements a programmable fake for the query service // server side. 
type FakeQueryService struct { - t *testing.T + t testing.TB TestingGateway bool // these fields are used to simulate and synchronize on errors @@ -740,7 +740,7 @@ func (f *FakeQueryService) Release(ctx context.Context, target *querypb.Target, } // CreateFakeServer returns the fake server for the tests -func CreateFakeServer(t *testing.T) *FakeQueryService { +func CreateFakeServer(t testing.TB) *FakeQueryService { return &FakeQueryService{ t: t, } diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index eeb08050009..cb47db542ef 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -22,6 +22,8 @@ import ( "strings" "time" + "vitess.io/vitess/go/vt/proto/vtrpc" + "context" "vitess.io/vitess/go/mysql" @@ -49,7 +51,7 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat return mysql.ReplicationStatusToProto(status), nil } -// MasterStatus returns the replication status fopr a master tablet. +// MasterStatus returns the replication status for a master tablet. func (tm *TabletManager) MasterStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) { status, err := tm.MysqlDaemon.MasterStatus(ctx) if err != nil { @@ -69,6 +71,7 @@ func (tm *TabletManager) MasterPosition(ctx context.Context) (string, error) { // WaitForPosition returns the master position func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error { + log.Infof("WaitForPosition: %v", pos) mpos, err := mysql.DecodePosition(pos) if err != nil { return err @@ -79,6 +82,7 @@ func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error // StopReplication will stop the mysql. Works both when Vitess manages // replication or not (using hook if not). 
func (tm *TabletManager) StopReplication(ctx context.Context) error { + log.Infof("StopReplication") if err := tm.lock(ctx); err != nil { return err } @@ -131,6 +135,7 @@ func (tm *TabletManager) stopIOThreadLocked(ctx context.Context) error { // provided position. Works both when Vitess manages // replication or not (using hook if not). func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position string, waitTime time.Duration) (string, error) { + log.Infof("StopReplicationMinimum: position: %v waitTime: %v", position, waitTime) if err := tm.lock(ctx); err != nil { return "", err } @@ -158,6 +163,7 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st // StartReplication will start the mysql. Works both when Vitess manages // replication or not (using hook if not). func (tm *TabletManager) StartReplication(ctx context.Context) error { + log.Infof("StartReplication") if err := tm.lock(ctx); err != nil { return err } @@ -185,6 +191,7 @@ func (tm *TabletManager) StartReplication(ctx context.Context) error { // StartReplicationUntilAfter will start the replication and let it catch up // until and including the transactions in `position` func (tm *TabletManager) StartReplicationUntilAfter(ctx context.Context, position string, waitTime time.Duration) error { + log.Infof("StartReplicationUntilAfter: position: %v waitTime: %v", position, waitTime) if err := tm.lock(ctx); err != nil { return err } @@ -209,6 +216,7 @@ func (tm *TabletManager) GetReplicas(ctx context.Context) ([]string, error) { // ResetReplication completely resets the replication on the host. // All binary and relay logs are flushed. All replication positions are reset. 
func (tm *TabletManager) ResetReplication(ctx context.Context) error { + log.Infof("ResetReplication") if err := tm.lock(ctx); err != nil { return err } @@ -220,6 +228,7 @@ func (tm *TabletManager) ResetReplication(ctx context.Context) error { // InitMaster enables writes and returns the replication position. func (tm *TabletManager) InitMaster(ctx context.Context) (string, error) { + log.Infof("InitMaster") if err := tm.lock(ctx); err != nil { return "", err } @@ -259,6 +268,7 @@ func (tm *TabletManager) InitMaster(ctx context.Context) (string, error) { // PopulateReparentJournal adds an entry into the reparent_journal table. func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, position string) error { + log.Infof("PopulateReparentJournal: action: %v parent: %v position: %v", actionName, masterAlias, position) pos, err := mysql.DecodePosition(position) if err != nil { return err @@ -272,6 +282,7 @@ func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreate // InitReplica sets replication master and position, and waits for the // reparent_journal table entry up to context timeout func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.TabletAlias, position string, timeCreatedNS int64) error { + log.Infof("InitReplica: parent: %v position: %v", parent, position) if err := tm.lock(ctx); err != nil { return err } @@ -335,6 +346,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab // // If a step fails in the middle, it will try to undo any changes it made. func (tm *TabletManager) DemoteMaster(ctx context.Context) (*replicationdatapb.MasterStatus, error) { + log.Infof("DemoteMaster") // The public version always reverts on partial failure. 
return tm.demoteMaster(ctx, true /* revertPartialFailure */) } @@ -441,6 +453,7 @@ func (tm *TabletManager) demoteMaster(ctx context.Context, revertPartialFailure // it sets read-only to false, fixes semi-sync // and returns its master position. func (tm *TabletManager) UndoDemoteMaster(ctx context.Context) error { + log.Infof("UndoDemoteMaster") if err := tm.lock(ctx); err != nil { return err } @@ -477,12 +490,14 @@ func (tm *TabletManager) UndoDemoteMaster(ctx context.Context) error { // ReplicaWasPromoted promotes a replica to master, no questions asked. func (tm *TabletManager) ReplicaWasPromoted(ctx context.Context) error { + log.Infof("ReplicaWasPromoted") return tm.ChangeType(ctx, topodatapb.TabletType_MASTER) } // SetMaster sets replication master, and waits for the // reparent_journal table entry up to context timeout func (tm *TabletManager) SetMaster(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { + log.Infof("SetMaster: parent: %v position: %v force: %v", parentAlias, waitPosition, forceStartReplication) if err := tm.lock(ctx); err != nil { return err } @@ -570,6 +585,10 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod } // Update the master address only if needed. // We don't want to interrupt replication for no reason. 
+ if parentAlias == nil { + // if there is no master in the shard, return an error so that we can retry + return vterrors.New(vtrpc.Code_FAILED_PRECONDITION, "Shard masterAlias is nil") + } parent, err := tm.TopoServer.GetTablet(ctx, parentAlias) if err != nil { return err @@ -613,6 +632,8 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod return err } } + // Clear replication sentinel flag for this replica + tm.replManager.setReplicationStopped(false) } return nil @@ -620,6 +641,7 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod // ReplicaWasRestarted updates the parent record for a tablet. func (tm *TabletManager) ReplicaWasRestarted(ctx context.Context, parent *topodatapb.TabletAlias) error { + log.Infof("ReplicaWasRestarted: parent: %v", parent) if err := tm.lock(ctx); err != nil { return err } @@ -637,6 +659,7 @@ func (tm *TabletManager) ReplicaWasRestarted(ctx context.Context, parent *topoda // StopReplicationAndGetStatus stops MySQL replication, and returns the // current status. 
func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopReplicationMode replicationdatapb.StopReplicationMode) (StopReplicationAndGetStatusResponse, error) { + log.Infof("StopReplicationAndGetStatus: mode: %v", stopReplicationMode) if err := tm.lock(ctx); err != nil { return StopReplicationAndGetStatusResponse{}, err } @@ -727,6 +750,7 @@ type StopReplicationAndGetStatusResponse struct { // PromoteReplica makes the current tablet the master func (tm *TabletManager) PromoteReplica(ctx context.Context) (string, error) { + log.Infof("PromoteReplica") if err := tm.lock(ctx); err != nil { return "", err } @@ -746,6 +770,10 @@ func (tm *TabletManager) PromoteReplica(ctx context.Context) (string, error) { return "", err } + // Clear replication sentinel flag for this master, + // or we might block replication the next time we demote it + tm.replManager.setReplicationStopped(false) + return mysql.EncodePosition(pos), nil } diff --git a/go/vt/vttablet/tabletmanager/rpc_vexec.go b/go/vt/vttablet/tabletmanager/rpc_vexec.go index 6897159b2fb..8783230c696 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vexec.go +++ b/go/vt/vttablet/tabletmanager/rpc_vexec.go @@ -20,7 +20,7 @@ import ( "fmt" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vttablet/onlineddl" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/vttablet/vexec" "context" @@ -33,7 +33,7 @@ func (tm *TabletManager) VExec(ctx context.Context, query, workflow, keyspace st return nil, err } switch vx.TableName { - case fmt.Sprintf("%s.%s", vexec.TableQualifier, onlineddl.SchemaMigrationsTableName): + case fmt.Sprintf("%s.%s", vexec.TableQualifier, schema.SchemaMigrationsTableName): return tm.QueryServiceControl.OnlineDDLExecutor().VExec(ctx, vx) default: return nil, fmt.Errorf("table not supported by vexec: %v", vx.TableName) diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index 98950516e70..58733c27ec1 100644 --- 
a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -646,6 +646,7 @@ func (tm *TabletManager) exportStats() { statsKeyspace.Set(tablet.Keyspace) statsShard.Set(tablet.Shard) statsTabletType.Set(topoproto.TabletTypeLString(tm.tmState.tablet.Type)) + statsTabletTypeCount.Add(topoproto.TabletTypeLString(tm.tmState.tablet.Type), 1) if key.KeyRangeIsPartial(tablet.KeyRange) { statsKeyRangeStart.Set(hex.EncodeToString(tablet.KeyRange.Start)) statsKeyRangeEnd.Set(hex.EncodeToString(tablet.KeyRange.End)) diff --git a/go/vt/vttablet/tabletmanager/tm_init_test.go b/go/vt/vttablet/tabletmanager/tm_init_test.go index 8ba4c0967e6..20c2e8b12e4 100644 --- a/go/vt/vttablet/tabletmanager/tm_init_test.go +++ b/go/vt/vttablet/tabletmanager/tm_init_test.go @@ -129,6 +129,8 @@ func TestStartCreateKeyspaceShard(t *testing.T) { defer tm.Stop() assert.Equal(t, "replica", statsTabletType.Get()) + assert.Equal(t, 1, len(statsTabletTypeCount.Counts())) + assert.Equal(t, int64(1), statsTabletTypeCount.Counts()["replica"]) _, err := ts.GetShard(ctx, "ks", "0") require.NoError(t, err) diff --git a/go/vt/vttablet/tabletmanager/tm_state.go b/go/vt/vttablet/tabletmanager/tm_state.go index c21d387cd1e..41b864bb013 100644 --- a/go/vt/vttablet/tabletmanager/tm_state.go +++ b/go/vt/vttablet/tabletmanager/tm_state.go @@ -21,8 +21,11 @@ import ( "fmt" "strings" "sync" + "syscall" "time" + "vitess.io/vitess/go/vt/servenv" + "context" "github.com/golang/protobuf/proto" @@ -369,6 +372,11 @@ func (ts *tmState) publishStateLocked(ctx context.Context) { return nil }) if err != nil { + if topo.IsErrType(err, topo.NoNode) { // Someone deleted the tablet record under us. Shut down gracefully. + log.Error("Tablet record has disappeared, shutting down") + servenv.ExitChan <- syscall.SIGTERM + return + } log.Errorf("Unable to publish state to topo, will keep retrying: %v", err) ts.isPublishing = true // Keep retrying until success. 
@@ -396,6 +404,11 @@ func (ts *tmState) retryPublish() { }) cancel() if err != nil { + if topo.IsErrType(err, topo.NoNode) { // Someone deleted the tablet record under us. Shut down gracefully. + log.Error("Tablet record has disappeared, shutting down") + servenv.ExitChan <- syscall.SIGTERM + return + } log.Errorf("Unable to publish state to topo, will keep retrying: %v", err) ts.mu.Unlock() time.Sleep(*publishRetryInterval) diff --git a/go/vt/vttablet/tabletmanager/tm_state_test.go b/go/vt/vttablet/tabletmanager/tm_state_test.go index 42f4b3205f5..f250ee29e1c 100644 --- a/go/vt/vttablet/tabletmanager/tm_state_test.go +++ b/go/vt/vttablet/tabletmanager/tm_state_test.go @@ -18,9 +18,12 @@ package tabletmanager import ( "encoding/json" + "os" "testing" "time" + "vitess.io/vitess/go/vt/servenv" + "context" "github.com/stretchr/testify/assert" @@ -324,9 +327,13 @@ func TestStateNonServing(t *testing.T) { func TestStateChangeTabletType(t *testing.T) { ctx := context.Background() ts := memorytopo.NewServer("cell1") + statsTabletTypeCount.ResetAll() tm := newTestTM(t, ts, 2, "ks", "0") defer tm.Stop() + assert.Equal(t, 1, len(statsTabletTypeCount.Counts())) + assert.Equal(t, int64(1), statsTabletTypeCount.Counts()["replica"]) + alias := &topodatapb.TabletAlias{ Cell: "cell1", Uid: 2, @@ -339,6 +346,8 @@ func TestStateChangeTabletType(t *testing.T) { assert.Equal(t, topodatapb.TabletType_MASTER, ti.Type) assert.NotNil(t, ti.MasterTermStartTime) assert.Equal(t, "master", statsTabletType.Get()) + assert.Equal(t, 2, len(statsTabletTypeCount.Counts())) + assert.Equal(t, int64(1), statsTabletTypeCount.Counts()["master"]) err = tm.tmState.ChangeTabletType(ctx, topodatapb.TabletType_REPLICA, DBActionNone) require.NoError(t, err) @@ -347,6 +356,8 @@ func TestStateChangeTabletType(t *testing.T) { assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) assert.Nil(t, ti.MasterTermStartTime) assert.Equal(t, "replica", statsTabletType.Get()) + assert.Equal(t, 2, 
len(statsTabletTypeCount.Counts())) + assert.Equal(t, int64(2), statsTabletTypeCount.Counts()["replica"]) } func TestPublishStateNew(t *testing.T) { @@ -401,3 +412,30 @@ func TestPublishStateNew(t *testing.T) { require.NoError(t, err) assert.Equal(t, tab2, ttablet.Tablet) } + +func TestPublishDeleted(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("cell1") + tm := newTestTM(t, ts, 2, "ks", "0") + defer tm.Stop() + + alias := &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 2, + } + + err := tm.tmState.ChangeTabletType(ctx, topodatapb.TabletType_MASTER, DBActionSetReadWrite) + require.NoError(t, err) + + err = ts.DeleteTablet(ctx, alias) + require.NoError(t, err) + + // we need to make sure to catch the signal + servenv.ExitChan = make(chan os.Signal, 1) + // Now change the tablet type and publish + err = tm.tmState.ChangeTabletType(ctx, topodatapb.TabletType_REPLICA, DBActionNone) + require.NoError(t, err) + tm.tmState.mu.Lock() + assert.False(t, tm.tmState.isPublishing) + tm.tmState.mu.Unlock() +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index 11597f60504..5a35f346f27 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -118,7 +118,15 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor } log.Infof("creating tablet picker for source keyspace/shard %v/%v with cell: %v and tabletTypes: %v", ct.source.Keyspace, ct.source.Shard, cell, tabletTypesStr) cells := strings.Split(cell, ",") - tp, err := discovery.NewTabletPicker(ts, cells, ct.source.Keyspace, ct.source.Shard, tabletTypesStr) + + sourceTopo := ts + if ct.source.ExternalCluster != "" { + sourceTopo, err = sourceTopo.OpenExternalVitessClusterServer(ctx, ct.source.ExternalCluster) + if err != nil { + return nil, err + } + } + tp, err := discovery.NewTabletPicker(sourceTopo, cells, 
ct.source.Keyspace, ct.source.Shard, tabletTypesStr) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go index c7581cdd2cd..f47a113376e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go @@ -36,9 +36,8 @@ import ( var ( testSettingsResponse = &sqltypes.Result{ - Fields: nil, - RowsAffected: 1, - InsertID: 0, + Fields: nil, + InsertID: 0, Rows: [][]sqltypes.Value{ { sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos @@ -299,9 +298,8 @@ func TestControllerStopPosition(t *testing.T) { dbClient.ExpectRequestRE("update _vt.vreplication set message='Picked source tablet.*", testDMLResponse, nil) dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil) withStop := &sqltypes.Result{ - Fields: nil, - RowsAffected: 1, - InsertID: 0, + Fields: nil, + InsertID: 0, Rows: [][]sqltypes.Value{ { sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index 7dcfd3335ed..6b5aa5644dd 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -17,6 +17,7 @@ limitations under the License. 
package vreplication import ( + "context" "errors" "flag" "fmt" @@ -24,24 +25,22 @@ import ( "sync" "time" - "vitess.io/vitess/go/sync2" - "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "vitess.io/vitess/go/vt/withddl" - - "context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/withddl" ) const ( @@ -65,6 +64,10 @@ const ( var withDDL *withddl.WithDDL +const ( + throttlerAppName = "vreplication" +) + func init() { allddls := append([]string{}, binlogplayer.CreateVReplicationTable()...) allddls = append(allddls, binlogplayer.AlterVReplicationTable...) @@ -113,6 +116,8 @@ type Engine struct { journaler map[string]*journalEvent ec *externalConnector + + throttlerClient *throttle.Client } type journalEvent struct { @@ -123,14 +128,15 @@ type journalEvent struct { // NewEngine creates a new Engine. // A nil ts means that the Engine is disabled. 
-func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon) *Engine { +func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, lagThrottler *throttle.Throttler) *Engine { vre := &Engine{ - controllers: make(map[int]*controller), - ts: ts, - cell: cell, - mysqld: mysqld, - journaler: make(map[string]*journalEvent), - ec: newExternalConnector(config.ExternalConnections), + controllers: make(map[int]*controller), + ts: ts, + cell: cell, + mysqld: mysqld, + journaler: make(map[string]*journalEvent), + ec: newExternalConnector(config.ExternalConnections), + throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerAppName, throttle.ThrottleCheckPrimaryWrite), } return vre } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index e28e400e101..1fddd276412 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -416,7 +416,7 @@ func TestWaitForPosError(t *testing.T) { dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{}}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") - want = "unexpected result: &{[] 0 0 [[]] }" + want = "unexpected result: &{[] 0 0 [[]] 0}" assert.EqualError(t, err, want, "WaitForPos:") dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ @@ -425,7 +425,7 @@ func TestWaitForPosError(t *testing.T) { sqltypes.NewVarBinary("MariaDB/0-1-1083"), }}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") - want = `unexpected result: &{[] 0 0 [[VARBINARY("MariaDB/0-1-1083")] [VARBINARY("MariaDB/0-1-1083")]] }` + want = `unexpected result: &{[] 0 0 [[VARBINARY("MariaDB/0-1-1083")] 
[VARBINARY("MariaDB/0-1-1083")]] 0}` assert.EqualError(t, err, want, "WaitForPos:") } @@ -497,6 +497,7 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("ALTER TABLE _vt.vreplication ADD COLUMN db_name.*", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("ALTER TABLE _vt.vreplication MODIFY source.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("ALTER TABLE _vt.vreplication ADD KEY.*", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("create table if not exists _vt.resharding_journal.*", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("create table if not exists _vt.copy_state.*", &sqltypes.Result{}, nil) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go index 8d3706fdbe4..20e187730ff 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go @@ -89,7 +89,7 @@ func (ec *externalConnector) Get(name string) (*mysqlConnector, error) { c := &mysqlConnector{} c.env = tabletenv.NewEnv(config, name) c.se = schema.NewEngine(c.env) - c.vstreamer = vstreamer.NewEngine(c.env, nil, c.se, "") + c.vstreamer = vstreamer.NewEngine(c.env, nil, c.se, nil, "") c.vstreamer.InitDBConfig("") c.se.InitDBConfig(c.env.Config().DB.AllPrivsWithDB()) diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 935699d7a97..10771975337 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -99,7 +99,7 @@ func TestMain(m *testing.M) { // engines cannot be initialized in testenv because it introduces // circular dependencies. 
- streamerEngine = vstreamer.NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, env.Cells[0]) + streamerEngine = vstreamer.NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) streamerEngine.InitDBConfig(env.KeyspaceName) streamerEngine.Open() defer streamerEngine.Close() diff --git a/go/vt/vttablet/tabletmanager/vreplication/relaylog.go b/go/vt/vttablet/tabletmanager/vreplication/relaylog.go index 8b886bc594a..c2eb9c4af83 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/relaylog.go +++ b/go/vt/vttablet/tabletmanager/vreplication/relaylog.go @@ -64,6 +64,7 @@ func newRelayLog(ctx context.Context, maxItems, maxSize int) *relayLog { return rl } +// Send writes events to the relay log func (rl *relayLog) Send(events []*binlogdatapb.VEvent) error { rl.mu.Lock() defer rl.mu.Unlock() @@ -83,6 +84,7 @@ func (rl *relayLog) Send(events []*binlogdatapb.VEvent) error { return nil } +// Fetch returns all existing items in the relay log, and empties the log func (rl *relayLog) Fetch() ([][]*binlogdatapb.VEvent, error) { rl.mu.Lock() defer rl.mu.Unlock() diff --git a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go index 550e75deb7f..2963dbb2f7f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go @@ -51,7 +51,7 @@ func NewReplicaConnector(connParams *mysql.ConnParams) *replicaConnector { env := tabletenv.NewEnv(config, "source") c.se = schema.NewEngine(env) c.se.SkipMetaCheck = true - c.vstreamer = vstreamer.NewEngine(env, nil, c.se, "") + c.vstreamer = vstreamer.NewEngine(env, nil, c.se, nil, "") c.se.InitDBConfig(dbconfigs.New(connParams)) // Open diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats.go b/go/vt/vttablet/tabletmanager/vreplication/stats.go index 4705302783f..1721b38f033 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/stats.go +++ 
b/go/vt/vttablet/tabletmanager/vreplication/stats.go @@ -62,6 +62,15 @@ type vrStats struct { func (st *vrStats) register() { stats.NewGaugeFunc("VReplicationStreamCount", "Number of vreplication streams", st.numControllers) stats.NewGaugeFunc("VReplicationSecondsBehindMasterMax", "Max vreplication seconds behind master", st.maxSecondsBehindMaster) + stats.Publish("VReplicationStreamState", stats.StringMapFunc(func() map[string]string { + st.mu.Lock() + defer st.mu.Unlock() + result := make(map[string]string, len(st.controllers)) + for _, ct := range st.controllers { + result[ct.workflow+"."+fmt.Sprintf("%v", ct.id)] = ct.blpStats.State.Get() + } + return result + })) stats.NewGaugesFuncWithMultiLabels( "VReplicationSecondsBehindMaster", "vreplication seconds behind master per stream", diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index b7654f4d561..8eddc1b07af 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -193,7 +193,7 @@ func buildTablePlan(tableName, filter string, pkInfoMap map[string][]*PrimaryKey query = buf.String() case key.IsKeyRange(filter): buf := sqlparser.NewTrackedBuffer(nil) - buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewTableIdent(tableName), sqlparser.NewStrLiteral([]byte(filter))) + buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewTableIdent(tableName), sqlparser.NewStrLiteral(filter)) query = buf.String() case filter == ExcludeStr: return nil, nil @@ -264,7 +264,7 @@ func buildTablePlan(tableName, filter string, pkInfoMap map[string][]*PrimaryKey if len(tpb.sendSelect.SelectExprs) == 0 { tpb.sendSelect.SelectExprs = sqlparser.SelectExprs([]sqlparser.SelectExpr{ &sqlparser.AliasedExpr{ - Expr: sqlparser.NewIntLiteral([]byte{'1'}), + Expr: sqlparser.NewIntLiteral("1"), }, }) } @@ -633,7 +633,11 @@ func (tpb 
*tablePlanBuilder) generateUpdateStatement() *sqlparser.ParsedQuery { switch cexpr.operation { case opExpr: bvf.mode = bvAfter - buf.Myprintf("%v", cexpr.expr) + if cexpr.colType == querypb.Type_JSON { + buf.Myprintf("convert(%v using utf8mb4)", cexpr.expr) + } else { + buf.Myprintf("%v", cexpr.expr) + } case opCount: buf.Myprintf("%v", cexpr.colName) case opSum: diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index b9271112f3f..1879cb62540 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -221,11 +221,18 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma var updateCopyState *sqlparser.ParsedQuery var bv map[string]*querypb.BindVariable err = vc.vr.sourceVStreamer.VStreamRows(ctx, initialPlan.SendRule.Filter, lastpkpb, func(rows *binlogdatapb.VStreamRowsResponse) error { - select { - case <-ctx.Done(): - return io.EOF - default: + for { + select { + case <-ctx.Done(): + return io.EOF + default: + } + // verify throttler is happy, otherwise keep looping + if vc.vr.vre.throttlerClient.ThrottleCheckOKOrWait(ctx) { + break + } } + if vc.tablePlan == nil { if len(rows.Fields) == 0 { return fmt.Errorf("expecting field event first, got: %v", rows) @@ -249,6 +256,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma if len(rows.Rows) == 0 { return nil } + // The number of rows we receive depends on the packet size set // for the row streamer. 
Since the packet size is roughly equivalent // to data size, this should map to a uniform amount of pages affected diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 5bf6291c5ce..966c8b07655 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -59,6 +59,9 @@ type vplayer struct { lastTimestampNs int64 // timeOffsetNs keeps track of the clock difference with respect to source tablet. timeOffsetNs int64 + // numAccumulatedHeartbeats keeps track of how many heartbeats have been received since we updated the time_updated column of _vt.vreplication + numAccumulatedHeartbeats int + // canAcceptStmtEvents is set to true if the current player can accept events in statement mode. Only true for filters that are match all. canAcceptStmtEvents bool @@ -189,6 +192,7 @@ func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { } } +// applyStmtEvent applies an actual DML statement received from the source, directly onto the backend database func (vp *vplayer) applyStmtEvent(ctx context.Context, event *binlogdatapb.VEvent) error { sql := event.Statement if sql == "" { @@ -227,6 +231,7 @@ func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.Row } func (vp *vplayer) updatePos(ts int64) (posReached bool, err error) { + vp.numAccumulatedHeartbeats = 0 update := binlogplayer.GenerateUpdatePos(vp.vr.id, vp.pos, time.Now().Unix(), ts) if _, err := vp.vr.dbClient.Execute(update); err != nil { return false, fmt.Errorf("error %v updating position", err) @@ -246,9 +251,7 @@ func (vp *vplayer) updatePos(ts int64) (posReached bool, err error) { return posReached, nil } -func (vp *vplayer) recordHeartbeat() (err error) { - tm := time.Now().Unix() - vp.vr.stats.RecordHeartbeat(tm) +func (vp *vplayer) updateCurrentTime(tm int64) error { update, err := binlogplayer.GenerateUpdateTime(vp.vr.id, tm) if err != nil { 
return err @@ -259,7 +262,20 @@ func (vp *vplayer) recordHeartbeat() (err error) { return nil } -// applyEvents is the main thread that applies the events. It has the following use +func (vp *vplayer) mustUpdateCurrentTime() bool { + return vp.numAccumulatedHeartbeats >= *vreplicationHeartbeatUpdateInterval || + vp.numAccumulatedHeartbeats >= vreplicationMinimumHeartbeatUpdateInterval +} + +func (vp *vplayer) recordHeartbeat() error { + tm := time.Now().Unix() + vp.vr.stats.RecordHeartbeat(tm) + if !vp.mustUpdateCurrentTime() { + return nil + } + vp.numAccumulatedHeartbeats = 0 + return vp.updateCurrentTime(tm) +} // applyEvents is the main thread that applies the events. It has the following use // cases to take into account: @@ -319,6 +335,11 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { defer vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), math.MaxInt64) var sbm int64 = -1 for { + // check throttler. + if !vp.vr.vre.throttlerClient.ThrottleCheckOKOrWait(ctx) { + continue + } + items, err := relay.Fetch() if err != nil { return err @@ -606,11 +627,13 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return io.EOF case binlogdatapb.VEventType_HEARTBEAT: if !vp.vr.dbClient.InTransaction { + vp.numAccumulatedHeartbeats++ err := vp.recordHeartbeat() if err != nil { return err } } } + return nil } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go index 158401e158f..31498ad2a9c 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go @@ -38,6 +38,40 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) +func TestHeartbeatFrequencyFlag(t *testing.T) { + origVReplicationHeartbeatUpdateInterval := *vreplicationHeartbeatUpdateInterval + defer func() { + *vreplicationHeartbeatUpdateInterval = 
origVReplicationHeartbeatUpdateInterval + }() + + stats := binlogplayer.NewStats() + vp := &vplayer{vr: &vreplicator{dbClient: newVDBClient(realDBClientFactory(), stats), stats: stats}} + + type testcount struct { + count int + mustUpdate bool + } + type testcase struct { + name string + interval int + counts []testcount + } + testcases := []*testcase{ + {"default frequency", 1, []testcount{{count: 0, mustUpdate: false}, {1, true}}}, + {"custom frequency", 4, []testcount{{count: 0, mustUpdate: false}, {count: 3, mustUpdate: false}, {4, true}}}, + {"minumum frequency", 61, []testcount{{count: 59, mustUpdate: false}, {count: 60, mustUpdate: true}, {61, true}}}, + } + for _, tcase := range testcases { + t.Run(tcase.name, func(t *testing.T) { + *vreplicationHeartbeatUpdateInterval = tcase.interval + for _, tcount := range tcase.counts { + vp.numAccumulatedHeartbeats = tcount.count + require.Equal(t, tcount.mustUpdate, vp.mustUpdateCurrentTime()) + } + }) + } +} + func TestVReplicationTimeUpdated(t *testing.T) { ctx := context.Background() defer deleteTablet(addTablet(100)) @@ -1270,7 +1304,16 @@ func TestPlayerRowMove(t *testing.T) { func TestPlayerTypes(t *testing.T) { log.Errorf("TestPlayerTypes: flavor is %s", env.Flavor) - + enableJSONColumnTesting := false + flavor := strings.ToLower(env.Flavor) + // Disable tests on percona (which identifies as mysql56) and mariadb platforms in CI since they + // either don't support JSON or JSON support is not enabled by default + if strings.Contains(flavor, "mysql57") || strings.Contains(flavor, "mysql80") { + log.Infof("Running JSON column type tests on flavor %s", flavor) + enableJSONColumnTesting = true + } else { + log.Warningf("Not running JSON column type tests on flavor %s", flavor) + } defer deleteTablet(addTablet(100)) execStatements(t, []string{ @@ -1303,7 +1346,7 @@ func TestPlayerTypes(t *testing.T) { "drop table binary_pk", fmt.Sprintf("drop table %s.binary_pk", vrepldb), }) - if strings.Contains(env.Flavor, 
"mysql57") { + if enableJSONColumnTesting { execStatements(t, []string{ "create table vitess_json(id int auto_increment, val1 json, val2 json, val3 json, val4 json, val5 json, primary key(id))", fmt.Sprintf("create table %s.vitess_json(id int, val1 json, val2 json, val3 json, val4 json, val5 json, primary key(id))", vrepldb), @@ -1386,8 +1429,7 @@ func TestPlayerTypes(t *testing.T) { {"a\000\000\000", "bbb"}, }, }} - - if strings.Contains(env.Flavor, "mysql57") { + if enableJSONColumnTesting { testcases = append(testcases, testcase{ input: "insert into vitess_json(val1,val2,val3,val4,val5) values (null,'{}','123','{\"a\":[42,100]}', '{\"foo\":\"bar\"}')", output: "insert into vitess_json(id,val1,val2,val3,val4,val5) values (1," + @@ -1398,6 +1440,14 @@ func TestPlayerTypes(t *testing.T) { {"1", "", "{}", "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, }, }) + testcases = append(testcases, testcase{ + input: "update vitess_json set val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4)", + output: "update vitess_json set val1=convert(null using utf8mb4), val2=convert('{}' using utf8mb4), val3=convert('123' using utf8mb4), val4=convert('{\\\"a\\\":[98,123]}' using utf8mb4), val5=convert('{}' using utf8mb4) where id=1", + table: "vitess_json", + data: [][]string{ + {"1", "", "{}", "123", `{"a": [98, 123]}`, `{}`}, + }, + }) } for _, tcases := range testcases { diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index b16ff27ac9d..7861019826b 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -41,7 +41,8 @@ var ( // idleTimeout is set to slightly above 1s, compared to heartbeatTime // set by VStreamer at slightly below 1s. This minimizes conflicts // between the two timeouts. 
- idleTimeout = 1100 * time.Millisecond + idleTimeout = 1100 * time.Millisecond + dbLockRetryDelay = 1 * time.Second relayLogMaxSize = flag.Int("relay_log_max_size", 250000, "Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time.") relayLogMaxItems = flag.Int("relay_log_max_items", 5000, "Maximum number of rows for VReplication target buffering.") @@ -53,8 +54,10 @@ var ( // outages. Keep this high if // you have too many streams the extra write qps or cpu load due to these updates are unacceptable // you have too many streams and/or a large source field (lot of participating tables) which generates unacceptable increase in your binlog size + vreplicationHeartbeatUpdateInterval = flag.Int("vreplication_heartbeat_update_interval", 1, "Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream is updated when idling") // vreplicationMinimumHeartbeatUpdateInterval overrides vreplicationHeartbeatUpdateInterval if the latter is higher than this // to ensure that it satisfies liveness criteria implicitly expected by internal processes like Online DDL + vreplicationMinimumHeartbeatUpdateInterval = 60 vreplicationExperimentalFlags = flag.Int64("vreplication_experimental_flags", 0, "(Bitmask) of experimental features in vreplication to enable") @@ -99,6 +102,10 @@ type vreplicator struct { // More advanced constructs can be used. Please see the table plan builder // documentation for more info.
func newVReplicator(id uint32, source *binlogdatapb.BinlogSource, sourceVStreamer VStreamerClient, stats *binlogplayer.Stats, dbClient binlogplayer.DBClient, mysqld mysqlctl.MysqlDaemon, vre *Engine) *vreplicator { + if *vreplicationHeartbeatUpdateInterval > vreplicationMinimumHeartbeatUpdateInterval { + log.Warningf("the supplied value for vreplication_heartbeat_update_interval:%d seconds is larger than the maximum allowed:%d seconds, vreplication will fallback to %d", + *vreplicationHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval) + } return &vreplicator{ vre: vre, id: id, @@ -363,7 +370,7 @@ func (vr *vreplicator) getSettingFKCheck() error { if err != nil { return err } - if qr.RowsAffected != 1 || len(qr.Fields) != 1 { + if len(qr.Rows) != 1 || len(qr.Fields) != 1 { return fmt.Errorf("unable to select @@foreign_key_checks") } vr.originalFKCheckSetting, err = evalengine.ToInt64(qr.Rows[0][0]) diff --git a/go/vt/vttablet/tabletserver/cached_size.go b/go/vt/vttablet/tabletserver/cached_size.go new file mode 100644 index 00000000000..7c803f4fe1e --- /dev/null +++ b/go/vt/vttablet/tabletserver/cached_size.go @@ -0,0 +1,49 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. 
+ +package tabletserver + +func (cached *TabletPlan) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(128) + } + // field Plan *vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder.Plan + size += cached.Plan.CachedSize(true) + // field Original string + size += int64(len(cached.Original)) + // field Fields []*vitess.io/vitess/go/vt/proto/query.Field + { + size += int64(cap(cached.Fields)) * int64(8) + for _, elem := range cached.Fields { + size += elem.CachedSize(true) + } + } + // field Rules *vitess.io/vitess/go/vt/vttablet/tabletserver/rules.Rules + size += cached.Rules.CachedSize(true) + // field Authorized []*vitess.io/vitess/go/vt/tableacl.ACLResult + { + size += int64(cap(cached.Authorized)) * int64(8) + for _, elem := range cached.Authorized { + size += elem.CachedSize(true) + } + } + return size +} diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index 24250f1de1c..cabd8cd3c0a 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -172,6 +172,23 @@ func (dbc *DBConn) ExecOnce(ctx context.Context, query string, maxrows int, want return dbc.execOnce(ctx, query, maxrows, wantfields) } +// FetchNext returns the next result set. +func (dbc *DBConn) FetchNext(ctx context.Context, maxrows int, wantfields bool) (*sqltypes.Result, error) { + // Check if the context is already past its deadline before + // trying to fetch the next result. + select { + case <-ctx.Done(): + return nil, fmt.Errorf("%v before reading next result set", ctx.Err()) + default: + } + res, _, _, err := dbc.conn.ReadQueryResult(maxrows, wantfields) + if err != nil { + return nil, err + } + return res, err + +} + // Stream executes the query and streams the results. 
func (dbc *DBConn) Stream(ctx context.Context, query string, callback func(*sqltypes.Result) error, alloc func() *sqltypes.Result, streamBufferSize int, includedFields querypb.ExecuteOptions_IncludedFields) error { span, ctx := trace.NewSpan(ctx, "DBConn.Stream") @@ -358,6 +375,11 @@ func (dbc *DBConn) ID() int64 { return dbc.conn.ID() } +// BaseShowTables returns a query that shows tables and their sizes +func (dbc *DBConn) BaseShowTables() string { + return dbc.conn.BaseShowTables() +} + func (dbc *DBConn) reconnect(ctx context.Context) error { dbc.conn.Close() // Reuse MySQLTimings from dbc.conn. diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index e41c932e15e..0459898eba2 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -54,7 +54,7 @@ func TestDBConnExec(t *testing.T) { Fields: []*querypb.Field{ {Type: sqltypes.VarChar}, }, - RowsAffected: 1, + RowsAffected: 0, Rows: [][]sqltypes.Value{ {sqltypes.NewVarChar("123")}, }, @@ -126,7 +126,7 @@ func TestDBConnDeadline(t *testing.T) { Fields: []*querypb.Field{ {Type: sqltypes.VarChar}, }, - RowsAffected: 1, + RowsAffected: 0, Rows: [][]sqltypes.Value{ {sqltypes.NewVarChar("123")}, }, diff --git a/go/vt/vttablet/tabletserver/controller.go b/go/vt/vttablet/tabletserver/controller.go index 7590c8db82e..c95a2a1646d 100644 --- a/go/vt/vttablet/tabletserver/controller.go +++ b/go/vt/vttablet/tabletserver/controller.go @@ -22,11 +22,11 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vttablet/onlineddl" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/vexec" "time" @@ -83,7 +83,7 @@ type Controller 
interface { QueryService() queryservice.QueryService // OnlineDDLExecutor the online DDL executor used by this Controller - OnlineDDLExecutor() *onlineddl.Executor + OnlineDDLExecutor() vexec.Executor // SchemaEngine returns the SchemaEngine object used by this Controller SchemaEngine() *schema.Engine diff --git a/go/vt/vttablet/tabletserver/debugenv.go b/go/vt/vttablet/tabletserver/debugenv.go index 50a200ec3ad..87d2a81a424 100644 --- a/go/vt/vttablet/tabletserver/debugenv.go +++ b/go/vt/vttablet/tabletserver/debugenv.go @@ -23,6 +23,7 @@ import ( "net/http" "strconv" "text/template" + "time" "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" @@ -72,6 +73,15 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) f(ival) msg = fmt.Sprintf("Setting %v to: %v", varname, value) } + setDurationVal := func(f func(time.Duration)) { + durationVal, err := time.ParseDuration(value) + if err != nil { + msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err) + return + } + f(durationVal) + msg = fmt.Sprintf("Setting %v to: %v", varname, value) + } switch varname { case "PoolSize": setIntVal(tsv.SetPoolSize) @@ -85,6 +95,10 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) setIntVal(tsv.SetMaxResultSize) case "WarnResultSize": setIntVal(tsv.SetWarnResultSize) + case "UnhealthyThreshold": + setDurationVal(tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Set) + setDurationVal(tsv.hs.SetUnhealthyThreshold) + setDurationVal(tsv.sm.SetUnhealthyThreshold) case "Consolidator": tsv.SetConsolidatorMode(value) msg = fmt.Sprintf("Setting %v to: %v", varname, value) @@ -98,12 +112,19 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) Value: fmt.Sprintf("%v", f()), }) } + addDurationVar := func(varname string, f func() time.Duration) { + vars = append(vars, envValue{ + VarName: varname, + Value: fmt.Sprintf("%v", f()), + }) + } addIntVar("PoolSize", tsv.PoolSize) 
addIntVar("StreamPoolSize", tsv.StreamPoolSize) addIntVar("TxPoolSize", tsv.TxPoolSize) addIntVar("QueryCacheCapacity", tsv.QueryPlanCacheCap) addIntVar("MaxResultSize", tsv.MaxResultSize) addIntVar("WarnResultSize", tsv.WarnResultSize) + addDurationVar("UnhealthyThreshold", tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Get) vars = append(vars, envValue{ VarName: "Consolidator", Value: tsv.ConsolidatorMode(), diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index 74402c2886a..35a5ce2ef25 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -22,12 +22,12 @@ import ( "fmt" "math" "math/rand" - "net/http" "sort" "sync" "sync/atomic" "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" @@ -43,9 +43,6 @@ import ( const ( leaderCheckInterval = 5 * time.Second purgeReentranceInterval = 1 * time.Minute - // throttleCheckDuration controls both how frequently the throttler is checked. as well as - // how long to sleep if throttler blocks us - throttleCheckDuration = 250 * time.Millisecond // evacHours is a hard coded, reasonable time for a table to spend in EVAC state evacHours = 72 throttlerAppName = "tablegc" @@ -58,12 +55,9 @@ var checkInterval = flag.Duration("gc_check_interval", 1*time.Hour, "Interval be var gcLifecycle = flag.String("table_gc_lifecycle", "hold,purge,evac,drop", "States for a DROP TABLE garbage collection cycle. 
Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included)") var ( - sqlPurgeTable = `delete from %a limit 50` - sqlShowVtTables = `show tables like '\_vt\_%'` - sqlDropTable = "drop table if exists `%a`" - throttleFlags = &throttle.CheckFlags{ - LowPriority: true, - } + sqlPurgeTable = `delete from %a limit 50` + sqlShowVtTables = `show tables like '\_vt\_%'` + sqlDropTable = "drop table if exists `%a`" purgeReentranceFlag int64 ) @@ -91,17 +85,16 @@ type TableGC struct { shard string dbName string - lagThrottler *throttle.Throttler - isPrimary int64 - isOpen int64 + isPrimary int64 + isOpen int64 + + throttlerClient *throttle.Client env tabletenv.Env pool *connpool.Pool tabletTypeFunc func() topodatapb.TabletType ts *topo.Server - lastSuccessfulThrottleCheck time.Time - initMutex sync.Mutex purgeMutex sync.Mutex @@ -130,9 +123,9 @@ type GCStatus struct { // NewTableGC creates a table collector func NewTableGC(env tabletenv.Env, ts *topo.Server, tabletTypeFunc func() topodatapb.TabletType, lagThrottler *throttle.Throttler) *TableGC { collector := &TableGC{ - lagThrottler: lagThrottler, - isPrimary: 0, - isOpen: 0, + throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerAppName, throttle.ThrottleCheckPrimaryWrite), + isPrimary: 0, + isOpen: 0, env: env, tabletTypeFunc: tabletTypeFunc, @@ -437,11 +430,33 @@ func (collector *TableGC) purge(ctx context.Context) (tableName string, err erro // This saves a lot of load from the replication stream, avoiding excessive lags. It also // avoids excessive IO on the replicas. // (note that the user may skip the PURGE step if they want, but the step is on by default) - if _, err := conn.ExecuteFetch("SET sql_log_bin = OFF", 0, false); err != nil { + + // However, disabling SQL_LOG_BIN requires SUPER privileges, and we don't know that we have that.
+ // Any externally managed database might not give SUPER privileges to the vitess accounts, and this is known to be the case for Amazon Aurora. + // We therefore disable log bin on best-effort basis. The logic is still fine and sound if binary logging + // is left enabled. We just lose some optimization. + disableLogBin := func() (bool, error) { + _, err := conn.ExecuteFetch("SET sql_log_bin = OFF", 0, false) + if err == nil { + return true, nil + } + if merr, ok := err.(*mysql.SQLError); ok { + if merr.Num == mysql.ERSpecifiedAccessDenied { + // We do not have privileges to disable binary logging. That's fine, we're on best effort, + // so we're going to silently ignore this error. + return false, nil + } + } + // We do not tolerate other errors, though. + return false, err + } + sqlLogBinDisabled, err := disableLogBin() + if err != nil { return tableName, err } + defer func() { - if !conn.IsClosed() { + if sqlLogBinDisabled && !conn.IsClosed() { if _, err := conn.ExecuteFetch("SET sql_log_bin = ON", 0, false); err != nil { log.Errorf("TableGC: error setting sql_log_bin = ON: %+v", err) // a followup defer() will run conn.Close() at any case. @@ -451,15 +466,8 @@ func (collector *TableGC) purge(ctx context.Context) (tableName string, err erro log.Infof("TableGC: purge begin for %s", tableName) for { - if time.Since(collector.lastSuccessfulThrottleCheck) > throttleCheckDuration { - // It's time to run a throttler check - checkResult := collector.lagThrottler.Check(ctx, throttlerAppName, "", throttleFlags) - if checkResult.StatusCode != http.StatusOK { - // sorry, we got throttled. Back off, sleep, try again - time.Sleep(throttleCheckDuration) - continue - } - collector.lastSuccessfulThrottleCheck = time.Now() + if !collector.throttlerClient.ThrottleCheckOKOrWait(ctx) { + continue } // OK, we're clear to go! 
diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go index b42fcb4f1d6..36e856d3e8e 100644 --- a/go/vt/vttablet/tabletserver/health_streamer.go +++ b/go/vt/vttablet/tabletserver/health_streamer.go @@ -28,6 +28,7 @@ import ( "github.com/golang/protobuf/proto" "vitess.io/vitess/go/history" + "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -51,7 +52,7 @@ var ( type healthStreamer struct { stats *tabletenv.Stats degradedThreshold time.Duration - unhealthyThreshold time.Duration + unhealthyThreshold sync2.AtomicDuration mu sync.Mutex ctx context.Context @@ -66,7 +67,7 @@ func newHealthStreamer(env tabletenv.Env, alias topodatapb.TabletAlias) *healthS return &healthStreamer{ stats: env.Stats(), degradedThreshold: env.Config().Healthcheck.DegradedThresholdSeconds.Get(), - unhealthyThreshold: env.Config().Healthcheck.UnhealthyThresholdSeconds.Get(), + unhealthyThreshold: sync2.NewAtomicDuration(env.Config().Healthcheck.UnhealthyThresholdSeconds.Get()), clients: make(map[chan *querypb.StreamHealthResponse]struct{}), state: &querypb.StreamHealthResponse{ @@ -220,7 +221,7 @@ func (hs *healthStreamer) AppendDetails(details []*kv) []*kv { sbm := time.Duration(hs.state.RealtimeStats.SecondsBehindMaster) * time.Second class := healthyClass switch { - case sbm > hs.unhealthyThreshold: + case sbm > hs.unhealthyThreshold.Get(): class = unhealthyClass case sbm > hs.degradedThreshold: class = unhappyClass @@ -240,3 +241,17 @@ func (hs *healthStreamer) AppendDetails(details []*kv) []*kv { return details } + +func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) { + hs.unhealthyThreshold.Set(v) + shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse) + for ch := range hs.clients { + select { + case ch <- shr: + default: + log.Info("Resetting health streamer clients due to unhealthy threshold change") 
+ close(ch) + delete(hs.clients, ch) + } + } +} diff --git a/go/vt/vttablet/tabletserver/planbuilder/builder.go b/go/vt/vttablet/tabletserver/planbuilder/builder.go index f2bdbec76ab..51b72d3e62f 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/builder.go +++ b/go/vt/vttablet/tabletserver/planbuilder/builder.go @@ -33,9 +33,6 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan FieldQuery: GenerateFieldQuery(sel), FullQuery: GenerateLimitQuery(sel), } - if sel.Lock != sqlparser.NoLock { - plan.PlanID = PlanSelectLock - } if sel.Where != nil { comp, ok := sel.Where.Expr.(*sqlparser.ComparisonExpr) @@ -46,7 +43,7 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan } // Check if it's a NEXT VALUE statement. - if nextVal, ok := sel.SelectExprs[0].(sqlparser.Nextval); ok { + if nextVal, ok := sel.SelectExprs[0].(*sqlparser.Nextval); ok { if plan.Table == nil || plan.Table.Type != schema.Sequence { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s is not a sequence", sqlparser.String(sel.From)) } @@ -129,33 +126,46 @@ func analyzeInsert(ins *sqlparser.Insert, tables map[string]*schema.Table) (plan func analyzeShow(show *sqlparser.Show, dbName string) (plan *Plan, err error) { switch showInternal := show.Internal.(type) { - case *sqlparser.ShowLegacy: - if showInternal.Type == sqlparser.KeywordString(sqlparser.TABLES) { + case *sqlparser.ShowBasic: + if showInternal.Command == sqlparser.Table { // rewrite WHERE clause if it exists // `where Tables_in_Keyspace` => `where Tables_in_DbName` - if showInternal.ShowTablesOpt != nil && showInternal.ShowTablesOpt.Filter != nil { - filter := showInternal.ShowTablesOpt.Filter.Filter - if filter != nil { - sqlparser.Rewrite(filter, func(cursor *sqlparser.Cursor) bool { - switch n := cursor.Node().(type) { - case *sqlparser.ColName: - if n.Qualifier.IsEmpty() && strings.HasPrefix(n.Name.Lowered(), "tables_in_") { - 
cursor.Replace(sqlparser.NewColName("Tables_in_" + dbName)) - } - } - return true - }, nil) - } + if showInternal.Filter != nil { + showTableRewrite(showInternal, dbName) } - return &Plan{ - PlanID: PlanShowTables, - FullQuery: GenerateFullQuery(show), - }, nil } + return &Plan{ + PlanID: PlanShow, + FullQuery: GenerateFullQuery(show), + }, nil + case *sqlparser.ShowCreate: + if showInternal.Command == sqlparser.CreateDb && !sqlparser.SystemSchema(showInternal.Op.Name.String()) { + showInternal.Op.Name = sqlparser.NewTableIdent(dbName) + } + return &Plan{ + PlanID: PlanShow, + FullQuery: GenerateFullQuery(show), + }, nil } return &Plan{PlanID: PlanOtherRead}, nil } +func showTableRewrite(show *sqlparser.ShowBasic, dbName string) { + filter := show.Filter.Filter + if filter == nil { + return + } + _ = sqlparser.Rewrite(filter, func(cursor *sqlparser.Cursor) bool { + switch n := cursor.Node().(type) { + case *sqlparser.ColName: + if n.Qualifier.IsEmpty() && strings.HasPrefix(n.Name.Lowered(), "tables_in_") { + cursor.Replace(sqlparser.NewColName("Tables_in_" + dbName)) + } + } + return true + }, nil) +} + func analyzeSet(set *sqlparser.Set) (plan *Plan) { return &Plan{ PlanID: PlanSet, diff --git a/go/vt/vttablet/tabletserver/planbuilder/cached_size.go b/go/vt/vttablet/tabletserver/planbuilder/cached_size.go new file mode 100644 index 00000000000..a77f6083d5f --- /dev/null +++ b/go/vt/vttablet/tabletserver/planbuilder/cached_size.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package planbuilder + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *Permission) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field TableName string + size += int64(len(cached.TableName)) + return size +} +func (cached *Plan) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(168) + } + // field Table *vitess.io/vitess/go/vt/vttablet/tabletserver/schema.Table + size += cached.Table.CachedSize(true) + // field Permissions []vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder.Permission + { + size += int64(cap(cached.Permissions)) * int64(24) + for _, elem := range cached.Permissions { + size += elem.CachedSize(false) + } + } + // field FieldQuery *vitess.io/vitess/go/vt/sqlparser.ParsedQuery + size += cached.FieldQuery.CachedSize(true) + // field FullQuery *vitess.io/vitess/go/vt/sqlparser.ParsedQuery + size += cached.FullQuery.CachedSize(true) + // field NextCount vitess.io/vitess/go/sqltypes.PlanValue + size += cached.NextCount.CachedSize(false) + // field WhereClause *vitess.io/vitess/go/vt/sqlparser.ParsedQuery + size += cached.WhereClause.CachedSize(true) + // field FullStmt vitess.io/vitess/go/vt/sqlparser.Statement + if cc, ok := cached.FullStmt.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission.go b/go/vt/vttablet/tabletserver/planbuilder/permission.go index 1d64ea43c56..a762160758e 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/permission.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission.go @@ -47,17 +47,19 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission { case *sqlparser.Delete: permissions = 
buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, permissions) permissions = buildSubqueryPermissions(node, tableacl.READER, permissions) - case *sqlparser.Set, *sqlparser.Show, *sqlparser.OtherRead, *sqlparser.Explain: - // no-op case sqlparser.DDLStatement: for _, t := range node.AffectedTables() { permissions = buildTableNamePermissions(t, tableacl.ADMIN, permissions) } - case *sqlparser.OtherAdmin: - // no op - case *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, *sqlparser.Load: - // no op - case *sqlparser.Savepoint, *sqlparser.Release, *sqlparser.SRollback: + case *sqlparser.AlterMigration: + permissions = []Permission{} // TODO(shlomi) what are the correct permissions here? Table is unknown + case *sqlparser.Flush: + for _, t := range node.TableNames { + permissions = buildTableNamePermissions(t, tableacl.ADMIN, permissions) + } + case *sqlparser.OtherAdmin, *sqlparser.CallProc, *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, + *sqlparser.Load, *sqlparser.Savepoint, *sqlparser.Release, *sqlparser.SRollback, *sqlparser.Set, *sqlparser.Show, + *sqlparser.OtherRead, sqlparser.Explain: // no op default: panic(fmt.Errorf("BUG: unexpected statement type: %T", node)) diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go index cb9b3a97543..17baa72595e 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go @@ -85,6 +85,15 @@ func TestBuildPermissions(t *testing.T) { TableName: "t2", Role: tableacl.ADMIN, }}, + }, { + input: "flush tables t1, t2", + output: []Permission{{ + TableName: "t1", + Role: tableacl.ADMIN, + }, { + TableName: "t2", + Role: tableacl.ADMIN, + }}, }, { input: "drop table t", output: []Permission{{ diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index f9a4f0f543e..b4eb9718960 100644 --- 
a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -30,7 +30,7 @@ import ( ) var ( - execLimit = &sqlparser.Limit{Rowcount: sqlparser.NewArgument([]byte(":#maxLimit"))} + execLimit = &sqlparser.Limit{Rowcount: sqlparser.NewArgument(":#maxLimit")} // PassthroughDMLs will return plans that pass-through the DMLs without changing them. PassthroughDMLs = false @@ -44,7 +44,6 @@ type PlanType int // The following are PlanType values. const ( PlanSelect PlanType = iota - PlanSelectLock PlanNextval PlanSelectImpossible PlanInsert @@ -65,18 +64,21 @@ const ( PlanSavepoint PlanRelease PlanSRollback - PlanShowTables + PlanShow // PlanLoad is for Load data statements PlanLoad + // PlanFlush is for FLUSH statements + PlanFlush PlanLockTables PlanUnlockTables + PlanCallProc + PlanAlterMigration NumPlans ) // Must exactly match order of plan constants. var planName = []string{ "Select", - "SelectLock", "Nextval", "SelectImpossible", "Insert", @@ -94,10 +96,13 @@ var planName = []string{ "Savepoint", "Release", "RollbackSavepoint", - "ShowTables", + "Show", "Load", + "Flush", "LockTables", "UnlockTables", + "CallProcedure", + "AlterMigration", } func (pt PlanType) String() string { @@ -129,7 +134,7 @@ func PlanByNameIC(s string) (pt PlanType, ok bool) { // IsSelect returns true if PlanType is about a select query. func (pt PlanType) IsSelect() bool { - return pt == PlanSelect || pt == PlanSelectLock || pt == PlanSelectImpossible + return pt == PlanSelect || pt == PlanSelectImpossible } // MarshalJSON returns a json string for PlanType. @@ -159,6 +164,9 @@ type Plan struct { // WhereClause is set for DMLs. It is used by the hot row protection // to serialize e.g. UPDATEs going to the same row. WhereClause *sqlparser.ParsedQuery + + // FullStmt can be used when the query does not operate on tables + FullStmt sqlparser.Statement } // TableName returns the table name for the plan. 
@@ -206,9 +214,11 @@ func Build(statement sqlparser.Statement, tables map[string]*schema.Table, isRes fullQuery = GenerateFullQuery(stmt) } plan = &Plan{PlanID: PlanDDL, FullQuery: fullQuery} + case *sqlparser.AlterMigration: + plan, err = &Plan{PlanID: PlanAlterMigration, FullStmt: stmt}, nil case *sqlparser.Show: plan, err = analyzeShow(stmt, dbName) - case *sqlparser.OtherRead, *sqlparser.Explain: + case *sqlparser.OtherRead, sqlparser.Explain: plan, err = &Plan{PlanID: PlanOtherRead}, nil case *sqlparser.OtherAdmin: plan, err = &Plan{PlanID: PlanOtherAdmin}, nil @@ -220,6 +230,10 @@ func Build(statement sqlparser.Statement, tables map[string]*schema.Table, isRes plan, err = &Plan{PlanID: PlanSRollback}, nil case *sqlparser.Load: plan, err = &Plan{PlanID: PlanLoad}, nil + case *sqlparser.Flush: + plan, err = &Plan{PlanID: PlanFlush, FullQuery: GenerateFullQuery(stmt)}, nil + case *sqlparser.CallProc: + plan, err = &Plan{PlanID: PlanCallProc, FullQuery: GenerateFullQuery(stmt)}, nil default: return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "invalid SQL") } @@ -256,7 +270,7 @@ func BuildStreaming(sql string, tables map[string]*schema.Table, isReservedConn return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "select with lock not allowed for streaming") } plan.Table = lookupTable(stmt.From, tables) - case *sqlparser.OtherRead, *sqlparser.Show, *sqlparser.Union: + case *sqlparser.OtherRead, *sqlparser.Show, *sqlparser.Union, *sqlparser.CallProc, sqlparser.Explain: // pass default: return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "'%v' not allowed for streaming", sqlparser.String(stmt)) @@ -296,11 +310,7 @@ func checkForPoolingUnsafeConstructs(expr sqlparser.SQLNode) error { return sqlparser.Walk(func(in sqlparser.SQLNode) (kontinue bool, err error) { switch node := in.(type) { case *sqlparser.Set: - for _, setExpr := range node.Exprs { - if setExpr.Name.AtCount() > 0 { - return false, genError(node) - } - } + return false, genError(node) 
case *sqlparser.FuncExpr: if sqlparser.IsLockingFunc(node) { return false, genError(node) diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go index d2898320a3d..1ea1d7eaed0 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go @@ -95,7 +95,7 @@ func TestPlan(t *testing.T) { bout, _ := json.MarshalIndent(plan, "", " ") out = string(bout) } - fmt.Printf("\"%s\"\n%s\n\n", tcase.input, out) + fmt.Printf("\"in> %s\"\nout>%s\nexpected: %s\n\n", tcase.input, out, tcase.output) } }) } diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt index a69b3562f6f..0ae5e18737e 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt @@ -197,7 +197,7 @@ # for update "select eid from a for update" { - "PlanID": "SelectLock", + "PlanID": "Select", "TableName": "a", "Permissions": [ { @@ -212,7 +212,7 @@ # lock in share mode "select eid from a lock in share mode" { - "PlanID": "SelectLock", + "PlanID": "Select", "TableName": "a", "Permissions": [ { @@ -595,38 +595,6 @@ options:PassthroughDMLs "FullQuery": "delete from a limit 10" } -# int -"set a=1" -{ - "PlanID": "Set", - "TableName": "", - "FullQuery": "set a = 1" -} - -# float -"set a=1.2" -{ - "PlanID": "Set", - "TableName": "", - "FullQuery": "set a = 1.2" -} - -# string -"set a='b'" -{ - "PlanID": "Set", - "TableName": "", - "FullQuery": "set a = 'b'" -} - -# multi -"set a=1, b=2" -{ - "PlanID": "Set", - "TableName": "", - "FullQuery": "set a = 1, b = 2" -} - # create "create table a(a int, b varchar(8))" { @@ -715,7 +683,8 @@ options:PassthroughDMLs "TableName": "b", "Role": 2 } - ] + ], + "FullQuery":"rename table a to b" } # multi-rename @@ -740,7 +709,8 @@ options:PassthroughDMLs "TableName": "a", "Role": 2 } - ] 
+ ], + "FullQuery":"rename table a to b, b to a" } # drop @@ -757,6 +727,20 @@ options:PassthroughDMLs "FullQuery": "drop table a" } +# drop +"truncate table a" +{ + "PlanID": "DDL", + "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + } + ], + "FullQuery": "truncate table a" +} + # multi-drop "drop table a, b" { @@ -824,7 +808,7 @@ options:PassthroughDMLs # show tables #1 "show tables like 'key%'" { - "PlanID": "ShowTables", + "PlanID": "Show", "TableName":"", "FullQuery": "show tables like 'key%'" } @@ -832,7 +816,7 @@ options:PassthroughDMLs # show tables #2 "show tables where Tables_in_keyspace='apa'" { - "PlanID": "ShowTables", + "PlanID": "Show", "TableName":"", "FullQuery": "show tables where Tables_in_dbName = 'apa'" } @@ -840,15 +824,41 @@ options:PassthroughDMLs # show table status #1 "show table status like 'key%'" { - "PlanID": "OtherRead", - "TableName": "" + "PlanID": "Show", + "TableName":"", + "FullQuery": "show table status like 'key%'" } # show table status #2 "show table status where Name='apa'" { - "PlanID": "OtherRead", - "TableName": "" + "PlanID": "Show", + "TableName":"", + "FullQuery": "show table status where `Name` = 'apa'" +} + +# show create table +"show create table t1" +{ + "PlanID": "Show", + "TableName": "", + "FullQuery": "show create table t1" +} + +# show create database system_schema +"show create database mysql" +{ + "PlanID": "Show", + "TableName": "", + "FullQuery": "show create database mysql" +} + +# show create database +"show create database anything" +{ + "PlanID": "Show", + "TableName": "", + "FullQuery": "show create database dbName" } # load data @@ -903,3 +913,43 @@ options:PassthroughDMLs ], "FullQuery":"drop view a, b" } + +# create table with function as a default value +"create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))" +{ + "PlanID": "DDL", + "TableName": "", + "Permissions": [ + { + "TableName": "function_default", + "Role": 2 + } + ], + "FullQuery": "create table 
function_default (\n\tx varchar(25) default TRIM(' check ')\n)" +} + +# flush statement +"flush tables a,b" +{ + "PlanID": "Flush", + "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + }, + { + "TableName": "b", + "Role": 2 + } + ], + "FullQuery": "flush tables a, b" +} + +# call proc +"call getAllTheThings()" +{ + "PlanID": "CallProcedure", + "TableName": "", + "FullQuery": "call getAllTheThings()" +} diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/pool_unsafe_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/pool_unsafe_cases.txt index 2691c4928b7..9891736fe2a 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/pool_unsafe_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/pool_unsafe_cases.txt @@ -19,5 +19,13 @@ "release_lock('foo') not allowed without a reserved connections" # setting system variables must happen inside reserved connections -"set @sql_safe_updates = false" -"set @sql_safe_updates = false not allowed without a reserved connections" +"set sql_safe_updates = false" +"set sql_safe_updates = false not allowed without a reserved connections" + +# setting system variables must happen inside reserved connections +"set @@sql_safe_updates = false" +"set @@sql_safe_updates = false not allowed without a reserved connections" + +# setting system variables must happen inside reserved connections +"set @udv = false" +"set @udv = false not allowed without a reserved connections" diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt index 4724e019b94..16ca07e1907 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt @@ -50,7 +50,7 @@ { "PlanID": "SelectStream", "TableName": "", - "FullQuery": "otherread" + "FullQuery": "explain foo" } # dml diff --git a/go/vt/vttablet/tabletserver/query_engine.go 
b/go/vt/vttablet/tabletserver/query_engine.go index a4c6f27ddff..bcca89f3e93 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -22,6 +22,7 @@ import ( "fmt" "net/http" "sync" + "sync/atomic" "time" "context" @@ -55,43 +56,37 @@ import ( // and track stats. type TabletPlan struct { *planbuilder.Plan + Original string Fields []*querypb.Field Rules *rules.Rules Authorized []*tableacl.ACLResult - mu sync.Mutex - QueryCount int64 - Time time.Duration - MysqlTime time.Duration - RowCount int64 - ErrorCount int64 -} - -// Size allows TabletPlan to be in cache.LRUCache. -func (*TabletPlan) Size() int { - return 1 + QueryCount uint64 + Time uint64 + MysqlTime uint64 + RowsAffected uint64 + RowsReturned uint64 + ErrorCount uint64 } // AddStats updates the stats for the current TabletPlan. -func (ep *TabletPlan) AddStats(queryCount int64, duration, mysqlTime time.Duration, rowCount, errorCount int64) { - ep.mu.Lock() - ep.QueryCount += queryCount - ep.Time += duration - ep.MysqlTime += mysqlTime - ep.RowCount += rowCount - ep.ErrorCount += errorCount - ep.mu.Unlock() +func (ep *TabletPlan) AddStats(queryCount uint64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount uint64) { + atomic.AddUint64(&ep.QueryCount, queryCount) + atomic.AddUint64(&ep.Time, uint64(duration)) + atomic.AddUint64(&ep.MysqlTime, uint64(mysqlTime)) + atomic.AddUint64(&ep.RowsAffected, rowsAffected) + atomic.AddUint64(&ep.RowsReturned, rowsReturned) + atomic.AddUint64(&ep.ErrorCount, errorCount) } // Stats returns the current stats of TabletPlan. 
-func (ep *TabletPlan) Stats() (queryCount int64, duration, mysqlTime time.Duration, rowCount, errorCount int64) { - ep.mu.Lock() - queryCount = ep.QueryCount - duration = ep.Time - mysqlTime = ep.MysqlTime - rowCount = ep.RowCount - errorCount = ep.ErrorCount - ep.mu.Unlock() +func (ep *TabletPlan) Stats() (queryCount uint64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount uint64) { + queryCount = atomic.LoadUint64(&ep.QueryCount) + duration = time.Duration(atomic.LoadUint64(&ep.Time)) + mysqlTime = time.Duration(atomic.LoadUint64(&ep.MysqlTime)) + rowsAffected = atomic.LoadUint64(&ep.RowsAffected) + rowsReturned = atomic.LoadUint64(&ep.RowsReturned) + errorCount = atomic.LoadUint64(&ep.ErrorCount) return } @@ -120,7 +115,7 @@ type QueryEngine struct { // mu protects the following fields. mu sync.RWMutex tables map[string]*schema.Table - plans *cache.LRUCache + plans cache.Cache queryRuleSources *rules.Map // Pools @@ -166,11 +161,17 @@ type QueryEngine struct { // You must call this only once. 
func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { config := env.Config() + cacheCfg := &cache.Config{ + MaxEntries: int64(config.QueryCacheSize), + MaxMemoryUsage: config.QueryCacheMemory, + LFU: config.QueryCacheLFU, + } + qe := &QueryEngine{ env: env, se: se, tables: make(map[string]*schema.Table), - plans: cache.NewLRUCache(int64(config.QueryCacheSize)), + plans: cache.NewDefaultCacheImpl(cacheCfg), queryRuleSources: rules.NewMap(), } @@ -215,13 +216,12 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { env.Exporter().NewGaugeFunc("StreamBufferSize", "Query engine stream buffer size", qe.streamBufferSize.Get) env.Exporter().NewCounterFunc("TableACLExemptCount", "Query engine table ACL exempt count", qe.tableaclExemptCount.Get) - env.Exporter().NewGaugeFunc("QueryCacheLength", "Query engine query cache length", qe.plans.Length) - env.Exporter().NewGaugeFunc("QueryCacheSize", "Query engine query cache size", qe.plans.Size) - env.Exporter().NewGaugeFunc("QueryCacheCapacity", "Query engine query cache capacity", qe.plans.Capacity) + env.Exporter().NewGaugeFunc("QueryCacheLength", "Query engine query cache length", func() int64 { + return int64(qe.plans.Len()) + }) + env.Exporter().NewGaugeFunc("QueryCacheSize", "Query engine query cache size", qe.plans.UsedCapacity) + env.Exporter().NewGaugeFunc("QueryCacheCapacity", "Query engine query cache capacity", qe.plans.MaxCapacity) env.Exporter().NewCounterFunc("QueryCacheEvictions", "Query engine query cache evictions", qe.plans.Evictions) - env.Exporter().Publish("QueryCacheOldest", stats.StringFunc(func() string { - return fmt.Sprintf("%v", qe.plans.Oldest()) - })) qe.queryCounts = env.Exporter().NewCountersWithMultiLabels("QueryCounts", "query counts", []string{"Table", "Plan"}) qe.queryTimes = env.Exporter().NewCountersWithMultiLabels("QueryTimesNs", "query times in ns", []string{"Table", "Plan"}) qe.queryRowCounts = 
env.Exporter().NewCountersWithMultiLabels("QueryRowCounts", "query row counts", []string{"Table", "Plan"}) @@ -290,6 +290,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats defer span.Finish() if plan := qe.getQuery(sql); plan != nil { + logStats.CachedPlan = true return plan, nil } @@ -309,11 +310,11 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats if err != nil { return nil, err } - plan := &TabletPlan{Plan: splan} + plan := &TabletPlan{Plan: splan, Original: sql} plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableName().String()) plan.buildAuthorized() if plan.PlanID.IsSelect() { - if qe.enableQueryPlanFieldCaching && plan.FieldQuery != nil { + if !skipQueryPlanCache && qe.enableQueryPlanFieldCaching && plan.FieldQuery != nil { conn, err := qe.conns.Get(ctx) if err != nil { return nil, err @@ -347,7 +348,7 @@ func (qe *QueryEngine) GetStreamPlan(sql string, isReservedConn bool) (*TabletPl if err != nil { return nil, err } - plan := &TabletPlan{Plan: splan} + plan := &TabletPlan{Plan: splan, Original: sql} plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableName().String()) plan.buildAuthorized() return plan, nil @@ -403,14 +404,6 @@ func (qe *QueryEngine) getQuery(sql string) *TabletPlan { return nil } -// peekQuery fetches the plan without changing the LRU order. -func (qe *QueryEngine) peekQuery(sql string) *TabletPlan { - if cacheResult, ok := qe.plans.Peek(sql); ok { - return cacheResult.(*TabletPlan) - } - return nil -} - // SetQueryPlanCacheCap sets the query plan cache capacity. func (qe *QueryEngine) SetQueryPlanCacheCap(size int) { if size <= 0 { @@ -421,7 +414,13 @@ func (qe *QueryEngine) SetQueryPlanCacheCap(size int) { // QueryPlanCacheCap returns the capacity of the query cache. 
func (qe *QueryEngine) QueryPlanCacheCap() int { - return int(qe.plans.Capacity()) + return int(qe.plans.MaxCapacity()) +} + +// QueryPlanCacheLen returns the length (size in entries) of the query cache +func (qe *QueryEngine) QueryPlanCacheLen() int { + qe.plans.Wait() + return qe.plans.Len() } // AddStats adds the given stats for the planName.tableName @@ -435,14 +434,15 @@ func (qe *QueryEngine) AddStats(planName, tableName string, queryCount int64, du } type perQueryStats struct { - Query string - Table string - Plan planbuilder.PlanType - QueryCount int64 - Time time.Duration - MysqlTime time.Duration - RowCount int64 - ErrorCount int64 + Query string + Table string + Plan planbuilder.PlanType + QueryCount uint64 + Time time.Duration + MysqlTime time.Duration + RowsAffected uint64 + RowsReturned uint64 + ErrorCount uint64 } func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, request *http.Request) { @@ -450,20 +450,19 @@ func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, reques acl.SendError(response, err) return } - keys := qe.plans.Keys() + response.Header().Set("Content-Type", "text/plain") - response.Write([]byte(fmt.Sprintf("Length: %d\n", len(keys)))) - for _, v := range keys { - response.Write([]byte(fmt.Sprintf("%#v\n", sqlparser.TruncateForUI(v)))) - if plan := qe.peekQuery(v); plan != nil { - if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { - response.Write([]byte(err.Error())) - } else { - response.Write(b) - } - response.Write(([]byte)("\n\n")) + qe.plans.ForEach(func(value interface{}) bool { + plan := value.(*TabletPlan) + response.Write([]byte(fmt.Sprintf("%#v\n", sqlparser.TruncateForUI(plan.Original)))) + if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { + response.Write([]byte(err.Error())) + } else { + response.Write(b) } - } + response.Write(([]byte)("\n\n")) + return true + }) } func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, request 
*http.Request) { @@ -471,20 +470,20 @@ func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, reques acl.SendError(response, err) return } - keys := qe.plans.Keys() response.Header().Set("Content-Type", "application/json; charset=utf-8") - qstats := make([]perQueryStats, 0, len(keys)) - for _, v := range keys { - if plan := qe.peekQuery(v); plan != nil { - var pqstats perQueryStats - pqstats.Query = unicoded(sqlparser.TruncateForUI(v)) - pqstats.Table = plan.TableName().String() - pqstats.Plan = plan.PlanID - pqstats.QueryCount, pqstats.Time, pqstats.MysqlTime, pqstats.RowCount, pqstats.ErrorCount = plan.Stats() - - qstats = append(qstats, pqstats) - } - } + var qstats []perQueryStats + qe.plans.ForEach(func(value interface{}) bool { + plan := value.(*TabletPlan) + + var pqstats perQueryStats + pqstats.Query = unicoded(sqlparser.TruncateForUI(plan.Original)) + pqstats.Table = plan.TableName().String() + pqstats.Plan = plan.PlanID + pqstats.QueryCount, pqstats.Time, pqstats.MysqlTime, pqstats.RowsAffected, pqstats.RowsReturned, pqstats.ErrorCount = plan.Stats() + + qstats = append(qstats, pqstats) + return true + }) if b, err := json.MarshalIndent(qstats, "", " "); err != nil { response.Write([]byte(err.Error())) } else { diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index 9ca7469a275..d74dfbb90b6 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -17,20 +17,29 @@ limitations under the License. 
package tabletserver import ( + "context" "expvar" + "fmt" + "math/rand" "net/http" "net/http/httptest" + "os" + "path" "reflect" "strings" + "sync" + "sync/atomic" "testing" "time" - "context" + "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/streamlog" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" @@ -92,7 +101,7 @@ func TestGetPlanPanicDuetoEmptyQuery(t *testing.T) { for query, result := range schematest.Queries() { db.AddQuery(query, result) } - qe := newTestQueryEngine(10, 10*time.Second, true, newDBConfigs(db)) + qe := newTestQueryEngine(10*time.Second, true, newDBConfigs(db)) qe.se.Open() qe.Open() defer qe.Close() @@ -100,10 +109,24 @@ func TestGetPlanPanicDuetoEmptyQuery(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") _, err := qe.GetPlan(ctx, logStats, "", false, false /* inReservedConn */) - want := "empty statement" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("qe.GetPlan: %v, want %s", err, want) - } + require.EqualError(t, err, "Query was empty") +} + +func addSchemaEngineQueries(db *fakesqldb.DB) { + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ + Fields: mysql.BaseShowTablesFields, + Rows: [][]sqltypes.Value{ + mysql.BaseShowTablesRow("test_table_01", false, ""), + mysql.BaseShowTablesRow("test_table_02", false, ""), + mysql.BaseShowTablesRow("test_table_03", false, ""), + mysql.BaseShowTablesRow("seq", false, "vitess_sequence"), + mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"), + }}) + db.AddQuery("show status like 'Innodb_rows_read'", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + 
"Variable_name|Value", + "varchar|int64"), + "Innodb_rows_read|0", + )) } func TestGetMessageStreamPlan(t *testing.T) { @@ -112,7 +135,10 @@ func TestGetMessageStreamPlan(t *testing.T) { for query, result := range schematest.Queries() { db.AddQuery(query, result) } - qe := newTestQueryEngine(10, 10*time.Second, true, newDBConfigs(db)) + + addSchemaEngineQueries(db) + + qe := newTestQueryEngine(10*time.Second, true, newDBConfigs(db)) qe.se.Open() qe.Open() defer qe.Close() @@ -137,6 +163,17 @@ func TestGetMessageStreamPlan(t *testing.T) { } } +func assertPlanCacheSize(t *testing.T, qe *QueryEngine, expected int) { + var size int + qe.plans.ForEach(func(_ interface{}) bool { + size++ + return true + }) + if size != expected { + t.Fatalf("expected query plan cache to contain %d entries, found %d", expected, size) + } +} + func TestQueryPlanCache(t *testing.T) { db := fakesqldb.New(t) defer db.Close() @@ -149,14 +186,18 @@ func TestQueryPlanCache(t *testing.T) { db.AddQuery("select * from test_table_01 where 1 != 1", &sqltypes.Result{}) db.AddQuery("select * from test_table_02 where 1 != 1", &sqltypes.Result{}) - qe := newTestQueryEngine(10, 10*time.Second, true, newDBConfigs(db)) + qe := newTestQueryEngine(10*time.Second, true, newDBConfigs(db)) qe.se.Open() qe.Open() defer qe.Close() ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - qe.SetQueryPlanCacheCap(1) + if cache.DefaultConfig.LFU { + qe.SetQueryPlanCacheCap(1024) + } else { + qe.SetQueryPlanCacheCap(1) + } firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false, false /* inReservedConn */) if err != nil { t.Fatal(err) @@ -174,9 +215,7 @@ func TestQueryPlanCache(t *testing.T) { expvar.Do(func(kv expvar.KeyValue) { _ = kv.Value.String() }) - if qe.plans.Size() == 0 { - t.Fatalf("query plan cache should not be 0") - } + assertPlanCacheSize(t, qe, 1) qe.ClearQueryPlanCache() } @@ -191,14 +230,14 @@ func TestNoQueryPlanCache(t *testing.T) { db.AddQuery("select * from 
test_table_01 where 1 != 1", &sqltypes.Result{}) db.AddQuery("select * from test_table_02 where 1 != 1", &sqltypes.Result{}) - qe := newTestQueryEngine(10, 10*time.Second, true, newDBConfigs(db)) + qe := newTestQueryEngine(10*time.Second, true, newDBConfigs(db)) qe.se.Open() qe.Open() defer qe.Close() ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - qe.SetQueryPlanCacheCap(1) + qe.SetQueryPlanCacheCap(1024) firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, true, false /* inReservedConn */) if err != nil { t.Fatal(err) @@ -206,9 +245,7 @@ func TestNoQueryPlanCache(t *testing.T) { if firstPlan == nil { t.Fatalf("plan should not be nil") } - if qe.plans.Size() != 0 { - t.Fatalf("query plan cache should be 0") - } + assertPlanCacheSize(t, qe, 0) qe.ClearQueryPlanCache() } @@ -223,14 +260,14 @@ func TestNoQueryPlanCacheDirective(t *testing.T) { db.AddQuery("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from test_table_01 where 1 != 1", &sqltypes.Result{}) db.AddQuery("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from test_table_02 where 1 != 1", &sqltypes.Result{}) - qe := newTestQueryEngine(10, 10*time.Second, true, newDBConfigs(db)) + qe := newTestQueryEngine(10*time.Second, true, newDBConfigs(db)) qe.se.Open() qe.Open() defer qe.Close() ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - qe.SetQueryPlanCacheCap(1) + qe.SetQueryPlanCacheCap(1024) firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false, false /* inReservedConn */) if err != nil { t.Fatal(err) @@ -238,9 +275,7 @@ func TestNoQueryPlanCacheDirective(t *testing.T) { if firstPlan == nil { t.Fatalf("plan should not be nil") } - if qe.plans.Size() != 0 { - t.Fatalf("query plan cache should be 0") - } + assertPlanCacheSize(t, qe, 0) qe.ClearQueryPlanCache() } @@ -252,7 +287,7 @@ func TestStatsURL(t *testing.T) { } query := "select * from test_table_01" db.AddQuery("select * from test_table_01 where 1 != 1", &sqltypes.Result{}) - qe := 
newTestQueryEngine(10, 1*time.Second, true, newDBConfigs(db)) + qe := newTestQueryEngine(1*time.Second, true, newDBConfigs(db)) qe.se.Open() qe.Open() defer qe.Close() @@ -274,10 +309,9 @@ func TestStatsURL(t *testing.T) { qe.handleHTTPQueryRules(response, request) } -func newTestQueryEngine(queryCacheSize int, idleTimeout time.Duration, strict bool, dbcfgs *dbconfigs.DBConfigs) *QueryEngine { +func newTestQueryEngine(idleTimeout time.Duration, strict bool, dbcfgs *dbconfigs.DBConfigs) *QueryEngine { config := tabletenv.NewDefaultConfig() config.DB = dbcfgs - config.QueryCacheSize = queryCacheSize config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout) config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout) config.TxPool.IdleTimeoutSeconds.Set(idleTimeout) @@ -292,7 +326,7 @@ func runConsolidatedQuery(t *testing.T, sql string) *QueryEngine { db := fakesqldb.New(t) defer db.Close() - qe := newTestQueryEngine(10, 1*time.Second, true, newDBConfigs(db)) + qe := newTestQueryEngine(1*time.Second, true, newDBConfigs(db)) qe.se.Open() qe.Open() defer qe.Close() @@ -346,3 +380,215 @@ func TestConsolidationsUIRedaction(t *testing.T) { t.Fatalf("Response missing redacted consolidated query: %v %v", redactedSQL, redactedResponse.Body.String()) } } + +func BenchmarkPlanCacheThroughput(b *testing.B) { + db := fakesqldb.New(b) + defer db.Close() + + for query, result := range schematest.Queries() { + db.AddQuery(query, result) + } + + db.AddQueryPattern(".*", &sqltypes.Result{}) + + qe := newTestQueryEngine(10*time.Second, true, newDBConfigs(db)) + qe.se.Open() + qe.Open() + defer qe.Close() + + ctx := context.Background() + logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") + + for i := 0; i < b.N; i++ { + query := fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.Intn(500)) + _, err := qe.GetPlan(ctx, logStats, query, false, false /* inReservedConn */) + if err != nil { + b.Fatal(err) + } + } +} + +func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, lfu bool, par 
int) { + b.Helper() + + dbcfgs := newDBConfigs(db) + config := tabletenv.NewDefaultConfig() + config.DB = dbcfgs + config.QueryCacheLFU = lfu + + env := tabletenv.NewEnv(config, "TabletServerTest") + se := schema.NewEngine(env) + qe := NewQueryEngine(env, se) + + se.InitDBConfig(dbcfgs.DbaWithDB()) + require.NoError(b, se.Open()) + require.NoError(b, qe.Open()) + defer qe.Close() + + b.SetParallelism(par) + b.RunParallel(func(pb *testing.PB) { + ctx := context.Background() + logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") + + for pb.Next() { + query := fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.Intn(500)) + _, err := qe.GetPlan(ctx, logStats, query, false, false /* inReservedConn */) + require.NoErrorf(b, err, "bad query: %s", query) + } + }) +} + +func BenchmarkPlanCacheContention(b *testing.B) { + db := fakesqldb.New(b) + defer db.Close() + + for query, result := range schematest.Queries() { + db.AddQuery(query, result) + } + + db.AddQueryPattern(".*", &sqltypes.Result{}) + + for par := 1; par <= 8; par *= 2 { + b.Run(fmt.Sprintf("ContentionLRU-%d", par), func(b *testing.B) { + benchmarkPlanCache(b, db, false, par) + }) + + b.Run(fmt.Sprintf("ContentionLFU-%d", par), func(b *testing.B) { + benchmarkPlanCache(b, db, true, par) + }) + } +} + +func TestPlanCachePollution(t *testing.T) { + plotPath := os.Getenv("CACHE_PLOT_PATH") + if plotPath == "" { + t.Skipf("CACHE_PLOT_PATH not set") + } + + const NormalQueries = 500000 + const PollutingQueries = NormalQueries / 2 + + db := fakesqldb.New(t) + defer db.Close() + + for query, result := range schematest.Queries() { + db.AddQuery(query, result) + } + + db.AddQueryPattern(".*", &sqltypes.Result{}) + + dbcfgs := newDBConfigs(db) + config := tabletenv.NewDefaultConfig() + config.DB = dbcfgs + // config.LFUQueryCacheSizeBytes = 3 * 1024 * 1024 + + env := tabletenv.NewEnv(config, "TabletServerTest") + se := schema.NewEngine(env) + qe := NewQueryEngine(env, se) + + se.InitDBConfig(dbcfgs.DbaWithDB()) + 
se.Open() + + qe.Open() + defer qe.Close() + + type Stats struct { + queries uint64 + cached uint64 + interval time.Duration + } + + var stats1, stats2 Stats + var wg sync.WaitGroup + + go func() { + cacheMode := "lru" + if config.QueryCacheLFU { + cacheMode = "lfu" + } + + out, err := os.Create(path.Join(plotPath, + fmt.Sprintf("cache_plot_%d_%d_%s.dat", + config.QueryCacheSize, config.QueryCacheMemory, cacheMode, + )), + ) + require.NoError(t, err) + defer out.Close() + + var last1 uint64 + var last2 uint64 + + for range time.Tick(100 * time.Millisecond) { + var avg1, avg2 time.Duration + + if stats1.queries-last1 > 0 { + avg1 = stats1.interval / time.Duration(stats1.queries-last1) + } + if stats2.queries-last2 > 0 { + avg2 = stats2.interval / time.Duration(stats2.queries-last2) + } + + stats1.interval = 0 + last1 = stats1.queries + stats2.interval = 0 + last2 = stats2.queries + + cacheUsed, cacheCap := qe.plans.UsedCapacity(), qe.plans.MaxCapacity() + + t.Logf("%d queries (%f hit rate), cache %d / %d (%f usage), %v %v", + stats1.queries+stats2.queries, + float64(stats1.cached)/float64(stats1.queries), + cacheUsed, cacheCap, + float64(cacheUsed)/float64(cacheCap), avg1, avg2) + + if out != nil { + fmt.Fprintf(out, "%d %f %f %f %f %d %d\n", + stats1.queries+stats2.queries, + float64(stats1.queries)/float64(NormalQueries), + float64(stats2.queries)/float64(PollutingQueries), + float64(stats1.cached)/float64(stats1.queries), + float64(cacheUsed)/float64(cacheCap), + avg1.Microseconds(), + avg2.Microseconds(), + ) + } + } + }() + + runner := func(totalQueries uint64, stats *Stats, sample func() string) { + for i := uint64(0); i < totalQueries; i++ { + ctx := context.Background() + logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") + query := sample() + + start := time.Now() + _, err := qe.GetPlan(ctx, logStats, query, false, false /* inReservedConn */) + require.NoErrorf(t, err, "bad query: %s", query) + stats.interval += time.Since(start) + + 
atomic.AddUint64(&stats.queries, 1) + if logStats.CachedPlan { + atomic.AddUint64(&stats.cached, 1) + } + } + } + + wg.Add(2) + + go func() { + defer wg.Done() + runner(NormalQueries, &stats1, func() string { + return fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.Intn(5000)) + }) + }() + + go func() { + defer wg.Done() + time.Sleep(500 * time.Millisecond) + runner(PollutingQueries, &stats2, func() string { + return fmt.Sprintf("INSERT INTO test_table_00 VALUES (1, 2, 3, %d)", rand.Int()) + }) + }() + + wg.Wait() +} diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index 8128341554d..a0021e092ac 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -38,6 +38,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" + p "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -112,11 +113,11 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { if reply == nil { qre.tsv.qe.AddStats(planName, tableName, 1, duration, mysqlTime, 0, 1) - qre.plan.AddStats(1, duration, mysqlTime, 0, 1) + qre.plan.AddStats(1, duration, mysqlTime, 0, 0, 1) return } qre.tsv.qe.AddStats(planName, tableName, 1, duration, mysqlTime, int64(reply.RowsAffected), 0) - qre.plan.AddStats(1, duration, mysqlTime, int64(reply.RowsAffected), 0) + qre.plan.AddStats(1, duration, mysqlTime, reply.RowsAffected, uint64(len(reply.Rows)), 0) qre.logStats.RowsAffected = int(reply.RowsAffected) qre.logStats.Rows = reply.Rows qre.tsv.Stats().ResultHistogram.Add(int64(len(reply.Rows))) @@ -127,9 +128,9 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { } switch qre.plan.PlanID { - case planbuilder.PlanNextval: + case 
p.PlanNextval: return qre.execNextval() - case planbuilder.PlanSelectImpossible: + case p.PlanSelectImpossible: // If the fields did not get cached, we have send the query // to mysql, which you can see below. if qre.plan.Fields != nil { @@ -150,7 +151,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { } switch qre.plan.PlanID { - case planbuilder.PlanSelect, planbuilder.PlanSelectImpossible, planbuilder.PlanShowTables: + case p.PlanSelect, p.PlanSelectImpossible, p.PlanShow: maxrows := qre.getSelectLimit() qre.bindVars["#maxLimit"] = sqltypes.Int64BindVariable(maxrows + 1) if qre.bindVars[sqltypes.BvReplaceSchemaName] != nil { @@ -164,16 +165,18 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return nil, err } return qr, nil - case planbuilder.PlanSelectLock: - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%s disallowed outside transaction", qre.plan.PlanID.String()) - case planbuilder.PlanSet, planbuilder.PlanOtherRead, planbuilder.PlanOtherAdmin: + case p.PlanOtherRead, p.PlanOtherAdmin, p.PlanFlush: return qre.execOther() - case planbuilder.PlanSavepoint, planbuilder.PlanRelease, planbuilder.PlanSRollback: + case p.PlanSavepoint, p.PlanRelease, p.PlanSRollback: return qre.execOther() - case planbuilder.PlanInsert, planbuilder.PlanUpdate, planbuilder.PlanDelete, planbuilder.PlanInsertMessage, planbuilder.PlanDDL, planbuilder.PlanLoad: + case p.PlanInsert, p.PlanUpdate, p.PlanDelete, p.PlanInsertMessage, p.PlanDDL, p.PlanLoad: return qre.execAutocommit(qre.txConnExec) - case planbuilder.PlanUpdateLimit, planbuilder.PlanDeleteLimit: + case p.PlanUpdateLimit, p.PlanDeleteLimit: return qre.execAsTransaction(qre.txConnExec) + case p.PlanCallProc: + return qre.execCallProc() + case p.PlanAlterMigration: + return qre.execAlterMigration() } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s unexpected plan type", qre.plan.PlanID.String()) } @@ -224,18 +227,18 @@ func (qre *QueryExecutor) 
execAsTransaction(f func(conn *StatefulConnection) (*s func (qre *QueryExecutor) txConnExec(conn *StatefulConnection) (*sqltypes.Result, error) { switch qre.plan.PlanID { - case planbuilder.PlanInsert, planbuilder.PlanUpdate, planbuilder.PlanDelete: + case p.PlanInsert, p.PlanUpdate, p.PlanDelete, p.PlanSet: return qre.txFetch(conn, true) - case planbuilder.PlanInsertMessage: + case p.PlanInsertMessage: qre.bindVars["#time_now"] = sqltypes.Int64BindVariable(time.Now().UnixNano()) return qre.txFetch(conn, true) - case planbuilder.PlanUpdateLimit, planbuilder.PlanDeleteLimit: + case p.PlanUpdateLimit, p.PlanDeleteLimit: return qre.execDMLLimit(conn) - case planbuilder.PlanSet, planbuilder.PlanOtherRead, planbuilder.PlanOtherAdmin: + case p.PlanOtherRead, p.PlanOtherAdmin, p.PlanFlush: return qre.execStatefulConn(conn, qre.query, true) - case planbuilder.PlanSavepoint, planbuilder.PlanRelease, planbuilder.PlanSRollback: + case p.PlanSavepoint, p.PlanRelease, p.PlanSRollback: return qre.execStatefulConn(conn, qre.query, true) - case planbuilder.PlanSelect, planbuilder.PlanSelectLock, planbuilder.PlanSelectImpossible, planbuilder.PlanShowTables: + case p.PlanSelect, p.PlanSelectImpossible, p.PlanShow: maxrows := qre.getSelectLimit() qre.bindVars["#maxLimit"] = sqltypes.Int64BindVariable(maxrows + 1) if qre.bindVars[sqltypes.BvReplaceSchemaName] != nil { @@ -249,10 +252,12 @@ func (qre *QueryExecutor) txConnExec(conn *StatefulConnection) (*sqltypes.Result return nil, err } return qr, nil - case planbuilder.PlanDDL: + case p.PlanDDL: return qre.execDDL(conn) - case planbuilder.PlanLoad: + case p.PlanLoad: return qre.execLoad(conn) + case p.PlanCallProc: + return qre.execProc(conn) } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s unexpected plan type", qre.plan.PlanID.String()) } @@ -569,12 +574,11 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(ret), }}, - RowsAffected: 1, }, nil } // execSelect 
sends a query to mysql only if another identical query is not running. Otherwise, it waits and -// reuses the result. If the plan is missng field info, it sends the query to mysql requesting full info. +// reuses the result. If the plan is missing field info, it sends the query to mysql requesting full info. func (qre *QueryExecutor) execSelect() (*sqltypes.Result, error) { if qre.tsv.qe.enableQueryPlanFieldCaching && qre.plan.Fields != nil { result, err := qre.qFetch(qre.logStats, qre.plan.FullQuery, qre.bindVars) @@ -744,6 +748,101 @@ func (qre *QueryExecutor) generateFinalSQL(parsedQuery *sqlparser.ParsedQuery, b return buf.String(), query, nil } +func rewriteOUTParamError(err error) error { + sqlErr, ok := err.(*mysql.SQLError) + if !ok { + return err + } + if sqlErr.Num == mysql.ErSPNotVarArg { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "OUT and INOUT parameters are not supported") + } + return err +} + +func (qre *QueryExecutor) execCallProc() (*sqltypes.Result, error) { + conn, err := qre.getConn() + if err != nil { + return nil, err + } + defer conn.Recycle() + sql, _, err := qre.generateFinalSQL(qre.plan.FullQuery, qre.bindVars) + if err != nil { + return nil, err + } + + qr, err := qre.execDBConn(conn, sql, true) + if err != nil { + return nil, rewriteOUTParamError(err) + } + if !qr.IsMoreResultsExists() { + if qr.IsInTransaction() { + conn.Close() + return nil, vterrors.New(vtrpcpb.Code_CANCELED, "Transaction not concluded inside the stored procedure, leaking transaction from stored procedure is not allowed") + } + return qr, nil + } + err = qre.drainResultSetOnConn(conn) + if err != nil { + return nil, err + } + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "Multi-Resultset not supported in stored procedure") +} + +func (qre *QueryExecutor) execProc(conn *StatefulConnection) (*sqltypes.Result, error) { + beforeInTx := conn.IsInTransaction() + sql, _, err := qre.generateFinalSQL(qre.plan.FullQuery, qre.bindVars) + if err != nil { + 
return nil, err + } + qr, err := qre.execStatefulConn(conn, sql, true) + if err != nil { + return nil, rewriteOUTParamError(err) + } + if !qr.IsMoreResultsExists() { + afterInTx := qr.IsInTransaction() + if beforeInTx != afterInTx { + conn.Close() + return nil, vterrors.New(vtrpcpb.Code_CANCELED, "Transaction state change inside the stored procedure is not allowed") + } + return qr, nil + } + err = qre.drainResultSetOnConn(conn.UnderlyingDBConn()) + if err != nil { + return nil, err + } + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "Multi-Resultset not supported in stored procedure") +} + +func (qre *QueryExecutor) execAlterMigration() (*sqltypes.Result, error) { + alterMigration, ok := qre.plan.FullStmt.(*sqlparser.AlterMigration) + if !ok { + return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "Expecting ALTER VITESS_MIGRATION plan") + } + switch alterMigration.Type { + case sqlparser.RetryMigrationType: + return qre.tsv.onlineDDLExecutor.RetryMigration(qre.ctx, alterMigration.UUID) + case sqlparser.CompleteMigrationType: + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "ALTER VITESS_MIGRATION COMPLETE is not implemented yet") + case sqlparser.CancelMigrationType: + return qre.tsv.onlineDDLExecutor.CancelMigration(qre.ctx, alterMigration.UUID, true, "CANCEL issued by user") + case sqlparser.CancelAllMigrationType: + return qre.tsv.onlineDDLExecutor.CancelPendingMigrations(qre.ctx, "CANCEL ALL issued by user") + } + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "ALTER VITESS_MIGRATION not implemented") +} + +func (qre *QueryExecutor) drainResultSetOnConn(conn *connpool.DBConn) error { + more := true + for more { + qr, err := conn.FetchNext(qre.ctx, int(qre.getSelectLimit()), true) + if err != nil { + return err + } + more = qr.IsMoreResultsExists() + } + return nil +} + func (qre *QueryExecutor) getSelectLimit() int64 { maxRows := qre.tsv.qe.maxResultSize.Get() sqlLimit := qre.options.GetSqlSelectLimit() @@ -808,7 +907,7 @@ func (qre 
*QueryExecutor) recordUserQuery(queryType string, duration int64) { } tableName := qre.plan.TableName().String() qre.tsv.Stats().UserTableQueryCount.Add([]string{tableName, username, queryType}, 1) - qre.tsv.Stats().UserTableQueryTimesNs.Add([]string{tableName, username, queryType}, int64(duration)) + qre.tsv.Stats().UserTableQueryTimesNs.Add([]string{tableName, username, queryType}, duration) } // resolveNumber extracts a number from a bind variable or sql value. diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index bd72971d71a..7e67ce5d3b3 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -112,15 +112,6 @@ func TestQueryExecutorPlans(t *testing.T) { // Because the fields would have been cached before, the field query will // not get re-executed. inTxWant: "select * from t limit 1", - }, { - input: "set a=1", - dbResponses: []dbResponse{{ - query: "set a=1", - result: dmlResult, - }}, - resultWant: dmlResult, - planWant: "Set", - logWant: "set a=1", }, { input: "show engines", dbResponses: []dbResponse{{ @@ -223,13 +214,13 @@ func TestQueryExecutorPlans(t *testing.T) { }, { input: "create index a on user(id)", dbResponses: []dbResponse{{ - query: "alter table user add index a (id)", + query: "alter table `user` add index a (id)", result: emptyResult, }}, resultWant: emptyResult, planWant: "DDL", - logWant: "alter table user add index a (id)", - inTxWant: "alter table user add index a (id)", + logWant: "alter table `user` add index a (id)", + inTxWant: "alter table `user` add index a (id)", }, { input: "create index a on user(id1 + id2)", dbResponses: []dbResponse{{ @@ -260,9 +251,36 @@ func TestQueryExecutorPlans(t *testing.T) { planWant: "Release", logWant: "RELEASE savepoint a", inTxWant: "RELEASE savepoint a", + }, { + input: "show create database db_name", + dbResponses: []dbResponse{{ + query: "show create database 
ks", + result: emptyResult, + }}, + resultWant: emptyResult, + planWant: "Show", + logWant: "show create database ks", + }, { + input: "show create database mysql", + dbResponses: []dbResponse{{ + query: "show create database mysql", + result: emptyResult, + }}, + resultWant: emptyResult, + planWant: "Show", + logWant: "show create database mysql", + }, { + input: "show create table mysql.user", + dbResponses: []dbResponse{{ + query: "show create table mysql.`user`", + result: emptyResult, + }}, + resultWant: emptyResult, + planWant: "Show", + logWant: "show create table mysql.`user`", }} for _, tcase := range testcases { - func() { + t.Run(tcase.input, func(t *testing.T) { db := setUpQueryExecutorTest(t) defer db.Close() for _, dbr := range tcase.dbResponses { @@ -270,6 +288,7 @@ func TestQueryExecutorPlans(t *testing.T) { } ctx := context.Background() tsv := newTestTabletServer(ctx, noFlags, db) + tsv.config.DB.DBName = "ks" defer tsv.StopService() tsv.SetPassthroughDMLs(tcase.passThrough) @@ -282,6 +301,9 @@ func TestQueryExecutorPlans(t *testing.T) { assert.Equal(t, tcase.planWant, qre.logStats.PlanType, tcase.input) assert.Equal(t, tcase.logWant, qre.logStats.RewrittenSQL(), tcase.input) + // Wait for the existing query to be processed by the cache + tsv.QueryPlanCacheWait() + // Test inside a transaction. 
target := tsv.sm.Target() txid, alias, err := tsv.Begin(ctx, &target, nil) @@ -300,7 +322,7 @@ func TestQueryExecutorPlans(t *testing.T) { want = tcase.inTxWant } assert.Equal(t, want, qre.logStats.RewrittenSQL(), "in tx: %v", tcase.input) - }() + }) } } @@ -505,12 +527,9 @@ func TestQueryExecutorPlanPassSelectWithLockOutsideATransaction(t *testing.T) { tsv := newTestTabletServer(ctx, noFlags, db) qre := newTestQueryExecutor(ctx, tsv, query, 0) defer tsv.StopService() - assert.Equal(t, planbuilder.PlanSelectLock, qre.plan.PlanID) + assert.Equal(t, planbuilder.PlanSelect, qre.plan.PlanID) _, err := qre.Execute() - if code := vterrors.Code(err); code != vtrpcpb.Code_FAILED_PRECONDITION { - assert.NoError(t, err) - t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_FAILED_PRECONDITION) - } + assert.NoError(t, err) } func TestQueryExecutorPlanNextval(t *testing.T) { @@ -526,7 +545,6 @@ func TestQueryExecutorPlanNextval(t *testing.T) { sqltypes.NewInt64(1), sqltypes.NewInt64(3), }}, - RowsAffected: 1, }) updateQuery := "update seq set next_id = 4 where id = 0" db.AddQuery(updateQuery, &sqltypes.Result{}) @@ -547,7 +565,6 @@ func TestQueryExecutorPlanNextval(t *testing.T) { Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(1), }}, - RowsAffected: 1, } assert.Equal(t, want, got) @@ -567,7 +584,6 @@ func TestQueryExecutorPlanNextval(t *testing.T) { Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(2), }}, - RowsAffected: 1, } if !reflect.DeepEqual(got, want) { t.Fatalf("qre.Execute() =\n%#v, want:\n%#v", got, want) @@ -584,7 +600,6 @@ func TestQueryExecutorPlanNextval(t *testing.T) { sqltypes.NewInt64(4), sqltypes.NewInt64(3), }}, - RowsAffected: 1, }) updateQuery = "update seq set next_id = 7 where id = 0" db.AddQuery(updateQuery, &sqltypes.Result{}) @@ -601,7 +616,6 @@ func TestQueryExecutorPlanNextval(t *testing.T) { Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(3), }}, - RowsAffected: 1, } if !reflect.DeepEqual(got, want) { t.Fatalf("qre.Execute() =\n%#v, want:\n%#v", got, 
want) @@ -618,7 +632,6 @@ func TestQueryExecutorPlanNextval(t *testing.T) { sqltypes.NewInt64(7), sqltypes.NewInt64(3), }}, - RowsAffected: 2, }) updateQuery = "update seq set next_id = 13 where id = 0" db.AddQuery(updateQuery, &sqltypes.Result{}) @@ -635,7 +648,6 @@ func TestQueryExecutorPlanNextval(t *testing.T) { Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(5), }}, - RowsAffected: 1, } if !reflect.DeepEqual(got, want) { t.Fatalf("qre.Execute() =\n%#v, want:\n%#v", got, want) @@ -1173,14 +1185,29 @@ func newTestQueryExecutor(ctx context.Context, tsv *TabletServer, sql string, tx func setUpQueryExecutorTest(t *testing.T) *fakesqldb.DB { db := fakesqldb.New(t) - initQueryExecutorTestDB(db, true) + initQueryExecutorTestDB(db) return db } -func initQueryExecutorTestDB(db *fakesqldb.DB, testTableHasMultipleUniqueKeys bool) { - for query, result := range getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys) { +const baseShowTablesPattern = `SELECT t\.table_name.*` + +func initQueryExecutorTestDB(db *fakesqldb.DB) { + for query, result := range getQueryExecutorSupportedQueries() { db.AddQuery(query, result) } + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ + Fields: mysql.BaseShowTablesFields, + Rows: [][]sqltypes.Value{ + mysql.BaseShowTablesRow("test_table", false, ""), + mysql.BaseShowTablesRow("seq", false, "vitess_sequence"), + mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"), + }, + }) + db.AddQuery("show status like 'Innodb_rows_read'", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "Variable_name|Value", + "varchar|int64"), + "Innodb_rows_read|0", + )) } func getTestTableFields() []*querypb.Field { @@ -1191,7 +1218,7 @@ func getTestTableFields() []*querypb.Field { } } -func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[string]*sqltypes.Result { +func getQueryExecutorSupportedQueries() map[string]*sqltypes.Result 
{ return map[string]*sqltypes.Result{ // queries for twopc fmt.Sprintf(sqlCreateSidecarDB, "_vt"): {}, @@ -1211,7 +1238,6 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s Rows: [][]sqltypes.Value{ {sqltypes.NewInt32(1427325875)}, }, - RowsAffected: 1, }, "select @@global.sql_mode": { Fields: []*querypb.Field{{ @@ -1220,7 +1246,6 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s Rows: [][]sqltypes.Value{ {sqltypes.NewVarBinary("STRICT_TRANS_TABLES")}, }, - RowsAffected: 1, }, "select @@autocommit": { Fields: []*querypb.Field{{ @@ -1229,7 +1254,6 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s Rows: [][]sqltypes.Value{ {sqltypes.NewVarBinary("1")}, }, - RowsAffected: 1, }, "select @@sql_auto_is_null": { Fields: []*querypb.Field{{ @@ -1238,7 +1262,6 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s Rows: [][]sqltypes.Value{ {sqltypes.NewVarBinary("0")}, }, - RowsAffected: 1, }, "select @@version_comment from dual where 1 != 1": { Fields: []*querypb.Field{{ @@ -1252,30 +1275,18 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s Rows: [][]sqltypes.Value{ {sqltypes.NewVarBinary("fakedb server")}, }, - RowsAffected: 1, }, "(select 0 as x from dual where 1 != 1) union (select 1 as y from dual where 1 != 1)": { Fields: []*querypb.Field{{ Type: sqltypes.Uint64, }}, - Rows: [][]sqltypes.Value{}, - RowsAffected: 0, + Rows: [][]sqltypes.Value{}, }, "(select 0 as x from dual where 1 != 1) union (select 1 as y from dual where 1 != 1) limit 10001": { Fields: []*querypb.Field{{ Type: sqltypes.Uint64, }}, - Rows: [][]sqltypes.Value{}, - RowsAffected: 0, - }, - mysql.BaseShowTables: { - Fields: mysql.BaseShowTablesFields, - Rows: [][]sqltypes.Value{ - mysql.BaseShowTablesRow("test_table", false, ""), - mysql.BaseShowTablesRow("seq", false, "vitess_sequence"), - mysql.BaseShowTablesRow("msg", false, 
"vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"), - }, - RowsAffected: 3, + Rows: [][]sqltypes.Value{}, }, mysql.BaseShowPrimary: { Fields: mysql.ShowPrimaryFields, @@ -1284,7 +1295,6 @@ func getQueryExecutorSupportedQueries(testTableHasMultipleUniqueKeys bool) map[s mysql.ShowPrimaryRow("seq", "id"), mysql.ShowPrimaryRow("msg", "id"), }, - RowsAffected: 3, }, "select * from test_table where 1 != 1": { Fields: []*querypb.Field{{ diff --git a/go/vt/vttablet/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go index ef37e77c5cf..a564cc3a420 100644 --- a/go/vt/vttablet/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -39,11 +39,13 @@ var ( Count Time MySQL Time - Rows + Rows affected + Rows returned Errors Time per query MySQL Time per query - Rows per query + Rows affected per query + Rows returned per query Errors per query @@ -56,11 +58,13 @@ var ( {{.Count}} {{.Time}} {{.MysqlTime}} - {{.Rows}} + {{.RowsAffected}} + {{.RowsReturned}} {{.Errors}} {{.TimePQ}} {{.MysqlTimePQ}} - {{.RowsPQ}} + {{.RowsAffectedPQ}} + {{.RowsReturnedPQ}} {{.ErrorsPQ}} `)) @@ -69,15 +73,16 @@ var ( // queryzRow is used for rendering query stats // using go's template. type queryzRow struct { - Query string - Table string - Plan planbuilder.PlanType - Count int64 - tm time.Duration - mysqlTime time.Duration - Rows int64 - Errors int64 - Color string + Query string + Table string + Plan planbuilder.PlanType + Count uint64 + tm time.Duration + mysqlTime time.Duration + RowsAffected uint64 + RowsReturned uint64 + Errors uint64 + Color string } // Time returns the total time as a string. @@ -105,9 +110,15 @@ func (qzs *queryzRow) MysqlTimePQ() string { return fmt.Sprintf("%.6f", val) } -// RowsPQ returns the row count per query as a string. -func (qzs *queryzRow) RowsPQ() string { - val := float64(qzs.Rows) / float64(qzs.Count) +// RowsReturnedPQ returns the row count per query as a string. 
+func (qzs *queryzRow) RowsReturnedPQ() string { + val := float64(qzs.RowsReturned) / float64(qzs.Count) + return fmt.Sprintf("%.6f", val) +} + +// RowsAffectedPQ returns the row count per query as a string. +func (qzs *queryzRow) RowsAffectedPQ() string { + val := float64(qzs.RowsAffected) / float64(qzs.Count) return fmt.Sprintf("%.6f", val) } @@ -134,27 +145,26 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { defer logz.EndHTMLTable(w) w.Write(queryzHeader) - keys := qe.plans.Keys() sorter := queryzSorter{ - rows: make([]*queryzRow, 0, len(keys)), + rows: nil, less: func(row1, row2 *queryzRow) bool { return row1.timePQ() > row2.timePQ() }, } - for _, v := range qe.plans.Keys() { - plan := qe.peekQuery(v) + qe.plans.ForEach(func(value interface{}) bool { + plan := value.(*TabletPlan) if plan == nil { - continue + return true } Value := &queryzRow{ - Query: logz.Wrappable(sqlparser.TruncateForUI(v)), + Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), Table: plan.TableName().String(), Plan: plan.PlanID, } - Value.Count, Value.tm, Value.mysqlTime, Value.Rows, Value.Errors = plan.Stats() + Value.Count, Value.tm, Value.mysqlTime, Value.RowsAffected, Value.RowsReturned, Value.Errors = plan.Stats() var timepq time.Duration if Value.Count != 0 { - timepq = time.Duration(int64(Value.tm) / Value.Count) + timepq = Value.tm / time.Duration(Value.Count) } if timepq < 10*time.Millisecond { Value.Color = "low" @@ -164,7 +174,8 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { Value.Color = "high" } sorter.rows = append(sorter.rows, Value) - } + return true + }) sort.Sort(&sorter) for _, Value := range sorter.rows { if err := queryzTmpl.Execute(w, Value); err != nil { diff --git a/go/vt/vttablet/tabletserver/queryz_test.go b/go/vt/vttablet/tabletserver/queryz_test.go index cfcc26a45e5..90278cbf22b 100644 --- a/go/vt/vttablet/tabletserver/queryz_test.go +++ b/go/vt/vttablet/tabletserver/queryz_test.go @@ 
-35,50 +35,60 @@ import ( func TestQueryzHandler(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/schemaz", nil) - qe := newTestQueryEngine(100, 10*time.Second, true, &dbconfigs.DBConfigs{}) + qe := newTestQueryEngine(10*time.Second, true, &dbconfigs.DBConfigs{}) + const query1 = "select name from test_table" plan1 := &TabletPlan{ + Original: query1, Plan: &planbuilder.Plan{ Table: &schema.Table{Name: sqlparser.NewTableIdent("test_table")}, PlanID: planbuilder.PlanSelect, }, } - plan1.AddStats(10, 2*time.Second, 1*time.Second, 2, 0) - qe.plans.Set("select name from test_table", plan1) + plan1.AddStats(10, 2*time.Second, 1*time.Second, 0, 2, 0) + qe.plans.Set(query1, plan1) + const query2 = "insert into test_table values 1" plan2 := &TabletPlan{ + Original: query2, Plan: &planbuilder.Plan{ Table: &schema.Table{Name: sqlparser.NewTableIdent("test_table")}, PlanID: planbuilder.PlanDDL, }, } - plan2.AddStats(1, 2*time.Millisecond, 1*time.Millisecond, 1, 0) - qe.plans.Set("insert into test_table values 1", plan2) + plan2.AddStats(1, 2*time.Millisecond, 1*time.Millisecond, 1, 0, 0) + qe.plans.Set(query2, plan2) + const query3 = "show tables" plan3 := &TabletPlan{ + Original: query3, Plan: &planbuilder.Plan{ Table: &schema.Table{Name: sqlparser.NewTableIdent("")}, PlanID: planbuilder.PlanOtherRead, }, } - plan3.AddStats(1, 75*time.Millisecond, 50*time.Millisecond, 1, 0) - qe.plans.Set("show tables", plan3) + plan3.AddStats(1, 75*time.Millisecond, 50*time.Millisecond, 0, 1, 0) + qe.plans.Set(query3, plan3) qe.plans.Set("", (*TabletPlan)(nil)) + hugeInsert := "insert into test_table values 0" + for i := 1; i < 1000; i++ { + hugeInsert = hugeInsert + fmt.Sprintf(", %d", i) + } plan4 := &TabletPlan{ + Original: hugeInsert, Plan: &planbuilder.Plan{ Table: &schema.Table{Name: sqlparser.NewTableIdent("")}, PlanID: planbuilder.PlanOtherRead, }, } - plan4.AddStats(1, 1*time.Millisecond, 1*time.Millisecond, 1, 0) - hugeInsert := "insert into 
test_table values 0" - for i := 1; i < 1000; i++ { - hugeInsert = hugeInsert + fmt.Sprintf(", %d", i) - } + plan4.AddStats(1, 1*time.Millisecond, 1*time.Millisecond, 1, 0, 0) qe.plans.Set(hugeInsert, plan4) qe.plans.Set("", (*TabletPlan)(nil)) + // Wait for cache to settle + qe.plans.Wait() + queryzHandler(qe, resp, req) body, _ := ioutil.ReadAll(resp.Body) planPattern1 := []string{ @@ -89,10 +99,12 @@ func TestQueryzHandler(t *testing.T) { `10`, `2.000000`, `1.000000`, + `0`, `2`, `0`, `0.200000`, `0.100000`, + `0.000000`, `0.200000`, `0.000000`, } @@ -107,10 +119,12 @@ func TestQueryzHandler(t *testing.T) { `0.001000`, `1`, `0`, + `0`, `0.002000`, `0.001000`, `1.000000`, `0.000000`, + `0.000000`, } checkQueryzHasPlan(t, planPattern2, plan2, body) planPattern3 := []string{ @@ -121,10 +135,12 @@ func TestQueryzHandler(t *testing.T) { `1`, `0.075000`, `0.050000`, + `0`, `1`, `0`, `0.075000`, `0.050000`, + `0.000000`, `1.000000`, `0.000000`, } @@ -139,10 +155,12 @@ func TestQueryzHandler(t *testing.T) { `0.001000`, `1`, `0`, + `0`, `0.001000`, `0.001000`, `1.000000`, `0.000000`, + `0.000000`, } checkQueryzHasPlan(t, planPattern4, plan4, body) } @@ -150,6 +168,6 @@ func TestQueryzHandler(t *testing.T) { func checkQueryzHasPlan(t *testing.T, planPattern []string, plan *TabletPlan, page []byte) { matcher := regexp.MustCompile(strings.Join(planPattern, `\s*`)) if !matcher.Match(page) { - t.Fatalf("queryz page does not contain\nplan:\n%v\npattern:\n%v\npage:\n%s", plan, strings.Join(planPattern, `\s*`), string(page)) + t.Fatalf("queryz page does not contain\nplan:\n%#v\npattern:\n%v\npage:\n%s", plan, strings.Join(planPattern, `\s*`), string(page)) } } diff --git a/go/vt/vttablet/tabletserver/repltracker/poller.go b/go/vt/vttablet/tabletserver/repltracker/poller.go index dd77cc22188..da85da78008 100644 --- a/go/vt/vttablet/tabletserver/repltracker/poller.go +++ b/go/vt/vttablet/tabletserver/repltracker/poller.go @@ -20,11 +20,15 @@ import ( "sync" "time" + 
"vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/mysqlctl" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) +var replicationLagSeconds = stats.NewGauge("replicationLagSec", "replication lag in seconds") + type poller struct { mysqld mysqlctl.MysqlDaemon @@ -55,5 +59,6 @@ func (p *poller) Status() (time.Duration, error) { p.lag = time.Duration(status.SecondsBehindMaster) * time.Second p.timeRecorded = time.Now() + replicationLagSeconds.Set(int64(p.lag.Seconds())) return p.lag, nil } diff --git a/go/vt/vttablet/tabletserver/rules/cached_size.go b/go/vt/vttablet/tabletserver/rules/cached_size.go new file mode 100644 index 00000000000..60403475105 --- /dev/null +++ b/go/vt/vttablet/tabletserver/rules/cached_size.go @@ -0,0 +1,128 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. 
+ +package rules + +type cachedObject interface { + CachedSize(alloc bool) int64 +} + +func (cached *BindVarCond) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field name string + size += int64(len(cached.name)) + // field value vitess.io/vitess/go/vt/vttablet/tabletserver/rules.bvcValue + if cc, ok := cached.value.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Rule) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(232) + } + // field Description string + size += int64(len(cached.Description)) + // field Name string + size += int64(len(cached.Name)) + // field requestIP vitess.io/vitess/go/vt/vttablet/tabletserver/rules.namedRegexp + size += cached.requestIP.CachedSize(false) + // field user vitess.io/vitess/go/vt/vttablet/tabletserver/rules.namedRegexp + size += cached.user.CachedSize(false) + // field query vitess.io/vitess/go/vt/vttablet/tabletserver/rules.namedRegexp + size += cached.query.CachedSize(false) + // field leadingComment vitess.io/vitess/go/vt/vttablet/tabletserver/rules.namedRegexp + size += cached.leadingComment.CachedSize(false) + // field trailingComment vitess.io/vitess/go/vt/vttablet/tabletserver/rules.namedRegexp + size += cached.trailingComment.CachedSize(false) + // field plans []vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder.PlanType + { + size += int64(cap(cached.plans)) * int64(8) + } + // field tableNames []string + { + size += int64(cap(cached.tableNames)) * int64(16) + for _, elem := range cached.tableNames { + size += int64(len(elem)) + } + } + // field bindVarConds []vitess.io/vitess/go/vt/vttablet/tabletserver/rules.BindVarCond + { + size += int64(cap(cached.bindVarConds)) * int64(48) + for _, elem := range cached.bindVarConds { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *Rules) CachedSize(alloc 
bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field rules []*vitess.io/vitess/go/vt/vttablet/tabletserver/rules.Rule + { + size += int64(cap(cached.rules)) * int64(8) + for _, elem := range cached.rules { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *bvcre) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + // field re *regexp.Regexp + if cached.re != nil { + size += int64(153) + } + return size +} +func (cached *namedRegexp) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field name string + size += int64(len(cached.name)) + // field Regexp *regexp.Regexp + if cached.Regexp != nil { + size += int64(153) + } + return size +} diff --git a/go/vt/vttablet/tabletserver/rules/rules_test.go b/go/vt/vttablet/tabletserver/rules/rules_test.go index 71783588c7c..951f4146789 100644 --- a/go/vt/vttablet/tabletserver/rules/rules_test.go +++ b/go/vt/vttablet/tabletserver/rules/rules_test.go @@ -113,7 +113,7 @@ func TestFilterByPlan(t *testing.T) { qr2 := NewQueryRule("rule 2", "r2", QRFail) qr2.AddPlanCond(planbuilder.PlanSelect) - qr2.AddPlanCond(planbuilder.PlanSelectLock) + qr2.AddPlanCond(planbuilder.PlanSelect) qr2.AddBindVarCond("a", true, false, QRNoOp, nil) qr3 := NewQueryRule("rule 3", "r3", QRFail) @@ -180,7 +180,7 @@ func TestFilterByPlan(t *testing.T) { t.Errorf("qrs1:\n%s, want\n%s", got, want) } - qrs1 = qrs.FilterByPlan("insert", planbuilder.PlanSelectLock, "a") + qrs1 = qrs.FilterByPlan("insert", planbuilder.PlanSelect, "a") got = marshalled(qrs1) if got != want { t.Errorf("qrs1:\n%s, want\n%s", got, want) diff --git a/go/vt/vttablet/tabletserver/schema/cached_size.go b/go/vt/vttablet/tabletserver/schema/cached_size.go new file mode 100644 index 00000000000..e01571ce58b --- /dev/null +++ 
b/go/vt/vttablet/tabletserver/schema/cached_size.go @@ -0,0 +1,73 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package schema + +func (cached *MessageInfo) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field Fields []*vitess.io/vitess/go/vt/proto/query.Field + { + size += int64(cap(cached.Fields)) * int64(8) + for _, elem := range cached.Fields { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *SequenceInfo) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + return size +} +func (cached *Table) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(104) + } + // field Name vitess.io/vitess/go/vt/sqlparser.TableIdent + size += cached.Name.CachedSize(false) + // field Fields []*vitess.io/vitess/go/vt/proto/query.Field + { + size += int64(cap(cached.Fields)) * int64(8) + for _, elem := range cached.Fields { + size += elem.CachedSize(true) + } + } + // field PKColumns []int + { + size += int64(cap(cached.PKColumns)) * int64(8) + } + // field SequenceInfo *vitess.io/vitess/go/vt/vttablet/tabletserver/schema.SequenceInfo + size += cached.SequenceInfo.CachedSize(true) + // field MessageInfo 
*vitess.io/vitess/go/vt/vttablet/tabletserver/schema.MessageInfo + size += cached.MessageInfo.CachedSize(true) + return size +} diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index f49229c9ca7..a8c91c2663e 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -76,6 +77,10 @@ type Engine struct { // dbCreationFailed is for preventing log spam. dbCreationFailed bool + + tableFileSizeGauge *stats.GaugesWithSingleLabel + tableAllocatedSizeGauge *stats.GaugesWithSingleLabel + innoDbReadRowsGauge *stats.Gauge } // NewEngine creates a new Engine. @@ -93,6 +98,9 @@ func NewEngine(env tabletenv.Env) *Engine { reloadTime: reloadTime, } _ = env.Exporter().NewGaugeDurationFunc("SchemaReloadTime", "vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.", se.ticks.Interval) + se.tableFileSizeGauge = env.Exporter().NewGaugesWithSingleLabel("TableFileSize", "tracks table file size", "Table") + se.tableAllocatedSizeGauge = env.Exporter().NewGaugesWithSingleLabel("TableAllocatedSize", "tracks table allocated size", "Table") + se.innoDbReadRowsGauge = env.Exporter().NewGauge("InnodbRowsRead", "number of rows read by mysql") env.Exporter().HandleFunc("/debug/schema", se.handleDebugSchema) env.Exporter().HandleFunc("/schemaz", func(w http.ResponseWriter, r *http.Request) { @@ -284,9 +292,7 @@ func (se *Engine) ReloadAt(ctx context.Context, pos mysql.Position) error { // reload reloads the schema. It can also be used to initialize it. 
func (se *Engine) reload(ctx context.Context) error { - //start := time.Now() defer func() { - //log.Infof("Time taken to load the schema: %v", time.Since(start)) se.env.LogError() }() @@ -305,7 +311,12 @@ func (se *Engine) reload(ctx context.Context) error { if se.SkipMetaCheck { return nil } - tableData, err := conn.Exec(ctx, mysql.BaseShowTables, maxTableCount, false) + tableData, err := conn.Exec(ctx, conn.BaseShowTables(), maxTableCount, false) + if err != nil { + return err + } + + err = se.updateInnoDBRowsRead(ctx, conn) if err != nil { return err } @@ -321,19 +332,32 @@ func (se *Engine) reload(ctx context.Context) error { tableName := row[0].ToString() curTables[tableName] = true createTime, _ := evalengine.ToInt64(row[2]) + fileSize, _ := evalengine.ToUint64(row[4]) + allocatedSize, _ := evalengine.ToUint64(row[5]) + + // publish the size metrics + se.tableFileSizeGauge.Set(tableName, int64(fileSize)) + se.tableAllocatedSizeGauge.Set(tableName, int64(allocatedSize)) + // TODO(sougou); find a better way detect changed tables. This method // seems unreliable. The endtoend test flags all tables as changed. - if _, ok := se.tables[tableName]; ok && createTime < se.lastChange { + tbl, isInTablesMap := se.tables[tableName] + if isInTablesMap && createTime < se.lastChange { + tbl.FileSize = fileSize + tbl.AllocatedSize = allocatedSize continue } + log.V(2).Infof("Reading schema for table: %s", tableName) - table, err := LoadTable(conn, tableName, row[1].ToString(), row[3].ToString()) + table, err := LoadTable(conn, tableName, row[3].ToString()) if err != nil { rec.RecordError(err) continue } + table.FileSize = fileSize + table.AllocatedSize = allocatedSize changedTables[tableName] = table - if _, ok := se.tables[tableName]; ok { + if isInTablesMap { altered = append(altered, tableName) } else { created = append(created, tableName) @@ -346,11 +370,10 @@ func (se *Engine) reload(ctx context.Context) error { // Compute and handle dropped tables. 
var dropped []string for tableName := range se.tables { - if curTables[tableName] { - continue + if !curTables[tableName] { + dropped = append(dropped, tableName) + delete(se.tables, tableName) } - dropped = append(dropped, tableName) - delete(se.tables, tableName) } // Populate PKColumns for changed tables. @@ -370,6 +393,25 @@ func (se *Engine) reload(ctx context.Context) error { return nil } +func (se *Engine) updateInnoDBRowsRead(ctx context.Context, conn *connpool.DBConn) error { + readRowsData, err := conn.Exec(ctx, mysql.ShowRowsRead, 10, false) + if err != nil { + return err + } + + if len(readRowsData.Rows) == 1 && len(readRowsData.Rows[0]) == 2 { + value, err := evalengine.ToInt64(readRowsData.Rows[0][1]) + if err != nil { + return err + } + + se.innoDbReadRowsGauge.Set(value) + } else { + log.Warningf("got strange results from 'show status': %v", readRowsData.Rows) + } + return nil +} + func (se *Engine) mysqlTime(ctx context.Context, conn *connpool.DBConn) (int64, error) { // Keep `SELECT UNIX_TIMESTAMP` is in uppercase because binlog server queries are case sensitive and expect it to be so. tm, err := conn.Exec(ctx, "SELECT UNIX_TIMESTAMP()", 1, false) @@ -512,10 +554,10 @@ func (se *Engine) handleDebugSchema(response http.ResponseWriter, request *http. acl.SendError(response, err) return } - se.handleHTTPSchema(response, request) + se.handleHTTPSchema(response) } -func (se *Engine) handleHTTPSchema(response http.ResponseWriter, request *http.Request) { +func (se *Engine) handleHTTPSchema(response http.ResponseWriter) { // Ensure schema engine is Open. If vttablet came up in a non_serving role, // the schema engine may not have been initialized. 
err := se.Open() diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index ab5dc20cf03..5a0d908436b 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -26,6 +26,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/test/utils" + "context" "github.com/stretchr/testify/assert" @@ -42,12 +44,36 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) +const baseShowTablesPattern = `SELECT t\.table_name.*` + +var mustMatch = utils.MustMatchFn( + []interface{}{ // types with unexported fields + sqlparser.TableIdent{}, + }, + []string{".Mutex"}, // ignored fields +) + func TestOpenAndReload(t *testing.T) { db := fakesqldb.New(t) defer db.Close() for query, result := range schematest.Queries() { db.AddQuery(query, result) } + db.AddQueryPattern(baseShowTablesPattern, + &sqltypes.Result{ + Fields: mysql.BaseShowTablesFields, + RowsAffected: 0, + InsertID: 0, + Rows: [][]sqltypes.Value{ + mysql.BaseShowTablesRow("test_table_01", false, ""), + mysql.BaseShowTablesRow("test_table_02", false, ""), + mysql.BaseShowTablesRow("test_table_03", false, ""), + mysql.BaseShowTablesRow("seq", false, "vitess_sequence"), + mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"), + }, + SessionStateChanges: "", + StatusFlags: 0, + }) // pre-advance to above the default 1427325875. 
db.AddQuery("select unix_timestamp()", sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -55,12 +81,14 @@ func TestOpenAndReload(t *testing.T) { "int64"), "1427325876", )) - se := newEngine(10, 10*time.Second, 10*time.Second, true, db) + firstReadRowsValue := 12 + AddFakeInnoDBReadRowsResult(db, firstReadRowsValue) + se := newEngine(10, 10*time.Second, 10*time.Second, db) se.Open() defer se.Close() want := initialSchema() - assert.Equal(t, want, se.GetSchema()) + mustMatch(t, want, se.GetSchema()) // Advance time some more. db.AddQuery("select unix_timestamp()", sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -68,20 +96,23 @@ func TestOpenAndReload(t *testing.T) { "int64"), "1427325877", )) + assert.EqualValues(t, firstReadRowsValue, se.innoDbReadRowsGauge.Get()) // Modify test_table_03 // Add test_table_04 // Drop msg - db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{ + db.ClearQueryPattern() + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ Fields: mysql.BaseShowTablesFields, Rows: [][]sqltypes.Value{ mysql.BaseShowTablesRow("test_table_01", false, ""), mysql.BaseShowTablesRow("test_table_02", false, ""), { - sqltypes.MakeTrusted(sqltypes.VarChar, []byte("test_table_03")), - sqltypes.MakeTrusted(sqltypes.VarChar, []byte("BASE TABLE")), - // Match the timestamp. - sqltypes.MakeTrusted(sqltypes.Int64, []byte("1427325877")), - sqltypes.MakeTrusted(sqltypes.VarChar, []byte("")), + sqltypes.MakeTrusted(sqltypes.VarChar, []byte("test_table_03")), // table_name + sqltypes.MakeTrusted(sqltypes.VarChar, []byte("BASE TABLE")), // table_type + sqltypes.MakeTrusted(sqltypes.Int64, []byte("1427325877")), // unix_timestamp(t.create_time) // Match the timestamp. + sqltypes.MakeTrusted(sqltypes.VarChar, []byte("")), // table_comment + sqltypes.MakeTrusted(sqltypes.Int64, []byte("128")), // file_size + sqltypes.MakeTrusted(sqltypes.Int64, []byte("256")), // allocated_size }, // test_table_04 will in spite of older timestamp because it doesn't exist yet. 
mysql.BaseShowTablesRow("test_table_04", false, ""), @@ -117,6 +148,8 @@ func TestOpenAndReload(t *testing.T) { mysql.ShowPrimaryRow("seq", "id"), }, }) + secondReadRowsValue := 123 + AddFakeInnoDBReadRowsResult(db, secondReadRowsValue) firstTime := true notifier := func(full map[string]*Table, created, altered, dropped []string) { @@ -137,6 +170,8 @@ func TestOpenAndReload(t *testing.T) { err := se.Reload(context.Background()) require.NoError(t, err) + assert.EqualValues(t, secondReadRowsValue, se.innoDbReadRowsGauge.Get()) + want["test_table_03"] = &Table{ Name: sqlparser.NewTableIdent("test_table_03"), Fields: []*querypb.Field{{ @@ -149,7 +184,9 @@ func TestOpenAndReload(t *testing.T) { Name: "val", Type: sqltypes.Int32, }}, - PKColumns: []int{0, 1}, + PKColumns: []int{0, 1}, + FileSize: 128, + AllocatedSize: 256, } want["test_table_04"] = &Table{ Name: sqlparser.NewTableIdent("test_table_04"), @@ -157,7 +194,9 @@ func TestOpenAndReload(t *testing.T) { Name: "pk", Type: sqltypes.Int32, }}, - PKColumns: []int{0}, + PKColumns: []int{0}, + FileSize: 100, + AllocatedSize: 150, } delete(want, "msg") assert.Equal(t, want, se.GetSchema()) @@ -177,8 +216,8 @@ func TestOpenAndReload(t *testing.T) { require.NoError(t, err) assert.Equal(t, want, se.GetSchema()) - // delete table test_table_03 - db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{ + db.ClearQueryPattern() + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ Fields: mysql.BaseShowTablesFields, Rows: [][]sqltypes.Value{ mysql.BaseShowTablesRow("test_table_01", false, ""), @@ -220,7 +259,7 @@ func TestOpenFailedDueToMissMySQLTime(t *testing.T) { {sqltypes.NewVarBinary("1427325875")}, }, }) - se := newEngine(10, 1*time.Second, 1*time.Second, false, db) + se := newEngine(10, 1*time.Second, 1*time.Second, db) err := se.Open() want := "could not get MySQL time" if err == nil || !strings.Contains(err.Error(), want) { @@ -240,7 +279,7 @@ func TestOpenFailedDueToIncorrectMysqlRowNum(t *testing.T) { 
{sqltypes.NULL}, }, }) - se := newEngine(10, 1*time.Second, 1*time.Second, false, db) + se := newEngine(10, 1*time.Second, 1*time.Second, db) err := se.Open() want := "unexpected result for MySQL time" if err == nil || !strings.Contains(err.Error(), want) { @@ -260,7 +299,7 @@ func TestOpenFailedDueToInvalidTimeFormat(t *testing.T) { {sqltypes.NewVarBinary("invalid_time")}, }, }) - se := newEngine(10, 1*time.Second, 1*time.Second, false, db) + se := newEngine(10, 1*time.Second, 1*time.Second, db) err := se.Open() want := "could not parse time" if err == nil || !strings.Contains(err.Error(), want) { @@ -274,10 +313,11 @@ func TestOpenFailedDueToExecErr(t *testing.T) { for query, result := range schematest.Queries() { db.AddQuery(query, result) } - db.AddRejectedQuery(mysql.BaseShowTables, fmt.Errorf("injected error")) - se := newEngine(10, 1*time.Second, 1*time.Second, false, db) - err := se.Open() + want := "injected error" + db.RejectQueryPattern(baseShowTablesPattern, want) + se := newEngine(10, 1*time.Second, 1*time.Second, db) + err := se.Open() if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) } @@ -289,7 +329,7 @@ func TestOpenFailedDueToTableErr(t *testing.T) { for query, result := range schematest.Queries() { db.AddQuery(query, result) } - db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{ + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ Fields: mysql.BaseShowTablesFields, Rows: [][]sqltypes.Value{ mysql.BaseShowTablesRow("test_table", false, ""), @@ -306,7 +346,8 @@ func TestOpenFailedDueToTableErr(t *testing.T) { {sqltypes.NewVarBinary("")}, }, }) - se := newEngine(10, 1*time.Second, 1*time.Second, false, db) + AddFakeInnoDBReadRowsResult(db, 0) + se := newEngine(10, 1*time.Second, 1*time.Second, db) err := se.Open() want := "Row count exceeded" if err == nil || !strings.Contains(err.Error(), want) { @@ -320,7 +361,7 @@ func TestExportVars(t *testing.T) { for query, result := range 
schematest.Queries() { db.AddQuery(query, result) } - se := newEngine(10, 1*time.Second, 1*time.Second, true, db) + se := newEngine(10, 1*time.Second, 1*time.Second, db) se.Open() defer se.Close() expvar.Do(func(kv expvar.KeyValue) { @@ -334,7 +375,7 @@ func TestStatsURL(t *testing.T) { for query, result := range schematest.Queries() { db.AddQuery(query, result) } - se := newEngine(10, 1*time.Second, 1*time.Second, true, db) + se := newEngine(10, 1*time.Second, 1*time.Second, db) se.Open() defer se.Close() @@ -343,7 +384,7 @@ func TestStatsURL(t *testing.T) { se.handleDebugSchema(response, request) } -func newEngine(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, strict bool, db *fakesqldb.DB) *Engine { +func newEngine(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, db *fakesqldb.DB) *Engine { config := tabletenv.NewDefaultConfig() config.QueryCacheSize = queryCacheSize config.SchemaReloadIntervalSeconds.Set(reloadTime) @@ -372,7 +413,9 @@ func initialSchema() map[string]*Table { Name: "pk", Type: sqltypes.Int32, }}, - PKColumns: []int{0}, + PKColumns: []int{0}, + FileSize: 0x64, + AllocatedSize: 0x96, }, "test_table_02": { Name: sqlparser.NewTableIdent("test_table_02"), @@ -380,7 +423,9 @@ func initialSchema() map[string]*Table { Name: "pk", Type: sqltypes.Int32, }}, - PKColumns: []int{0}, + PKColumns: []int{0}, + FileSize: 0x64, + AllocatedSize: 0x96, }, "test_table_03": { Name: sqlparser.NewTableIdent("test_table_03"), @@ -388,7 +433,9 @@ func initialSchema() map[string]*Table { Name: "pk", Type: sqltypes.Int32, }}, - PKColumns: []int{0}, + PKColumns: []int{0}, + FileSize: 0x64, + AllocatedSize: 0x96, }, "seq": { Name: sqlparser.NewTableIdent("seq"), @@ -406,8 +453,10 @@ func initialSchema() map[string]*Table { Name: "increment", Type: sqltypes.Int64, }}, - PKColumns: []int{0}, - SequenceInfo: &SequenceInfo{}, + PKColumns: []int{0}, + FileSize: 0x64, + AllocatedSize: 0x96, + SequenceInfo: &SequenceInfo{}, }, 
"msg": { Name: sqlparser.NewTableIdent("msg"), @@ -431,7 +480,9 @@ func initialSchema() map[string]*Table { Name: "message", Type: sqltypes.Int64, }}, - PKColumns: []int{0}, + PKColumns: []int{0}, + FileSize: 0x64, + AllocatedSize: 0x96, MessageInfo: &MessageInfo{ Fields: []*querypb.Field{{ Name: "id", @@ -450,3 +501,11 @@ func initialSchema() map[string]*Table { }, } } + +func AddFakeInnoDBReadRowsResult(db *fakesqldb.DB, value int) *fakesqldb.ExpectedResult { + return db.AddQuery("show status like 'Innodb_rows_read'", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "Variable_name|Value", + "varchar|int64"), + fmt.Sprintf("Innodb_rows_read|%d", value), + )) +} diff --git a/go/vt/vttablet/tabletserver/schema/load_table.go b/go/vt/vttablet/tabletserver/schema/load_table.go index ce3b2f3f881..3d32dbd9075 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table.go +++ b/go/vt/vttablet/tabletserver/schema/load_table.go @@ -28,7 +28,7 @@ import ( ) // LoadTable creates a Table from the schema info in the database. 
-func LoadTable(conn *connpool.DBConn, tableName string, tableType string, comment string) (*Table, error) { +func LoadTable(conn *connpool.DBConn, tableName string, comment string) (*Table, error) { ta := NewTable(tableName) sqlTableName := sqlparser.String(ta.Name) if err := fetchColumns(ta, conn, sqlTableName); err != nil { diff --git a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index 25c0924b693..8d7e784ff0b 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -173,7 +173,7 @@ func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Tabl } defer conn.Recycle() - return LoadTable(conn, "test_table", tableType, comment) + return LoadTable(conn, "test_table", comment) } func getTestLoadTableQueries() map[string]*sqltypes.Result { diff --git a/go/vt/vttablet/tabletserver/schema/main_test.go b/go/vt/vttablet/tabletserver/schema/main_test.go index e260cbbc697..ada5c8085a1 100644 --- a/go/vt/vttablet/tabletserver/schema/main_test.go +++ b/go/vt/vttablet/tabletserver/schema/main_test.go @@ -34,10 +34,10 @@ func getTestSchemaEngine(t *testing.T) (*Engine, *fakesqldb.DB, func()) { "int64"), "1427325876", )) - db.AddQuery(mysql.BaseShowTables, &sqltypes.Result{}) - + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{}) db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{}) - se := newEngine(10, 10*time.Second, 10*time.Second, true, db) + AddFakeInnoDBReadRowsResult(db, 1) + se := newEngine(10, 10*time.Second, 10*time.Second, db) require.NoError(t, se.Open()) cancel := func() { defer db.Close() diff --git a/go/vt/vttablet/tabletserver/schema/schema.go b/go/vt/vttablet/tabletserver/schema/schema.go index ebf622bddb6..69aae6387ac 100644 --- a/go/vt/vttablet/tabletserver/schema/schema.go +++ b/go/vt/vttablet/tabletserver/schema/schema.go @@ -52,6 +52,9 @@ type Table struct { // MessageInfo contains info for 
message tables. MessageInfo *MessageInfo + + FileSize uint64 + AllocatedSize uint64 } // SequenceInfo contains info specific to sequence tabels. diff --git a/go/vt/vttablet/tabletserver/schema/schematest/schematest.go b/go/vt/vttablet/tabletserver/schema/schematest/schematest.go index 64395121714..236ed3d024f 100644 --- a/go/vt/vttablet/tabletserver/schema/schematest/schematest.go +++ b/go/vt/vttablet/tabletserver/schema/schematest/schematest.go @@ -61,16 +61,6 @@ func Queries() map[string]*sqltypes.Result { {sqltypes.NewVarBinary("0")}, }, }, - mysql.BaseShowTables: { - Fields: mysql.BaseShowTablesFields, - Rows: [][]sqltypes.Value{ - mysql.BaseShowTablesRow("test_table_01", false, ""), - mysql.BaseShowTablesRow("test_table_02", false, ""), - mysql.BaseShowTablesRow("test_table_03", false, ""), - mysql.BaseShowTablesRow("seq", false, "vitess_sequence"), - mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"), - }, - }, mysql.BaseShowPrimary: { Fields: mysql.ShowPrimaryFields, Rows: [][]sqltypes.Value{ diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index 2ba19135a1e..909dca84208 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -122,7 +122,7 @@ type stateManager struct { checkMySQLThrottler *sync2.Semaphore timebombDuration time.Duration - unhealthyThreshold time.Duration + unhealthyThreshold sync2.AtomicDuration shutdownGracePeriod time.Duration transitionGracePeriod time.Duration } @@ -187,7 +187,7 @@ func (sm *stateManager) Init(env tabletenv.Env, target querypb.Target) { sm.checkMySQLThrottler = sync2.NewSemaphore(1, 0) sm.timebombDuration = env.Config().OltpReadPool.TimeoutSeconds.Get() * 10 sm.hcticks = timer.NewTimer(env.Config().Healthcheck.IntervalSeconds.Get()) - sm.unhealthyThreshold = env.Config().Healthcheck.UnhealthyThresholdSeconds.Get() + 
sm.unhealthyThreshold = sync2.NewAtomicDuration(env.Config().Healthcheck.UnhealthyThresholdSeconds.Get()) sm.shutdownGracePeriod = env.Config().GracePeriods.ShutdownSeconds.Get() sm.transitionGracePeriod = env.Config().GracePeriods.TransitionSeconds.Get() } @@ -445,7 +445,6 @@ func (sm *stateManager) serveNonMaster(wantTabletType topodatapb.TabletType) err sm.ddle.Close() sm.tableGC.Close() - sm.throttler.Close() sm.messager.Close() sm.tracker.Close() sm.se.MakeNonMaster() @@ -457,6 +456,7 @@ func (sm *stateManager) serveNonMaster(wantTabletType topodatapb.TabletType) err sm.te.AcceptReadOnly() sm.rt.MakeNonMaster() sm.watcher.Open() + sm.throttler.Open() sm.setState(wantTabletType, StateServing) return nil } @@ -627,7 +627,7 @@ func (sm *stateManager) refreshReplHealthLocked() (time.Duration, error) { } sm.replHealthy = false } else { - if lag > sm.unhealthyThreshold { + if lag > sm.unhealthyThreshold.Get() { if sm.replHealthy { log.Infof("Going unhealthy due to high replication lag: %v", lag) } @@ -755,3 +755,7 @@ func (sm *stateManager) IsServingString() string { } return "NOT_SERVING" } + +func (sm *stateManager) SetUnhealthyThreshold(v time.Duration) { + sm.unhealthyThreshold.Set(v) +} diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index bc1ff4ca52e..53396e052ba 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -101,18 +101,18 @@ func TestStateManagerServeNonMaster(t *testing.T) { verifySubcomponent(t, 1, sm.ddle, testStateClosed) verifySubcomponent(t, 2, sm.tableGC, testStateClosed) - verifySubcomponent(t, 3, sm.throttler, testStateClosed) - verifySubcomponent(t, 4, sm.messager, testStateClosed) - verifySubcomponent(t, 5, sm.tracker, testStateClosed) + verifySubcomponent(t, 3, sm.messager, testStateClosed) + verifySubcomponent(t, 4, sm.tracker, testStateClosed) assert.True(t, sm.se.(*testSchemaEngine).nonMaster) - 
verifySubcomponent(t, 6, sm.se, testStateOpen) - verifySubcomponent(t, 7, sm.vstreamer, testStateOpen) - verifySubcomponent(t, 8, sm.qe, testStateOpen) - verifySubcomponent(t, 9, sm.txThrottler, testStateOpen) - verifySubcomponent(t, 10, sm.te, testStateNonMaster) - verifySubcomponent(t, 11, sm.rt, testStateNonMaster) - verifySubcomponent(t, 12, sm.watcher, testStateOpen) + verifySubcomponent(t, 5, sm.se, testStateOpen) + verifySubcomponent(t, 6, sm.vstreamer, testStateOpen) + verifySubcomponent(t, 7, sm.qe, testStateOpen) + verifySubcomponent(t, 8, sm.txThrottler, testStateOpen) + verifySubcomponent(t, 9, sm.te, testStateNonMaster) + verifySubcomponent(t, 10, sm.rt, testStateNonMaster) + verifySubcomponent(t, 11, sm.watcher, testStateOpen) + verifySubcomponent(t, 12, sm.throttler, testStateOpen) assert.Equal(t, topodatapb.TabletType_REPLICA, sm.target.TabletType) assert.Equal(t, StateServing, sm.state) @@ -292,18 +292,18 @@ func TestStateManagerSetServingTypeNoChange(t *testing.T) { verifySubcomponent(t, 1, sm.ddle, testStateClosed) verifySubcomponent(t, 2, sm.tableGC, testStateClosed) - verifySubcomponent(t, 3, sm.throttler, testStateClosed) - verifySubcomponent(t, 4, sm.messager, testStateClosed) - verifySubcomponent(t, 5, sm.tracker, testStateClosed) + verifySubcomponent(t, 3, sm.messager, testStateClosed) + verifySubcomponent(t, 4, sm.tracker, testStateClosed) assert.True(t, sm.se.(*testSchemaEngine).nonMaster) - verifySubcomponent(t, 6, sm.se, testStateOpen) - verifySubcomponent(t, 7, sm.vstreamer, testStateOpen) - verifySubcomponent(t, 8, sm.qe, testStateOpen) - verifySubcomponent(t, 9, sm.txThrottler, testStateOpen) - verifySubcomponent(t, 10, sm.te, testStateNonMaster) - verifySubcomponent(t, 11, sm.rt, testStateNonMaster) - verifySubcomponent(t, 12, sm.watcher, testStateOpen) + verifySubcomponent(t, 5, sm.se, testStateOpen) + verifySubcomponent(t, 6, sm.vstreamer, testStateOpen) + verifySubcomponent(t, 7, sm.qe, testStateOpen) + verifySubcomponent(t, 8, 
sm.txThrottler, testStateOpen) + verifySubcomponent(t, 9, sm.te, testStateNonMaster) + verifySubcomponent(t, 10, sm.rt, testStateNonMaster) + verifySubcomponent(t, 11, sm.watcher, testStateOpen) + verifySubcomponent(t, 12, sm.throttler, testStateOpen) assert.Equal(t, topodatapb.TabletType_REPLICA, sm.target.TabletType) assert.Equal(t, StateServing, sm.state) diff --git a/go/vt/vttablet/tabletserver/stateful_connection.go b/go/vt/vttablet/tabletserver/stateful_connection.go index da079e5938f..004ce27b625 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection.go +++ b/go/vt/vttablet/tabletserver/stateful_connection.go @@ -113,6 +113,14 @@ func (sc *StatefulConnection) execWithRetry(ctx context.Context, query string, m return nil } +// FetchNext returns the next result set. +func (sc *StatefulConnection) FetchNext(ctx context.Context, maxrows int, wantfields bool) (*sqltypes.Result, error) { + if sc.IsClosed() { + return nil, vterrors.New(vtrpcpb.Code_CANCELED, "connection is closed") + } + return sc.dbConn.FetchNext(ctx, maxrows, wantfields) +} + // Unlock returns the connection to the pool. The connection remains active. // This method is idempotent and can be called multiple times func (sc *StatefulConnection) Unlock() { diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index 06fc1bfac70..db7e3546528 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -24,6 +24,7 @@ import ( "github.com/golang/protobuf/proto" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/dbconfigs" @@ -103,6 +104,8 @@ func init() { flag.IntVar(¤tConfig.StreamBufferSize, "queryserver-config-stream-buffer-size", defaultConfig.StreamBufferSize, "query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. 
It's recommended to keep this value in sync with vtgate's stream_buffer_size.") flag.IntVar(¤tConfig.QueryCacheSize, "queryserver-config-query-cache-size", defaultConfig.QueryCacheSize, "query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") + flag.Int64Var(¤tConfig.QueryCacheMemory, "queryserver-config-query-cache-memory", defaultConfig.QueryCacheMemory, "query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") + flag.BoolVar(¤tConfig.QueryCacheLFU, "queryserver-config-query-cache-lfu", defaultConfig.QueryCacheLFU, "query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") SecondsVar(¤tConfig.SchemaReloadIntervalSeconds, "queryserver-config-schema-reload-time", defaultConfig.SchemaReloadIntervalSeconds, "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.") SecondsVar(¤tConfig.Oltp.QueryTimeoutSeconds, "queryserver-config-query-timeout", defaultConfig.Oltp.QueryTimeoutSeconds, "query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed.") SecondsVar(¤tConfig.OltpReadPool.TimeoutSeconds, "queryserver-config-query-pool-timeout", defaultConfig.OltpReadPool.TimeoutSeconds, "query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. 
If set to 0 (default) then the overall query timeout is used instead.") @@ -123,9 +126,9 @@ func init() { flag.BoolVar(¤tConfig.TwoPCEnable, "twopc_enable", defaultConfig.TwoPCEnable, "if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.") flag.StringVar(¤tConfig.TwoPCCoordinatorAddress, "twopc_coordinator_address", defaultConfig.TwoPCCoordinatorAddress, "address of the (VTGate) process(es) that will be used to notify of abandoned transactions.") SecondsVar(¤tConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge, "time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.") - flag.BoolVar(¤tConfig.EnableTxThrottler, "enable-tx-throttler", defaultConfig.EnableTxThrottler, "If true replication-lag-based throttling on transactions will be enabled.") - flag.StringVar(¤tConfig.TxThrottlerConfig, "tx-throttler-config", defaultConfig.TxThrottlerConfig, "The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message") - flagutil.StringListVar(¤tConfig.TxThrottlerHealthCheckCells, "tx-throttler-healthcheck-cells", defaultConfig.TxThrottlerHealthCheckCells, "A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.") + flagutil.DualFormatBoolVar(¤tConfig.EnableTxThrottler, "enable_tx_throttler", defaultConfig.EnableTxThrottler, "If true replication-lag-based throttling on transactions will be enabled.") + flagutil.DualFormatStringVar(¤tConfig.TxThrottlerConfig, "tx_throttler_config", defaultConfig.TxThrottlerConfig, "The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message") + flagutil.DualFormatStringListVar(¤tConfig.TxThrottlerHealthCheckCells, "tx_throttler_healthcheck_cells", defaultConfig.TxThrottlerHealthCheckCells, "A comma-separated list of cells. 
Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.") flag.BoolVar(&enableHotRowProtection, "enable_hot_row_protection", false, "If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.") flag.BoolVar(&enableHotRowProtectionDryRun, "enable_hot_row_protection_dry_run", false, "If true, hot row protection is not enforced but logs if transactions would have been queued.") @@ -143,12 +146,12 @@ func init() { flag.BoolVar(&enableHeartbeat, "heartbeat_enable", false, "If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.") flag.DurationVar(&heartbeatInterval, "heartbeat_interval", 1*time.Second, "How frequently to read and write replication heartbeat.") - flag.BoolVar(¤tConfig.EnableLagThrottler, "enable-lag-throttler", defaultConfig.EnableLagThrottler, "If true, vttablet will run a throttler service, and will implicitly enable heartbeats") + flagutil.DualFormatBoolVar(¤tConfig.EnableLagThrottler, "enable_lag_throttler", defaultConfig.EnableLagThrottler, "If true, vttablet will run a throttler service, and will implicitly enable heartbeats") flag.BoolVar(¤tConfig.EnforceStrictTransTables, "enforce_strict_trans_tables", defaultConfig.EnforceStrictTransTables, "If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. 
Otherwise MySQL may alter your supplied values before saving them to the database.") - flag.BoolVar(&enableConsolidator, "enable-consolidator", true, "This option enables the query consolidator.") - flag.BoolVar(&enableConsolidatorReplicas, "enable-consolidator-replicas", false, "This option enables the query consolidator only on replicas.") - flag.BoolVar(¤tConfig.CacheResultFields, "enable-query-plan-field-caching", defaultConfig.CacheResultFields, "This option fetches & caches fields (columns) when storing query plans") + flagutil.DualFormatBoolVar(&enableConsolidator, "enable_consolidator", true, "This option enables the query consolidator.") + flagutil.DualFormatBoolVar(&enableConsolidatorReplicas, "enable_consolidator_replicas", false, "This option enables the query consolidator only on replicas.") + flagutil.DualFormatBoolVar(¤tConfig.CacheResultFields, "enable_query_plan_field_caching", defaultConfig.CacheResultFields, "This option fetches & caches fields (columns) when storing query plans") flag.DurationVar(&healthCheckInterval, "health_check_interval", 20*time.Second, "Interval between health checks") flag.DurationVar(°radedThreshold, "degraded_threshold", 30*time.Second, "replication lag after which a replica is considered degraded") @@ -245,6 +248,8 @@ type TabletConfig struct { ConsolidatorStreamTotalSize int64 `json:"consolidatorStreamTotalSize,omitempty"` ConsolidatorStreamQuerySize int64 `json:"consolidatorStreamQuerySize,omitempty"` QueryCacheSize int `json:"queryCacheSize,omitempty"` + QueryCacheMemory int64 `json:"queryCacheMemory,omitempty"` + QueryCacheLFU bool `json:"queryCacheLFU,omitempty"` SchemaReloadIntervalSeconds Seconds `json:"schemaReloadIntervalSeconds,omitempty"` WatchReplication bool `json:"watchReplication,omitempty"` TrackSchemaVersions bool `json:"trackSchemaVersions,omitempty"` @@ -451,7 +456,9 @@ var defaultConfig = TabletConfig{ // great (the overhead makes the final packets on the wire about twice // bigger than this). 
StreamBufferSize: 32 * 1024, - QueryCacheSize: 5000, + QueryCacheSize: int(cache.DefaultConfig.MaxEntries), + QueryCacheMemory: cache.DefaultConfig.MaxMemoryUsage, + QueryCacheLFU: cache.DefaultConfig.LFU, SchemaReloadIntervalSeconds: 30 * 60, MessagePostponeParallelism: 4, CacheResultFields: true, diff --git a/go/vt/vttablet/tabletserver/tabletenv/config_test.go b/go/vt/vttablet/tabletserver/tabletenv/config_test.go index cd45d4dd738..2ce2c0a1ff4 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config_test.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/cache" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/yaml2" ) @@ -132,6 +133,8 @@ oltpReadPool: idleTimeoutSeconds: 1800 maxWaiters: 5000 size: 16 +queryCacheLFU: true +queryCacheMemory: 33554432 queryCacheSize: 5000 replicationTracker: heartbeatIntervalSeconds: 0.25 @@ -192,7 +195,9 @@ func TestFlags(t *testing.T) { MaxConcurrency: 5, }, StreamBufferSize: 32768, - QueryCacheSize: 5000, + QueryCacheSize: int(cache.DefaultConfig.MaxEntries), + QueryCacheMemory: cache.DefaultConfig.MaxMemoryUsage, + QueryCacheLFU: cache.DefaultConfig.LFU, SchemaReloadIntervalSeconds: 1800, TrackSchemaVersions: false, MessagePostponeParallelism: 4, diff --git a/go/vt/vttablet/tabletserver/tabletenv/logstats.go b/go/vt/vttablet/tabletserver/tabletenv/logstats.go index 5e63bd89db0..a0e2e12a89a 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/logstats.go +++ b/go/vt/vttablet/tabletserver/tabletenv/logstats.go @@ -61,6 +61,7 @@ type LogStats struct { TransactionID int64 ReservedID int64 Error error + CachedPlan bool } // NewLogStats constructs a new LogStats with supplied Method and ctx @@ -181,7 +182,7 @@ func (stats *LogStats) CallInfo() (string, string) { // Logf formats the log record to the given writer, either as // tab-separated list of logged fields or as JSON. 
func (stats *LogStats) Logf(w io.Writer, params url.Values) error { - if !streamlog.ShouldEmitLog(stats.OriginalSQL) { + if !streamlog.ShouldEmitLog(stats.OriginalSQL, uint64(stats.RowsAffected), uint64(len(stats.Rows))) { return nil } diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 96ebf9881af..c61ef7fd910 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -66,6 +66,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/txserializer" "vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" + "vitess.io/vitess/go/vt/vttablet/vexec" ) // logPoolFull is for throttling transaction / query pool full messages in the log. @@ -157,13 +158,21 @@ func NewTabletServer(name string, config *tabletenv.TabletConfig, topoServer *to tsOnce.Do(func() { srvTopoServer = srvtopo.NewResilientServer(topoServer, "TabletSrvTopo") }) + tabletTypeFunc := func() topodatapb.TabletType { + if tsv.sm == nil { + return topodatapb.TabletType_UNKNOWN + } + return tsv.sm.Target().TabletType + } + tsv.statelessql = NewQueryList("oltp-stateless") tsv.statefulql = NewQueryList("oltp-stateful") tsv.olapql = NewQueryList("olap") + tsv.lagThrottler = throttle.NewThrottler(tsv, topoServer, tabletTypeFunc) tsv.hs = newHealthStreamer(tsv, alias) tsv.se = schema.NewEngine(tsv) tsv.rt = repltracker.NewReplTracker(tsv, alias) - tsv.vstreamer = vstreamer.NewEngine(tsv, srvTopoServer, tsv.se, alias.Cell) + tsv.vstreamer = vstreamer.NewEngine(tsv, srvTopoServer, tsv.se, tsv.lagThrottler, alias.Cell) tsv.tracker = schema.NewTracker(tsv, tsv.vstreamer, tsv.se) tsv.watcher = NewBinlogWatcher(tsv, tsv.vstreamer, tsv.config) tsv.qe = NewQueryEngine(tsv, tsv.se) @@ -171,14 +180,7 @@ func NewTabletServer(name string, config *tabletenv.TabletConfig, topoServer *to tsv.te = NewTxEngine(tsv) tsv.messager = messager.NewEngine(tsv, tsv.se, 
tsv.vstreamer) - tabletTypeFunc := func() topodatapb.TabletType { - if tsv.sm == nil { - return topodatapb.TabletType_UNKNOWN - } - return tsv.sm.Target().TabletType - } tsv.onlineDDLExecutor = onlineddl.NewExecutor(tsv, alias, topoServer, tabletTypeFunc) - tsv.lagThrottler = throttle.NewThrottler(tsv, topoServer, tabletTypeFunc) tsv.tableGC = gc.NewTableGC(tsv, topoServer, tabletTypeFunc, tsv.lagThrottler) tsv.sm = &stateManager{ @@ -397,6 +399,32 @@ func (tsv *TabletServer) ReloadSchema(ctx context.Context) error { return tsv.se.Reload(ctx) } +// WaitForSchemaReset blocks the TabletServer until there's been at least `timeout` duration without +// any schema changes. This is useful for tests that need to wait for all the currently existing schema +// changes to finish being applied. +func (tsv *TabletServer) WaitForSchemaReset(timeout time.Duration) { + onSchemaChange := make(chan struct{}, 1) + tsv.se.RegisterNotifier("_tsv_wait", func(_ map[string]*schema.Table, _, _, _ []string) { + onSchemaChange <- struct{}{} + }) + defer tsv.se.UnregisterNotifier("_tsv_wait") + + after := time.NewTimer(timeout) + defer after.Stop() + + for { + select { + case <-after.C: + return + case <-onSchemaChange: + if !after.Stop() { + <-after.C + } + after.Reset(timeout) + } + } +} + // ClearQueryPlanCache clears internal query plan cache func (tsv *TabletServer) ClearQueryPlanCache() { // We should ideally bracket this with start & endErequest, @@ -411,7 +439,7 @@ func (tsv *TabletServer) QueryService() queryservice.QueryService { } // OnlineDDLExecutor returns the onlineddl.Executor part of TabletServer. 
-func (tsv *TabletServer) OnlineDDLExecutor() *onlineddl.Executor { +func (tsv *TabletServer) OnlineDDLExecutor() vexec.Executor { return tsv.onlineDDLExecutor } @@ -715,10 +743,15 @@ func (tsv *TabletServer) Execute(ctx context.Context, target *querypb.Target, sq result = result.StripMetadata(sqltypes.IncludeFieldsOrDefault(options)) // Change database name in mysql output to the keyspace name - if sqltypes.IncludeFieldsOrDefault(options) == querypb.ExecuteOptions_ALL { - for _, f := range result.Fields { - if f.Database != "" { - f.Database = tsv.sm.target.Keyspace + if tsv.sm.target.Keyspace != tsv.config.DB.DBName && sqltypes.IncludeFieldsOrDefault(options) == querypb.ExecuteOptions_ALL { + switch qre.plan.PlanID { + case planbuilder.PlanSelect, planbuilder.PlanSelectImpossible: + dbName := tsv.config.DB.DBName + ksName := tsv.sm.target.Keyspace + for _, f := range result.Fields { + if f.Database == dbName { + f.Database = ksName + } } } } @@ -1557,7 +1590,7 @@ func (tsv *TabletServer) registerMigrationStatusHandler() { tsv.exporter.HandleFunc("/schema-migration/report-status", func(w http.ResponseWriter, r *http.Request) { ctx := tabletenv.LocalContext() query := r.URL.Query() - if err := tsv.onlineDDLExecutor.OnSchemaMigrationStatus(ctx, query.Get("uuid"), query.Get("status"), query.Get("dryrun"), query.Get("progress")); err != nil { + if err := tsv.onlineDDLExecutor.OnSchemaMigrationStatus(ctx, query.Get("uuid"), query.Get("status"), query.Get("dryrun"), query.Get("progress"), query.Get("eta")); err != nil { http.Error(w, fmt.Sprintf("not ok: %v", err), http.StatusInternalServerError) return } @@ -1565,35 +1598,39 @@ func (tsv *TabletServer) registerMigrationStatusHandler() { }) } -// registerThrottlerCheckHandler registers a throttler "check" request -func (tsv *TabletServer) registerThrottlerCheckHandler() { - tsv.exporter.HandleFunc("/throttler/check", func(w http.ResponseWriter, r *http.Request) { - ctx := tabletenv.LocalContext() - remoteAddr := 
r.Header.Get("X-Forwarded-For") - if remoteAddr == "" { - remoteAddr = r.RemoteAddr - remoteAddr = strings.Split(remoteAddr, ":")[0] - } - appName := r.URL.Query().Get("app") - if appName == "" { - appName = throttle.DefaultAppName - } - flags := &throttle.CheckFlags{ - LowPriority: (r.URL.Query().Get("p") == "low"), - } - checkResult := tsv.lagThrottler.Check(ctx, appName, remoteAddr, flags) - if checkResult.StatusCode == http.StatusNotFound && flags.OKIfNotExists { - checkResult.StatusCode = http.StatusOK // 200 - } +// registerThrottlerCheckHandlers registers throttler "check" requests +func (tsv *TabletServer) registerThrottlerCheckHandlers() { + handle := func(path string, checkType throttle.ThrottleCheckType) { + tsv.exporter.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + ctx := tabletenv.LocalContext() + remoteAddr := r.Header.Get("X-Forwarded-For") + if remoteAddr == "" { + remoteAddr = r.RemoteAddr + remoteAddr = strings.Split(remoteAddr, ":")[0] + } + appName := r.URL.Query().Get("app") + if appName == "" { + appName = throttle.DefaultAppName + } + flags := &throttle.CheckFlags{ + LowPriority: (r.URL.Query().Get("p") == "low"), + } + checkResult := tsv.lagThrottler.CheckByType(ctx, appName, remoteAddr, flags, checkType) + if checkResult.StatusCode == http.StatusNotFound && flags.OKIfNotExists { + checkResult.StatusCode = http.StatusOK // 200 + } - if r.Method == http.MethodGet { - w.Header().Set("Content-Type", "application/json") - } - w.WriteHeader(checkResult.StatusCode) - if r.Method == http.MethodGet { - json.NewEncoder(w).Encode(checkResult) - } - }) + if r.Method == http.MethodGet { + w.Header().Set("Content-Type", "application/json") + } + w.WriteHeader(checkResult.StatusCode) + if r.Method == http.MethodGet { + json.NewEncoder(w).Encode(checkResult) + } + }) + } + handle("/throttler/check", throttle.ThrottleCheckPrimaryWrite) + handle("/throttler/check-self", throttle.ThrottleCheckSelf) } // registerThrottlerStatusHandler 
registers a throttler "status" request @@ -1606,10 +1643,34 @@ func (tsv *TabletServer) registerThrottlerStatusHandler() { }) } +// registerThrottlerThrottleAppHandler registers a throttler "throttle-app" request +func (tsv *TabletServer) registerThrottlerThrottleAppHandler() { + tsv.exporter.HandleFunc("/throttler/throttle-app", func(w http.ResponseWriter, r *http.Request) { + appName := r.URL.Query().Get("app") + d, err := time.ParseDuration(r.URL.Query().Get("duration")) + if err != nil { + http.Error(w, fmt.Sprintf("not ok: %v", err), http.StatusInternalServerError) + return + } + appThrottle := tsv.lagThrottler.ThrottleApp(appName, time.Now().Add(d), 1) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(appThrottle) + }) + tsv.exporter.HandleFunc("/throttler/unthrottle-app", func(w http.ResponseWriter, r *http.Request) { + appName := r.URL.Query().Get("app") + appThrottle := tsv.lagThrottler.UnthrottleApp(appName) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(appThrottle) + }) +} + // registerThrottlerHandlers registers all throttler handlers func (tsv *TabletServer) registerThrottlerHandlers() { - tsv.registerThrottlerCheckHandler() + tsv.registerThrottlerCheckHandlers() tsv.registerThrottlerStatusHandler() + tsv.registerThrottlerThrottleAppHandler() } func (tsv *TabletServer) registerDebugEnvHandler() { @@ -1624,6 +1685,13 @@ func (tsv *TabletServer) EnableHeartbeat(enabled bool) { tsv.rt.EnableHeartbeat(enabled) } +// EnableThrottler forces throttler to be on or off. +// When throttler is off, it responds to all check requests with HTTP 200 OK +// Only to be used for testing. +func (tsv *TabletServer) EnableThrottler(enabled bool) { + tsv.Config().EnableLagThrottler = enabled +} + // SetTracking forces tracking to be on or off. // Only to be used for testing. 
func (tsv *TabletServer) SetTracking(enabled bool) { @@ -1690,9 +1758,19 @@ func (tsv *TabletServer) SetQueryPlanCacheCap(val int) { tsv.qe.SetQueryPlanCacheCap(val) } -// QueryPlanCacheCap returns the pool size. +// QueryPlanCacheCap returns the plan cache capacity func (tsv *TabletServer) QueryPlanCacheCap() int { - return int(tsv.qe.QueryPlanCacheCap()) + return tsv.qe.QueryPlanCacheCap() +} + +// QueryPlanCacheLen returns the plan cache length +func (tsv *TabletServer) QueryPlanCacheLen() int { + return tsv.qe.QueryPlanCacheLen() +} + +// QueryPlanCacheWait waits until the query plan cache has processed all recent queries +func (tsv *TabletServer) QueryPlanCacheWait() { + tsv.qe.plans.Wait() } // SetMaxResultSize changes the max result size to the specified value. @@ -1768,5 +1846,5 @@ func skipQueryPlanCache(options *querypb.ExecuteOptions) bool { if options == nil { return false } - return options.SkipQueryPlanCache + return options.SkipQueryPlanCache || options.HasCreatedTempTables } diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index 96665297eec..b8422498604 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -1853,7 +1853,7 @@ func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { defer tl.Close() err := tsv.convertAndLogError(ctx, "select * from test_table where id = :a", map[string]*querypb.BindVariable{"a": sqltypes.Int64BindVariable(1)}, - mysql.NewSQLError(1227, mysql.SSSyntaxErrorOrAccessViolation, "failover in progress"), + mysql.NewSQLError(1227, mysql.SSClientError, "failover in progress"), nil, ) if got, want := err.Error(), "failover in progress (errno 1227) (sqlstate 42000)"; !strings.HasPrefix(got, want) { @@ -2187,7 +2187,7 @@ func TestReserveStats(t *testing.T) { func TestDatabaseNameReplaceByKeyspaceNameExecuteMethod(t *testing.T) { db, tsv := setupTabletServerTest(t, "keyspaceName") - 
db.SetName("databaseInMysql") + setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2221,7 +2221,7 @@ func TestDatabaseNameReplaceByKeyspaceNameExecuteMethod(t *testing.T) { func TestDatabaseNameReplaceByKeyspaceNameStreamExecuteMethod(t *testing.T) { db, tsv := setupTabletServerTest(t, "keyspaceName") - db.SetName("databaseInMysql") + setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2256,7 +2256,7 @@ func TestDatabaseNameReplaceByKeyspaceNameStreamExecuteMethod(t *testing.T) { func TestDatabaseNameReplaceByKeyspaceNameExecuteBatchMethod(t *testing.T) { db, tsv := setupTabletServerTest(t, "keyspaceName") - db.SetName("databaseInMysql") + setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2297,7 +2297,7 @@ func TestDatabaseNameReplaceByKeyspaceNameExecuteBatchMethod(t *testing.T) { func TestDatabaseNameReplaceByKeyspaceNameBeginExecuteMethod(t *testing.T) { db, tsv := setupTabletServerTest(t, "keyspaceName") - db.SetName("databaseInMysql") + setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2327,9 +2327,14 @@ func TestDatabaseNameReplaceByKeyspaceNameBeginExecuteMethod(t *testing.T) { require.NoError(t, err) } +func setDBName(db *fakesqldb.DB, tsv *TabletServer, s string) { + tsv.config.DB.DBName = "databaseInMysql" + db.SetName("databaseInMysql") +} + func TestDatabaseNameReplaceByKeyspaceNameBeginExecuteBatchMethod(t *testing.T) { db, tsv := setupTabletServerTest(t, "keyspaceName") - db.SetName("databaseInMysql") + setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2372,7 +2377,7 @@ func TestDatabaseNameReplaceByKeyspaceNameBeginExecuteBatchMethod(t *testing.T) func TestDatabaseNameReplaceByKeyspaceNameReserveExecuteMethod(t *testing.T) { db, tsv := setupTabletServerTest(t, "keyspaceName") - db.SetName("databaseInMysql") + setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ 
-2404,7 +2409,7 @@ func TestDatabaseNameReplaceByKeyspaceNameReserveExecuteMethod(t *testing.T) { func TestDatabaseNameReplaceByKeyspaceNameReserveBeginExecuteMethod(t *testing.T) { db, tsv := setupTabletServerTest(t, "keyspaceName") - db.SetName("databaseInMysql") + setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2458,6 +2463,19 @@ func setupFakeDB(t *testing.T) *fakesqldb.DB { for query, result := range getSupportedQueries() { db.AddQuery(query, result) } + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ + Fields: mysql.BaseShowTablesFields, + Rows: [][]sqltypes.Value{ + mysql.BaseShowTablesRow("test_table", false, ""), + mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"), + }, + }) + db.AddQuery("show status like 'Innodb_rows_read'", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "Variable_name|Value", + "varchar|int64"), + "Innodb_rows_read|0", + )) + return db } @@ -2520,13 +2538,6 @@ func getSupportedQueries() map[string]*sqltypes.Result { {sqltypes.NewVarBinary("0")}, }, }, - mysql.BaseShowTables: { - Fields: mysql.BaseShowTablesFields, - Rows: [][]sqltypes.Value{ - mysql.BaseShowTablesRow("test_table", false, ""), - mysql.BaseShowTablesRow("msg", false, "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30"), - }, - }, mysql.BaseShowPrimary: { Fields: mysql.ShowPrimaryFields, Rows: [][]sqltypes.Value{ diff --git a/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go b/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go index 46101ba87e6..0633e99c95c 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go +++ b/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go @@ -23,13 +23,15 @@ import ( // AppThrottle is the definition for an app throttling instruction // - Ratio: [0..1], 0 == no throttle, 1 == fully throttle type AppThrottle 
struct { + AppName string ExpireAt time.Time Ratio float64 } // NewAppThrottle creates an AppThrottle struct -func NewAppThrottle(expireAt time.Time, ratio float64) *AppThrottle { +func NewAppThrottle(appName string, expireAt time.Time, ratio float64) *AppThrottle { result := &AppThrottle{ + AppName: appName, ExpireAt: expireAt, Ratio: ratio, } diff --git a/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go b/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go index 4baf063c6fd..ff6e1b146d9 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go +++ b/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go @@ -27,6 +27,9 @@ var errNoResultYet = errors.New("Metric not collected yet") // ErrNoSuchMetric is for when a user requests a metric by an unknown metric name var ErrNoSuchMetric = errors.New("No such metric") +// ErrInvalidCheckType is an internal error indicating an unknown check type +var ErrInvalidCheckType = errors.New("Unknown throttler check type") + // IsDialTCPError sees if th egiven error indicates a TCP issue func IsDialTCPError(e error) bool { if e == nil { diff --git a/go/vt/vttablet/tabletserver/throttle/check_result.go b/go/vt/vttablet/tabletserver/throttle/check_result.go index 9981109694c..52d52b78468 100644 --- a/go/vt/vttablet/tabletserver/throttle/check_result.go +++ b/go/vt/vttablet/tabletserver/throttle/check_result.go @@ -44,3 +44,5 @@ func NewErrorCheckResult(statusCode int, err error) *CheckResult { var NoSuchMetricCheckResult = NewErrorCheckResult(http.StatusNotFound, base.ErrNoSuchMetric) var okMetricCheckResult = NewCheckResult(http.StatusOK, 0, 0, nil) + +var invalidCheckTypeCheckResult = NewErrorCheckResult(http.StatusInternalServerError, base.ErrInvalidCheckType) diff --git a/go/vt/vttablet/tabletserver/throttle/client.go b/go/vt/vttablet/tabletserver/throttle/client.go new file mode 100644 index 00000000000..76f3ab1981f --- /dev/null +++ b/go/vt/vttablet/tabletserver/throttle/client.go @@ 
-0,0 +1,117 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package throttle + +import ( + "context" + "net/http" + "time" +) + +const ( + throttleCheckDuration = 250 * time.Millisecond +) + +// Client construct is used by apps who wish to consult with a throttler. It encapsulates the check/throttling/backoff logic +type Client struct { + throttler *Throttler + appName string + checkType ThrottleCheckType + flags CheckFlags + + lastSuccessfulThrottleCheck time.Time +} + +// NewProductionClient creates a client suitable for foreground/production jobs, which have normal priority. +func NewProductionClient(throttler *Throttler, appName string, checkType ThrottleCheckType) *Client { + return &Client{ + throttler: throttler, + appName: appName, + checkType: checkType, + flags: CheckFlags{ + LowPriority: false, + }, + } +} + +// NewBackgroundClient creates a client suitable for background jobs, which have low priority over production traffic, +// e.g. migration, table pruning, vreplication +func NewBackgroundClient(throttler *Throttler, appName string, checkType ThrottleCheckType) *Client { + return &Client{ + throttler: throttler, + appName: appName, + checkType: checkType, + flags: CheckFlags{ + LowPriority: true, + }, + } +} + +// ThrottleCheckOK checks the throttler, and returns 'true' when the throttler is satisfied. +// It does not sleep.
+// The function caches results for a brief amount of time, hence it's safe and efficient to +// be called very frequently. +// The function is not thread safe. +func (c *Client) ThrottleCheckOK(ctx context.Context) (throttleCheckOK bool) { + if c == nil { + // no client + return true + } + if c.throttler == nil { + // no throttler + return true + } + if time.Since(c.lastSuccessfulThrottleCheck) <= throttleCheckDuration { + // if last check was OK just very recently there is no need to check again + return true + } + // It's time to run a throttler check + checkResult := c.throttler.CheckByType(ctx, c.appName, "", &c.flags, c.checkType) + if checkResult.StatusCode != http.StatusOK { + return false + } + c.lastSuccessfulThrottleCheck = time.Now() + return true + +} + +// ThrottleCheckOKOrWait checks the throttler; if throttler is satisfied, the function returns 'true' immediately, +// otherwise it briefly sleeps and returns 'false'. +// The function is not thread safe. +func (c *Client) ThrottleCheckOKOrWait(ctx context.Context) bool { + ok := c.ThrottleCheckOK(ctx) + if !ok { + time.Sleep(throttleCheckDuration) + } + return ok +} + +// Throttle throttles until the throttler is satisfied, or until context is cancelled. +// The function sleeps between throttle checks. +// The function is not thread safe. +func (c *Client) Throttle(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + default: + } + if c.ThrottleCheckOKOrWait(ctx) { + break + } + } +} diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/instance_key.go b/go/vt/vttablet/tabletserver/throttle/mysql/instance_key.go index d86d1317606..adcd6f422fb 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql/instance_key.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql/instance_key.go @@ -17,6 +17,10 @@ type InstanceKey struct { Port int } +// SelfInstanceKey is a special indicator for "this instance", e.g.
denoting the MySQL server associated with local tablet +// The values of this key are immaterial and are intentionally descriptive +var SelfInstanceKey = &InstanceKey{Hostname: "(self)", Port: 1} + // newRawInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306 // It expects such format and returns with error if input differs in format func newRawInstanceKey(hostPort string) (*InstanceKey, error) { @@ -70,6 +74,14 @@ func (i *InstanceKey) IsValid() bool { return len(i.Hostname) > 0 && i.Port > 0 } +// IsSelf checks if this is the special "self" instance key +func (i *InstanceKey) IsSelf() bool { + if SelfInstanceKey == i { + return true + } + return SelfInstanceKey.Equals(i) +} + // StringCode returns an official string representation of this key func (i *InstanceKey) StringCode() string { return fmt.Sprintf("%s:%d", i.Hostname, i.Port) diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go b/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go index 99f130efadd..9863c93b701 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go @@ -17,6 +17,20 @@ import ( "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" ) +// MetricsQueryType indicates the type of metrics query on MySQL backend. See following. +type MetricsQueryType int + +const ( + // MetricsQueryTypeDefault indicates the default, internal implementation. Specifically, our throttler runs a replication lag query + MetricsQueryTypeDefault MetricsQueryType = iota + // MetricsQueryTypeShowGlobal indicates a SHOW GLOBAL (STATUS|VARIABLES) query + MetricsQueryTypeShowGlobal + // MetricsQueryTypeSelect indicates a custom SELECT query + MetricsQueryTypeSelect + // MetricsQueryTypeUnknown is an unknown query type, which we cannot run.
This is an error + MetricsQueryTypeUnknown +) + var mysqlMetricCache = cache.New(cache.NoExpiration, 10*time.Millisecond) func getMySQLMetricCacheKey(probe *Probe) string { @@ -44,6 +58,20 @@ func getCachedMySQLThrottleMetric(probe *Probe) *MySQLThrottleMetric { return nil } +// GetMetricsQueryType analyzes the type of a metrics query +func GetMetricsQueryType(query string) MetricsQueryType { + if query == "" { + return MetricsQueryTypeDefault + } + if strings.HasPrefix(strings.ToLower(query), "select") { + return MetricsQueryTypeSelect + } + if strings.HasPrefix(strings.ToLower(query), "show global") { + return MetricsQueryTypeShowGlobal + } + return MetricsQueryTypeUnknown +} + // MySQLThrottleMetric has the probed metric for a mysql instance type MySQLThrottleMetric struct { ClusterName string @@ -67,9 +95,9 @@ func (metric *MySQLThrottleMetric) Get() (float64, error) { return metric.Value, metric.Err } -// ReadThrottleMetric returns replication lag for a given connection config; either by explicit query +// ReadThrottleMetric returns a metric for the given probe. 
Either by explicit query // or via SHOW SLAVE STATUS -func ReadThrottleMetric(probe *Probe, clusterName string) (mySQLThrottleMetric *MySQLThrottleMetric) { +func ReadThrottleMetric(probe *Probe, clusterName string, overrideGetMetricFunc func() *MySQLThrottleMetric) (mySQLThrottleMetric *MySQLThrottleMetric) { if mySQLThrottleMetric := getCachedMySQLThrottleMetric(probe); mySQLThrottleMetric != nil { return mySQLThrottleMetric // On cached results we avoid taking latency metrics @@ -90,6 +118,11 @@ func ReadThrottleMetric(probe *Probe, clusterName string) (mySQLThrottleMetric * }() }(mySQLThrottleMetric, started) + if overrideGetMetricFunc != nil { + mySQLThrottleMetric = overrideGetMetricFunc() + return cacheMySQLThrottleMetric(probe, mySQLThrottleMetric) + } + dbURI := probe.GetDBUri("information_schema") db, fromCache, err := sqlutils.GetDB(dbURI) @@ -101,33 +134,28 @@ func ReadThrottleMetric(probe *Probe, clusterName string) (mySQLThrottleMetric * db.SetMaxOpenConns(maxPoolConnections) db.SetMaxIdleConns(maxIdleConnections) } - if strings.HasPrefix(strings.ToLower(probe.MetricQuery), "select") { + metricsQueryType := GetMetricsQueryType(probe.MetricQuery) + switch metricsQueryType { + case MetricsQueryTypeSelect: mySQLThrottleMetric.Err = db.QueryRow(probe.MetricQuery).Scan(&mySQLThrottleMetric.Value) return cacheMySQLThrottleMetric(probe, mySQLThrottleMetric) - } - - if strings.HasPrefix(strings.ToLower(probe.MetricQuery), "show global") { + case MetricsQueryTypeShowGlobal: var variableName string // just a placeholder mySQLThrottleMetric.Err = db.QueryRow(probe.MetricQuery).Scan(&variableName, &mySQLThrottleMetric.Value) return cacheMySQLThrottleMetric(probe, mySQLThrottleMetric) + case MetricsQueryTypeDefault: + mySQLThrottleMetric.Err = sqlutils.QueryRowsMap(db, `show slave status`, func(m sqlutils.RowMap) error { + slaveIORunning := m.GetString("Slave_IO_Running") + slaveSQLRunning := m.GetString("Slave_SQL_Running") + secondsBehindMaster := 
m.GetNullInt64("Seconds_Behind_Master") + if !secondsBehindMaster.Valid { + return fmt.Errorf("replication not running; Slave_IO_Running=%+v, Slave_SQL_Running=%+v", slaveIORunning, slaveSQLRunning) + } + mySQLThrottleMetric.Value = float64(secondsBehindMaster.Int64) + return nil + }) + return cacheMySQLThrottleMetric(probe, mySQLThrottleMetric) } - - if probe.MetricQuery != "" { - mySQLThrottleMetric.Err = fmt.Errorf("Unsupported metrics query type: %s", probe.MetricQuery) - return mySQLThrottleMetric - } - - // No metric query? By default we look at replication lag as output of SHOW SLAVE STATUS - - mySQLThrottleMetric.Err = sqlutils.QueryRowsMap(db, `show slave status`, func(m sqlutils.RowMap) error { - slaveIORunning := m.GetString("Slave_IO_Running") - slaveSQLRunning := m.GetString("Slave_SQL_Running") - secondsBehindMaster := m.GetNullInt64("Seconds_Behind_Master") - if !secondsBehindMaster.Valid { - return fmt.Errorf("replication not running; Slave_IO_Running=%+v, Slave_SQL_Running=%+v", slaveIORunning, slaveSQLRunning) - } - mySQLThrottleMetric.Value = float64(secondsBehindMaster.Int64) - return nil - }) - return cacheMySQLThrottleMetric(probe, mySQLThrottleMetric) + mySQLThrottleMetric.Err = fmt.Errorf("Unsupported metrics query type: %s", probe.MetricQuery) + return mySQLThrottleMetric } diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index 123ea09d23e..c2eade1312e 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -10,8 +10,10 @@ import ( "context" "flag" "fmt" + "math" "math/rand" "net/http" + "strconv" "strings" "sync" "sync/atomic" @@ -54,12 +56,17 @@ const ( maxPasswordLength = 32 - localStoreName = "local" + shardStoreName = "shard" + selfStoreName = "self" ) -var throttleThreshold = flag.Duration("throttle_threshold", 1*time.Second, "Replication lag threshold for throttling") -var throttleTabletTypes = 
flag.String("throttle_tablet_types", "replica", "Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included") - +var ( + throttleThreshold = flag.Duration("throttle_threshold", 1*time.Second, "Replication lag threshold for default lag throttling") + throttleTabletTypes = flag.String("throttle_tablet_types", "replica", "Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included") + throttleMetricQuery = flag.String("throttle_metrics_query", "", "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.") + throttleMetricThreshold = flag.Float64("throttle_metrics_threshold", math.MaxFloat64, "Override default throttle threshold, respective to -throttle_metrics_query") + throttlerCheckAsCheckSelf = flag.Bool("throttle_check_as_check_self", false, "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") +) var ( throttlerUser = "vt_tablet_throttler" throttlerGrant = fmt.Sprintf("'%s'@'%s'", throttlerUser, "%") @@ -71,7 +78,17 @@ var ( sqlGrantThrottlerUser = []string{ `GRANT SELECT ON _vt.heartbeat TO %s`, } - replicationLagQuery = `select unix_timestamp(now(6))-max(ts/1000000000) from _vt.heartbeat` + replicationLagQuery = `select unix_timestamp(now(6))-max(ts/1000000000) as replication_lag from _vt.heartbeat` +) + +// ThrottleCheckType allows a client to indicate what type of check it wants to issue. See available types below. 
+type ThrottleCheckType int + +const ( + // ThrottleCheckPrimaryWrite indicates a check before making a write on a primary server + ThrottleCheckPrimaryWrite ThrottleCheckType = iota + // ThrottleCheckSelf indicates a check on a specific server health + ThrottleCheckSelf ) func init() { @@ -101,6 +118,10 @@ type Throttler struct { mysqlInventory *mysql.Inventory + metricsQuery string + metricsThreshold float64 + metricsQueryType mysql.MetricsQueryType + mysqlClusterThresholds *cache.Cache aggregatedMetrics *cache.Cache throttledApps *cache.Cache @@ -150,6 +171,9 @@ func NewThrottler(env tabletenv.Env, ts *topo.Server, tabletTypeFunc func() topo mysqlClusterProbesChan: make(chan *mysql.ClusterProbes), mysqlInventory: mysql.NewInventory(), + metricsQuery: replicationLagQuery, + metricsThreshold: throttleThreshold.Seconds(), + throttledApps: cache.New(cache.NoExpiration, 10*time.Second), mysqlClusterThresholds: cache.New(cache.NoExpiration, 0), aggregatedMetrics: cache.New(aggregatedMetricsExpiration, aggregatedMetricsCleanup), @@ -165,10 +189,14 @@ func NewThrottler(env tabletenv.Env, ts *topo.Server, tabletTypeFunc func() topo throttler.initThrottleTabletTypes() throttler.ThrottleApp("abusing-app", time.Now().Add(time.Hour*24*365*10), defaultThrottleRatio) throttler.check = NewThrottlerCheck(throttler) + throttler.initConfig("") + throttler.check.SelfChecks(context.Background()) return throttler } +// initThrottleTabletTypes reads the user supplied throttle_tablet_types and sets these +// for the duration of this tablet's lifetime func (throttler *Throttler) initThrottleTabletTypes() { throttler.throttleTabletTypesMap = make(map[topodatapb.TabletType]bool) @@ -199,18 +227,34 @@ func (throttler *Throttler) initConfig(password string) { Stores: config.StoresSettings{ MySQL: config.MySQLConfigurationSettings{ IgnoreDialTCPErrors: true, - Clusters: map[string](*config.MySQLClusterConfigurationSettings){ - localStoreName: &config.MySQLClusterConfigurationSettings{ - 
User: throttlerUser - Password: password, - ThrottleThreshold: throttleThreshold.Seconds(), - MetricQuery: replicationLagQuery, - IgnoreHostsCount: 0, - }, - }, + Clusters: map[string](*config.MySQLClusterConfigurationSettings){}, }, }, } + if *throttleMetricQuery != "" { + throttler.metricsQuery = *throttleMetricQuery + } + if *throttleMetricThreshold != math.MaxFloat64 { + throttler.metricsThreshold = *throttleMetricThreshold + } + throttler.metricsQueryType = mysql.GetMetricsQueryType(throttler.metricsQuery) + + config.Instance.Stores.MySQL.Clusters[selfStoreName] = &config.MySQLClusterConfigurationSettings{ + User: "", // running on local tablet server, will use vttablet DBA user + Password: "", // running on local tablet server, will use vttablet DBA user + MetricQuery: throttler.metricsQuery, + ThrottleThreshold: throttler.metricsThreshold, + IgnoreHostsCount: 0, + } + if password != "" { + config.Instance.Stores.MySQL.Clusters[shardStoreName] = &config.MySQLClusterConfigurationSettings{ + User: throttlerUser, + Password: password, + MetricQuery: throttler.metricsQuery, + ThrottleThreshold: throttler.metricsThreshold, + IgnoreHostsCount: 0, + } + } } // Open opens database pool and initializes the schema @@ -227,6 +271,8 @@ func (throttler *Throttler) Open() error { for _, t := range throttler.tickers { t.Resume() + // since we just resume now, speed up the tickers by forcing an immediate tick + go t.TickNow() } return nil @@ -308,6 +354,49 @@ func (throttler *Throttler) createThrottlerUser(ctx context.Context) (password s return password, nil } +// readSelfMySQLThrottleMetric reads the mysql metric from this very tablet's backend mysql.
+func (throttler *Throttler) readSelfMySQLThrottleMetric() *mysql.MySQLThrottleMetric { + metric := &mysql.MySQLThrottleMetric{ + ClusterName: selfStoreName, + Key: *mysql.SelfInstanceKey, + Value: 0, + Err: nil, + } + ctx := context.Background() + conn, err := throttler.pool.Get(ctx) + if err != nil { + metric.Err = err + return metric + } + defer conn.Recycle() + + tm, err := conn.Exec(ctx, throttler.metricsQuery, 1, true) + if err != nil { + metric.Err = err + return metric + } + row := tm.Named().Row() + if row == nil { + metric.Err = fmt.Errorf("no results for readSelfMySQLThrottleMetric") + return metric + } + + switch throttler.metricsQueryType { + case mysql.MetricsQueryTypeSelect: + // We expect a single row, single column result. + // The "for" iteration below is just a way to get first result without knowing column name + for k := range row { + metric.Value, metric.Err = row.ToFloat64(k) + } + case mysql.MetricsQueryTypeShowGlobal: + metric.Value, metric.Err = strconv.ParseFloat(row["Value"].ToString(), 64) + default: + metric.Err = fmt.Errorf("Unsupported metrics query type for query %s", throttler.metricsQuery) + } + + return metric +} + // ThrottledAppsSnapshot returns a snapshot (a copy) of current throttled apps func (throttler *Throttler) ThrottledAppsSnapshot() map[string]cache.Item { return throttler.throttledApps.Items() @@ -371,6 +460,8 @@ func (throttler *Throttler) Operate(ctx context.Context) { if err == nil { throttler.initConfig(password) shouldCreateThrottlerUser = false + // transitioned into leadership, let's speed up the next 'refresh' and 'collect' ticks + go mysqlRefreshTicker.TickNow() } else { log.Errorf("Error creating throttler account: %+v", err) } @@ -379,7 +470,7 @@ func (throttler *Throttler) Operate(ctx context.Context) { } case <-mysqlCollectTicker.C: { - if atomic.LoadInt64(&throttler.isLeader) > 0 { + if atomic.LoadInt64(&throttler.isOpen) > 0 { // frequent if !throttler.isDormant() { throttler.collectMySQLMetrics(ctx)
@@ -388,7 +479,7 @@ func (throttler *Throttler) Operate(ctx context.Context) { } case <-mysqlDormantCollectTicker.C: { - if atomic.LoadInt64(&throttler.isLeader) > 0 { + if atomic.LoadInt64(&throttler.isOpen) > 0 { // infrequent if throttler.isDormant() { throttler.collectMySQLMetrics(ctx) @@ -403,7 +494,7 @@ func (throttler *Throttler) Operate(ctx context.Context) { case <-mysqlRefreshTicker.C: { // sparse - if atomic.LoadInt64(&throttler.isLeader) > 0 { + if atomic.LoadInt64(&throttler.isOpen) > 0 { go throttler.refreshMySQLInventory(ctx) } } @@ -414,13 +505,13 @@ func (throttler *Throttler) Operate(ctx context.Context) { } case <-mysqlAggregateTicker.C: { - if atomic.LoadInt64(&throttler.isLeader) > 0 { + if atomic.LoadInt64(&throttler.isOpen) > 0 { throttler.aggregateMySQLMetrics(ctx) } } case <-throttledAppsTicker.C: { - if atomic.LoadInt64(&throttler.isLeader) > 0 { + if atomic.LoadInt64(&throttler.isOpen) > 0 { go throttler.expireThrottledApps() } } @@ -445,7 +536,14 @@ func (throttler *Throttler) collectMySQLMetrics(ctx context.Context) error { return } defer atomic.StoreInt64(&probe.QueryInProgress, 0) - throttleMetrics := mysql.ReadThrottleMetric(probe, clusterName) + + // Apply an override to metrics read, if this is the special "self" cluster + // (where we incidentally know there's a single probe) + overrideGetMySQLThrottleMetricFunc := throttler.readSelfMySQLThrottleMetric + if clusterName != selfStoreName { + overrideGetMySQLThrottleMetricFunc = nil + } + throttleMetrics := mysql.ReadThrottleMetric(probe, clusterName, overrideGetMySQLThrottleMetricFunc) throttler.mysqlThrottleMetricChan <- throttleMetrics }() } @@ -454,10 +552,8 @@ func (throttler *Throttler) collectMySQLMetrics(ctx context.Context) error { return nil } -// refreshMySQLInventory will re-structure the inventory based on reading config settings, and potentially -// re-querying dynamic data such as HAProxy list of hosts +// refreshMySQLInventory will re-structure the inventory based on 
reading config settings func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { - log.Infof("refreshing MySQL inventory") addInstanceKey := func(key *mysql.InstanceKey, clusterName string, clusterSettings *config.MySQLClusterConfigurationSettings, probes *mysql.Probes) { for _, ignore := range clusterSettings.IgnoreHosts { @@ -466,11 +562,10 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { return } } - if !key.IsValid() { + if !key.IsValid() && !key.IsSelf() { log.Infof("Throttler: read invalid instance key: [%+v] for cluster %+v", key, clusterName) return } - log.Infof("Throttler: read instance key: %+v", key) probe := &mysql.Probe{ Key: *key, @@ -488,18 +583,30 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { // config may dynamically change, but internal structure (config.Settings().Stores.MySQL.Clusters in our case) // is immutable and can only be _replaced_. Hence, it's safe to read in a goroutine: go func() { - err := func() error { - throttler.mysqlClusterThresholds.Set(clusterName, clusterSettings.ThrottleThreshold, cache.DefaultExpiration) + throttler.mysqlClusterThresholds.Set(clusterName, clusterSettings.ThrottleThreshold, cache.DefaultExpiration) + clusterProbes := &mysql.ClusterProbes{ + ClusterName: clusterName, + IgnoreHostsCount: clusterSettings.IgnoreHostsCount, + InstanceProbes: mysql.NewProbes(), + } + if clusterName == selfStoreName { + // special case: just looking at this tablet's MySQL server + // We will probe this "cluster" (of one server) in a special way. + addInstanceKey(mysql.SelfInstanceKey, clusterName, clusterSettings, clusterProbes.InstanceProbes) + throttler.mysqlClusterProbesChan <- clusterProbes + return + } + if atomic.LoadInt64(&throttler.isLeader) == 0 { + // not the leader (primary tablet)? Then no more work for us.
+ return + } + // The primary tablet is also in charge of collecting the shard's metrics + err := func() error { tabletAliases, err := throttler.ts.FindAllTabletAliasesInShard(ctx, throttler.keyspace, throttler.shard) if err != nil { return err } - clusterProbes := &mysql.ClusterProbes{ - ClusterName: clusterName, - IgnoreHostsCount: clusterSettings.IgnoreHostsCount, - InstanceProbes: mysql.NewProbes(), - } for _, tabletAlias := range tabletAliases { tablet, err := throttler.ts.GetTablet(ctx, tabletAlias) if err != nil { @@ -523,7 +630,6 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { // synchronous update of inventory func (throttler *Throttler) updateMySQLClusterProbes(ctx context.Context, clusterProbes *mysql.ClusterProbes) error { - log.Infof("Throttler: updating MySQLClusterProbes: %s", clusterProbes.ClusterName) throttler.mysqlInventory.ClustersProbes[clusterProbes.ClusterName] = clusterProbes.InstanceProbes throttler.mysqlInventory.IgnoreHostsCount[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsCount throttler.mysqlInventory.IgnoreHostsThreshold[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsThreshold @@ -579,11 +685,10 @@ func (throttler *Throttler) expireThrottledApps() { } // ThrottleApp instructs the throttler to begin throttling an app, to som eperiod and with some ratio. 
-func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, ratio float64) { +func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, ratio float64) (appThrottle *base.AppThrottle) { throttler.throttledAppsMutex.Lock() defer throttler.throttledAppsMutex.Unlock() - var appThrottle *base.AppThrottle now := time.Now() if object, found := throttler.throttledApps.Get(appName); found { appThrottle = object.(*base.AppThrottle) @@ -600,18 +705,20 @@ func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, rati if ratio < 0 { ratio = defaultThrottleRatio } - appThrottle = base.NewAppThrottle(expireAt, ratio) + appThrottle = base.NewAppThrottle(appName, expireAt, ratio) } if now.Before(appThrottle.ExpireAt) { throttler.throttledApps.Set(appName, appThrottle, cache.DefaultExpiration) } else { throttler.UnthrottleApp(appName) } + return appThrottle } // UnthrottleApp cancels any throttling, if any, for a given app -func (throttler *Throttler) UnthrottleApp(appName string) { +func (throttler *Throttler) UnthrottleApp(appName string) (appThrottle *base.AppThrottle) { throttler.throttledApps.Delete(appName) + return base.NewAppThrottle(appName, time.Now(), 0) } // IsAppThrottled tells whether some app should be throttled. 
@@ -707,12 +814,37 @@ func (throttler *Throttler) AppRequestMetricResult(ctx context.Context, appName return metricResultFunc() } -// Check is the main serving function of the throttler, and returns a check result for this cluster's lag -func (throttler *Throttler) Check(ctx context.Context, appName string, remoteAddr string, flags *CheckFlags) (checkResult *CheckResult) { +// checkStore checks the aggregated value of given MySQL store +func (throttler *Throttler) checkStore(ctx context.Context, appName string, storeName string, remoteAddr string, flags *CheckFlags) (checkResult *CheckResult) { if !throttler.env.Config().EnableLagThrottler { return okMetricCheckResult } - return throttler.check.Check(ctx, appName, "mysql", localStoreName, remoteAddr, flags) + return throttler.check.Check(ctx, appName, "mysql", storeName, remoteAddr, flags) +} + +// checkShard checks the health of the shard, and runs on the primary tablet only +func (throttler *Throttler) checkShard(ctx context.Context, appName string, remoteAddr string, flags *CheckFlags) (checkResult *CheckResult) { + return throttler.checkStore(ctx, appName, shardStoreName, remoteAddr, flags) +} + +// CheckSelf is checks the mysql/self metric, and is available on each tablet +func (throttler *Throttler) checkSelf(ctx context.Context, appName string, remoteAddr string, flags *CheckFlags) (checkResult *CheckResult) { + return throttler.checkStore(ctx, appName, selfStoreName, remoteAddr, flags) +} + +// CheckByType runs a check by requested check type +func (throttler *Throttler) CheckByType(ctx context.Context, appName string, remoteAddr string, flags *CheckFlags, checkType ThrottleCheckType) (checkResult *CheckResult) { + switch checkType { + case ThrottleCheckSelf: + return throttler.checkSelf(ctx, appName, remoteAddr, flags) + case ThrottleCheckPrimaryWrite: + if *throttlerCheckAsCheckSelf { + return throttler.checkSelf(ctx, appName, remoteAddr, flags) + } + return throttler.checkShard(ctx, appName, remoteAddr, 
flags) + default: + return invalidCheckTypeCheckResult + } } // Status exports a status breakdown diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go index 5ddc4ecaeaf..8485e16858e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -35,11 +35,16 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) +const ( + throttlerAppName = "vstreamer" +) + // Engine is the engine for handling vreplication streaming requests. type Engine struct { env tabletenv.Env @@ -84,17 +89,20 @@ type Engine struct { errorCounts *stats.CountersWithSingleLabel vstreamersCreated *stats.Counter vstreamersEndedWithErrors *stats.Counter + + throttlerClient *throttle.Client } // NewEngine creates a new Engine. // Initialization sequence is: NewEngine->InitDBConfig->Open. // Open and Close can be called multiple times and are idempotent. -func NewEngine(env tabletenv.Env, ts srvtopo.Server, se *schema.Engine, cell string) *Engine { +func NewEngine(env tabletenv.Env, ts srvtopo.Server, se *schema.Engine, lagThrottler *throttle.Throttler, cell string) *Engine { vse := &Engine{ - env: env, - ts: ts, - se: se, - cell: cell, + env: env, + ts: ts, + se: se, + cell: cell, + throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerAppName, throttle.ThrottleCheckSelf), streamers: make(map[int]*uvstreamer), rowStreamers: make(map[int]*rowStreamer), @@ -178,6 +186,7 @@ func (vse *Engine) vschema() *vindexes.VSchema { } // Stream starts a new stream. 
+// This streams events from the binary logs func (vse *Engine) Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { // Ensure vschema is initialized and the watcher is started. // Starting of the watcher has to be delayed till the first call to Stream @@ -217,6 +226,7 @@ func (vse *Engine) Stream(ctx context.Context, startPos string, tablePKs []*binl } // StreamRows streams rows. +// This streams the table data rows (so we can copy the table data snapshot) func (vse *Engine) StreamRows(ctx context.Context, query string, lastpk []sqltypes.Value, send func(*binlogdatapb.VStreamRowsResponse) error) error { // Ensure vschema is initialized and the watcher is started. // Starting of the watcher has to be delayed till the first call to Stream @@ -231,6 +241,7 @@ func (vse *Engine) StreamRows(ctx context.Context, query string, lastpk []sqltyp if !vse.isOpen { return nil, 0, errors.New("VStreamer is not open") } + rowStreamer := newRowStreamer(ctx, vse.env.Config().DB.AppWithDB(), vse.se, query, lastpk, vse.lvschema, send, vse) idx := vse.streamIdx vse.rowStreamers[idx] = rowStreamer diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_test.go index 6eb68dd5221..495c06a5d7b 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/main_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/main_test.go @@ -53,7 +53,7 @@ func TestMain(m *testing.M) { // engine cannot be initialized in testenv because it introduces // circular dependencies - engine = NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, env.Cells[0]) + engine = NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) engine.InitDBConfig(env.KeyspaceName) engine.Open() defer engine.Close() @@ -70,7 +70,7 @@ func customEngine(t *testing.T, modifier func(mysql.ConnParams) mysql.ConnParams config := env.TabletEnv.Config().Clone() config.DB 
= dbconfigs.NewTestDBConfigs(modified, modified, modified.DbName) - engine := NewEngine(tabletenv.NewEnv(config, "VStreamerTest"), env.SrvTopo, env.SchemaEngine, env.Cells[0]) + engine := NewEngine(tabletenv.NewEnv(config, "VStreamerTest"), env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) engine.InitDBConfig(env.KeyspaceName) engine.Open() return engine diff --git a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go index f1a4bdae192..c7897b756ca 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go @@ -97,6 +97,11 @@ func (rs *resultStreamer) Stream() error { default: } + // check throttler. + if !rs.vse.throttlerClient.ThrottleCheckOKOrWait(rs.ctx) { + continue + } + row, err := conn.FetchNext() if err != nil { return err diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go index 4e8d921b823..95863d1384f 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -244,6 +244,11 @@ func (rs *rowStreamer) streamQuery(conn *snapshotConn, send func(*binlogdatapb.V default: } + // check throttler. 
+ if !rs.vse.throttlerClient.ThrottleCheckOKOrWait(rs.ctx) { + continue + } + row, err := conn.FetchNext() if err != nil { return err diff --git a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go index 46aa9950452..eca56797db5 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go @@ -53,7 +53,7 @@ func TestStartSnapshot(t *testing.T) { Rows: [][]sqltypes.Value{ {sqltypes.NewInt32(1), sqltypes.NewVarBinary("aaa")}, }, - RowsAffected: 1, + StatusFlags: sqltypes.ServerStatusNoIndexUsed | sqltypes.ServerStatusAutocommit | sqltypes.ServerStatusInTrans, } qr, err := conn.ExecuteFetch("select * from t1", 10, false) require.NoError(t, err) diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go index b3caa498fab..066674b5593 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go @@ -211,7 +211,7 @@ func getQuery(tableName string, filter string) string { query = buf.String() case key.IsKeyRange(filter): buf := sqlparser.NewTrackedBuffer(nil) - buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewTableIdent(tableName), sqlparser.NewStrLiteral([]byte(filter))) + buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewTableIdent(tableName), sqlparser.NewStrLiteral(filter)) query = buf.String() } return query diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index fb0333da33d..9a46b9affbc 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -177,6 +177,7 @@ func (vs *vstreamer) replicate(ctx context.Context) error { return wrapError(err, vs.pos, vs.vse) } defer conn.Close() + events, err := conn.StartBinlogDumpFromPosition(vs.ctx, 
vs.pos) if err != nil { return wrapError(err, vs.pos, vs.vse) @@ -268,6 +269,27 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog // Main loop: calls bufferAndTransmit as events arrive. timer := time.NewTimer(HeartbeatTime) defer timer.Stop() + + // throttledEvents can be read just like you would read from events + // throttledEvents pulls data from events, but throttles pulling data, + // which in turn blocks the BinlogConnection from pushing events to the channel + throttledEvents := make(chan mysql.BinlogEvent) + go func() { + for { + // check throttler. + if !vs.vse.throttlerClient.ThrottleCheckOKOrWait(ctx) { + continue + } + + ev, ok := <-events + if ok { + throttledEvents <- ev + } else { + close(throttledEvents) + return + } + } + }() for { timer.Reset(HeartbeatTime) // Drain event if timer fired before reset. @@ -277,7 +299,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog } select { - case ev, ok := <-events: + case ev, ok := <-throttledEvents: if !ok { select { case <-ctx.Done(): @@ -361,11 +383,6 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e return nil, fmt.Errorf("can't strip checksum from binlog event: %v, event data: %#v", err, ev) } - // Get the DbName for vstreamer - params, err := vs.cp.MysqlParams() - if err != nil { - return nil, err - } var vevents []*binlogdatapb.VEvent switch { case ev.IsGTID(): @@ -396,7 +413,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e switch cat := sqlparser.Preview(q.SQL); cat { case sqlparser.StmtInsert: - mustSend := mustSendStmt(q, params.DbName) + mustSend := mustSendStmt(q, vs.cp.DBName()) if mustSend { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_INSERT, @@ -404,7 +421,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e }) } case sqlparser.StmtUpdate: - mustSend := mustSendStmt(q, params.DbName) + 
mustSend := mustSendStmt(q, vs.cp.DBName()) if mustSend { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_UPDATE, @@ -412,7 +429,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e }) } case sqlparser.StmtDelete: - mustSend := mustSendStmt(q, params.DbName) + mustSend := mustSendStmt(q, vs.cp.DBName()) if mustSend { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_DELETE, @@ -420,7 +437,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e }) } case sqlparser.StmtReplace: - mustSend := mustSendStmt(q, params.DbName) + mustSend := mustSendStmt(q, vs.cp.DBName()) if mustSend { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_REPLACE, @@ -436,7 +453,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_COMMIT, }) case sqlparser.StmtDDL: - if mustSendDDL(q, params.DbName, vs.filter) { + if mustSendDDL(q, vs.cp.DBName(), vs.filter) { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, Gtid: mysql.EncodePosition(vs.pos), @@ -444,10 +461,6 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_DDL, Statement: q.SQL, }) - // Reload schema only if the DDL change is relevant. - // TODO(sougou): move this back to always load after - // the schema reload bug is fixed. - vs.se.ReloadAt(context.Background(), vs.pos) } else { // If the DDL need not be sent, send a dummy OTHER event. 
vevents = append(vevents, &binlogdatapb.VEvent{ @@ -459,14 +472,14 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e } vs.se.ReloadAt(context.Background(), vs.pos) case sqlparser.StmtSavepoint: - mustSend := mustSendStmt(q, params.DbName) + mustSend := mustSendStmt(q, vs.cp.DBName()) if mustSend { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_SAVEPOINT, Statement: q.SQL, }) } - case sqlparser.StmtOther, sqlparser.StmtPriv, sqlparser.StmtSet, sqlparser.StmtComment: + case sqlparser.StmtOther, sqlparser.StmtPriv, sqlparser.StmtSet, sqlparser.StmtComment, sqlparser.StmtFlush: // These are either: // 1) DBA statements like REPAIR that can be ignored. // 2) Privilege-altering statements like GRANT/REVOKE @@ -504,7 +517,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e // Generates a Version event when it detects that a schema is stored in the schema_version table. return nil, vs.buildVersionPlan(id, tm) } - if tm.Database != "" && tm.Database != params.DbName { + if tm.Database != "" && tm.Database != vs.cp.DBName() { vs.plans[id] = nil return nil, nil } @@ -554,6 +567,9 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e if err != nil { return nil, err } + case ev.IsCompressed(): + log.Errorf("VReplication does not handle binlog compression") + return nil, fmt.Errorf("VReplication does not handle binlog compression") } for _, vevent := range vevents { vevent.Timestamp = int64(ev.Timestamp()) diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go index b1e4aa23651..2c9a4b57c43 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -251,7 +251,7 @@ func TestVersion(t *testing.T) { require.NoError(t, err) defer env.SchemaEngine.EnableHistorian(false) - engine = NewEngine(engine.env, 
env.SrvTopo, env.SchemaEngine, env.Cells[0]) + engine = NewEngine(engine.env, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) engine.InitDBConfig(env.KeyspaceName) engine.Open() defer engine.Close() @@ -1923,7 +1923,7 @@ func TestFilteredMultipleWhere(t *testing.T) { } func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, position string, tablePK []*binlogdatapb.TableLastPK) { - t.Helper() + ctx, cancel := context.WithCancel(context.Background()) defer cancel() wg, ch := startStream(ctx, t, filter, position, tablePK) @@ -1956,7 +1956,6 @@ func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, p } func expectLog(ctx context.Context, t *testing.T, input interface{}, ch <-chan []*binlogdatapb.VEvent, output [][]string) { - t.Helper() timer := time.NewTimer(1 * time.Minute) defer timer.Stop() for _, wantset := range output { diff --git a/go/vt/vttablet/tabletservermock/controller.go b/go/vt/vttablet/tabletservermock/controller.go index 1e826cca50b..a0d5b716cd2 100644 --- a/go/vt/vttablet/tabletservermock/controller.go +++ b/go/vt/vttablet/tabletservermock/controller.go @@ -28,11 +28,11 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vttablet/onlineddl" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/vexec" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -176,7 +176,7 @@ func (tqsc *Controller) ReloadSchema(ctx context.Context) error { } // OnlineDDLExecutor is part of the tabletserver.Controller interface -func (tqsc *Controller) OnlineDDLExecutor() *onlineddl.Executor { +func (tqsc *Controller) OnlineDDLExecutor() vexec.Executor { return nil } diff --git a/go/vt/vttablet/vexec/executor.go 
b/go/vt/vttablet/vexec/executor.go new file mode 100644 index 00000000000..27c4590432a --- /dev/null +++ b/go/vt/vttablet/vexec/executor.go @@ -0,0 +1,12 @@ +package vexec + +import ( + "context" + + querypb "vitess.io/vitess/go/vt/proto/query" +) + +// Executor should be implemented by any tablet-side structs which accept VExec commands +type Executor interface { + VExec(ctx context.Context, vx *TabletVExec) (qr *querypb.QueryResult, err error) +} diff --git a/go/vt/vttablet/vexec/vexec.go b/go/vt/vttablet/vexec/vexec.go index aeb6ad213d6..50b84ceb431 100644 --- a/go/vt/vttablet/vexec/vexec.go +++ b/go/vt/vttablet/vexec/vexec.go @@ -48,7 +48,7 @@ func NewTabletVExec(workflow, keyspace string) *TabletVExec { func (e *TabletVExec) ToStringVal(val string) *sqlparser.Literal { return &sqlparser.Literal{ Type: sqlparser.StrVal, - Val: []byte(val), + Val: val, } } diff --git a/go/vt/vttest/environment.go b/go/vt/vttest/environment.go index b5be1884106..70cadf97333 100644 --- a/go/vt/vttest/environment.go +++ b/go/vt/vttest/environment.go @@ -144,6 +144,7 @@ func (env *LocalTestEnv) MySQLManager(mycnf []string, snapshot string) (MySQLMan Port: env.PortForProtocol("mysql", ""), MyCnf: append(env.DefaultMyCnf, mycnf...), Env: env.EnvVars(), + UID: 1, }, nil } @@ -241,9 +242,11 @@ func NewLocalTestEnv(flavor string, basePort int) (*LocalTestEnv, error) { // NewLocalTestEnvWithDirectory returns a new instance of the default test // environment with a directory explicitly specified. 
func NewLocalTestEnvWithDirectory(flavor string, basePort int, directory string) (*LocalTestEnv, error) { - err := os.Mkdir(path.Join(directory, "logs"), 0700) - if err != nil { - return nil, err + if _, err := os.Stat(path.Join(directory, "logs")); os.IsNotExist(err) { + err := os.Mkdir(path.Join(directory, "logs"), 0700) + if err != nil { + return nil, err + } } flavor, mycnf, err := GetMySQLOptions(flavor) diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index ced4a0084c2..3c4bb645436 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -88,6 +88,14 @@ type Config struct { // not be started. OnlyMySQL bool + // PersistentMode can be set so that MySQL data directory is not cleaned up + // when LocalCluster.TearDown() is called. This is useful for running + // vttestserver as a database container in local developer environments. Note + // that db and vschema migration files (-schema_dir option) and seeding of + // random data (-initialize_with_random_data option) will only run during + // cluster startup if the data directory does not already exist. + PersistentMode bool + // MySQL protocol bind address. 
// vtcombo will bind to this address when exposing the mysql protocol socket MySQLBindHost string @@ -226,23 +234,38 @@ func (db *LocalCluster) Setup() error { return err } - log.Infof("Initializing MySQL Manager (%T)...", db.mysql) + initializing := true + if db.PersistentMode && dirExist(db.mysql.TabletDir()) { + initializing = false + } - if err := db.mysql.Setup(); err != nil { - log.Errorf("Mysqlctl failed to start: %s", err) - if err, ok := err.(*exec.ExitError); ok { - log.Errorf("stderr: %s", err.Stderr) + if initializing { + log.Infof("Initializing MySQL Manager (%T)...", db.mysql) + if err := db.mysql.Setup(); err != nil { + log.Errorf("Mysqlctl failed to start: %s", err) + if err, ok := err.(*exec.ExitError); ok { + log.Errorf("stderr: %s", err.Stderr) + } + return err + } + + if err := db.createDatabases(); err != nil { + return err + } + } else { + log.Infof("Starting MySQL Manager (%T)...", db.mysql) + if err := db.mysql.Start(); err != nil { + log.Errorf("Mysqlctl failed to start: %s", err) + if err, ok := err.(*exec.ExitError); ok { + log.Errorf("stderr: %s", err.Stderr) + } + return err } - return err } mycfg, _ := json.Marshal(db.mysql.Params("")) log.Infof("MySQL up: %s", mycfg) - if err := db.createDatabases(); err != nil { - return err - } - if !db.OnlyMySQL { log.Infof("Starting vtcombo...") db.vt = VtcomboProcess(db.Env, &db.Config, db.mysql) @@ -252,13 +275,22 @@ func (db *LocalCluster) Setup() error { log.Infof("vtcombo up: %s", db.vt.Address()) } - // Load schema will apply db and vschema migrations. Running after vtcombo starts to be able to apply vschema migrations - if err := db.loadSchema(); err != nil { - return err - } + if initializing { + log.Info("Mysql data directory does not exist. Initializing cluster with database and vschema migrations...") + // Load schema will apply db and vschema migrations. 
Running after vtcombo starts to be able to apply vschema migrations + if err := db.loadSchema(true); err != nil { + return err + } - if db.Seed != nil { - if err := db.populateWithRandomData(); err != nil { + if db.Seed != nil { + log.Info("Populating database with random data...") + if err := db.populateWithRandomData(); err != nil { + return err + } + } + } else { + log.Info("Mysql data directory exists in persistent mode. Will only execute vschema migrations during startup") + if err := db.loadSchema(false); err != nil { return err } } @@ -288,8 +320,10 @@ func (db *LocalCluster) TearDown() error { } } - if err := db.Env.TearDown(); err != nil { - errors = append(errors, fmt.Sprintf("environment: %s", err)) + if !db.PersistentMode { + if err := db.Env.TearDown(); err != nil { + errors = append(errors, fmt.Sprintf("environment: %s", err)) + } } if len(errors) > 0 { @@ -317,7 +351,7 @@ func isDir(path string) bool { } // loadSchema applies sql and vschema migrations respectively for each keyspace in the topology -func (db *LocalCluster) loadSchema() error { +func (db *LocalCluster) loadSchema(shouldRunDatabaseMigrations bool) error { if db.SchemaDir == "" { return nil } @@ -360,6 +394,10 @@ func (db *LocalCluster) loadSchema() error { continue } + if !shouldRunDatabaseMigrations { + continue + } + for _, dbname := range db.shardNames(kpb) { if err := db.Execute(cmds, dbname); err != nil { return err @@ -484,6 +522,14 @@ func (db *LocalCluster) reloadSchemaKeyspace(keyspace string) error { return err } +func dirExist(dir string) bool { + exist := true + if _, err := os.Stat(dir); os.IsNotExist(err) { + exist = false + } + return exist +} + // LoadSQLFile loads a parses a .sql file from disk, removing all the // different comments that mysql/mysqldump inserts in these, and returning // each individual SQL statement as its own string. 
diff --git a/go/vt/vttest/mysqlctl.go b/go/vt/vttest/mysqlctl.go index 8824d66b541..fc48c2160d5 100644 --- a/go/vt/vttest/mysqlctl.go +++ b/go/vt/vttest/mysqlctl.go @@ -26,16 +26,19 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/mysqlctl" ) // MySQLManager is an interface to a mysqld process manager, capable // of starting/shutting down mysqld services and initializing them. type MySQLManager interface { Setup() error + Start() error TearDown() error Auth() (string, string) Address() (string, int) UnixSocket() string + TabletDir() string Params(dbname string) mysql.ConnParams } @@ -47,6 +50,7 @@ type Mysqlctl struct { Port int MyCnf []string Env []string + UID uint32 } // Setup spawns a new mysqld service and initializes it with the defaults. @@ -58,7 +62,7 @@ func (ctl *Mysqlctl) Setup() error { cmd := exec.CommandContext(ctx, ctl.Binary, "-alsologtostderr", - "-tablet_uid", "1", + "-tablet_uid", fmt.Sprintf("%d", ctl.UID), "-mysql_port", fmt.Sprintf("%d", ctl.Port), "init", "-init_db_sql_file", ctl.InitFile, @@ -74,6 +78,30 @@ func (ctl *Mysqlctl) Setup() error { return err } +// Start spawns a mysqld service for an existing data directory +// The service is kept running in the background until TearDown() is called. +func (ctl *Mysqlctl) Start() error { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, + ctl.Binary, + "-alsologtostderr", + "-tablet_uid", fmt.Sprintf("%d", ctl.UID), + "-mysql_port", fmt.Sprintf("%d", ctl.Port), + "start", + ) + + myCnf := strings.Join(ctl.MyCnf, ":") + + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, ctl.Env...) 
+ cmd.Env = append(cmd.Env, fmt.Sprintf("EXTRA_MY_CNF=%s", myCnf)) + + _, err := cmd.Output() + return err +} + // TearDown shutdowns the running mysqld service func (ctl *Mysqlctl) TearDown() error { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) @@ -82,7 +110,7 @@ func (ctl *Mysqlctl) TearDown() error { cmd := exec.CommandContext(ctx, ctl.Binary, "-alsologtostderr", - "-tablet_uid", "1", + "-tablet_uid", fmt.Sprintf("%d", ctl.UID), "-mysql_port", fmt.Sprintf("%d", ctl.Port), "shutdown", ) @@ -106,7 +134,12 @@ func (ctl *Mysqlctl) Address() (string, int) { // UnixSocket returns the path to the local Unix socket required to connect to mysqld func (ctl *Mysqlctl) UnixSocket() string { - return path.Join(ctl.Directory, "vt_0000000001", "mysql.sock") + return path.Join(ctl.TabletDir(), "mysql.sock") +} + +// TabletDir returns the path where data for this Tablet would be stored +func (ctl *Mysqlctl) TabletDir() string { + return mysqlctl.DefaultTabletDirAtRoot(ctl.Directory, ctl.UID) } // Params returns the mysql.ConnParams required to connect directly to mysqld diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index 84c62e66e21..0abe591e9fa 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -221,6 +221,7 @@ func VtcomboProcess(env Environment, args *Config, mysql MySQLManager) *VtProces "-mycnf_server_id", "1", "-mycnf_socket_file", socket, "-normalize_queries", + "-enable_query_plan_field_caching=false", }...) vt.ExtraArgs = append(vt.ExtraArgs, QueryServerArgs...) @@ -247,6 +248,9 @@ func VtcomboProcess(env Environment, args *Config, mysql MySQLManager) *VtProces if args.VSchemaDDLAuthorizedUsers != "" { vt.ExtraArgs = append(vt.ExtraArgs, []string{"-vschema_ddl_authorized_users", args.VSchemaDDLAuthorizedUsers}...) 
} + if *servenv.MySQLServerVersion != "" { + vt.ExtraArgs = append(vt.ExtraArgs, "-mysql_server_version", *servenv.MySQLServerVersion) + } if socket != "" { vt.ExtraArgs = append(vt.ExtraArgs, []string{ diff --git a/go/vt/vttls/vttls.go b/go/vt/vttls/vttls.go index 65c8724d95b..d7f212252e0 100644 --- a/go/vt/vttls/vttls.go +++ b/go/vt/vttls/vttls.go @@ -94,15 +94,21 @@ func ClientConfig(cert, key, ca, name string) (*tls.Config, error) { // ServerConfig returns the TLS config to use for a server to // accept client connections. -func ServerConfig(cert, key, ca string) (*tls.Config, error) { +func ServerConfig(cert, key, ca, serverCA string) (*tls.Config, error) { config := newTLSConfig() - certificates, err := loadTLSCertificate(cert, key) + var certificates *[]tls.Certificate + var err error + + if serverCA != "" { + certificates, err = combineAndLoadTLSCertificates(serverCA, cert, key) + } else { + certificates, err = loadTLSCertificate(cert, key) + } if err != nil { return nil, err } - config.Certificates = *certificates // if specified, load ca to validate client, @@ -161,8 +167,8 @@ func doLoadx509CertPool(ca string) error { var tlsCertificates = sync.Map{} -func tlsCertificatesIdentifier(cert, key string) string { - return strings.Join([]string{cert, key}, ";") +func tlsCertificatesIdentifier(tokens ...string) string { + return strings.Join(tokens, ";") } func loadTLSCertificate(cert, key string) (*[]tls.Certificate, error) { @@ -203,3 +209,62 @@ func doLoadTLSCertificate(cert, key string) error { return nil } + +var combinedTlsCertificates = sync.Map{} + +func combineAndLoadTLSCertificates(ca, cert, key string) (*[]tls.Certificate, error) { + combinedTlsIdentifier := tlsCertificatesIdentifier(ca, cert, key) + once, _ := onceByKeys.LoadOrStore(combinedTlsIdentifier, &sync.Once{}) + + var err error + once.(*sync.Once).Do(func() { + err = doLoadAndCombineTLSCertificates(ca, cert, key) + }) + + if err != nil { + return nil, err + } + + result, ok := 
combinedTlsCertificates.Load(combinedTlsIdentifier) + + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "Cannot find loaded tls certificate chain with ca: %s, cert: %s, key: %s", ca, cert, key) + } + + return result.(*[]tls.Certificate), nil +} + +func doLoadAndCombineTLSCertificates(ca, cert, key string) error { + combinedTlsIdentifier := tlsCertificatesIdentifier(ca, cert, key) + + // Read CA certificates chain + ca_b, err := ioutil.ReadFile(ca) + if err != nil { + return vterrors.Errorf(vtrpc.Code_NOT_FOUND, "failed to read ca file: %s", ca) + } + + // Read server certificate + cert_b, err := ioutil.ReadFile(cert) + if err != nil { + return vterrors.Errorf(vtrpc.Code_NOT_FOUND, "failed to read server cert file: %s", cert) + } + + // Read server key file + key_b, err := ioutil.ReadFile(key) + if err != nil { + return vterrors.Errorf(vtrpc.Code_NOT_FOUND, "failed to read key file: %s", key) + } + + // Load CA, server cert and key. + var certificate []tls.Certificate + crt, err := tls.X509KeyPair(append(cert_b, ca_b...), key_b) + if err != nil { + return vterrors.Errorf(vtrpc.Code_NOT_FOUND, "failed to load and merge tls certificate with CA, ca %s, cert %s, key: %s", ca, cert, key) + } + + certificate = []tls.Certificate{crt} + + combinedTlsCertificates.Store(combinedTlsIdentifier, &certificate) + + return nil +} diff --git a/go/vt/withddl/withddl_test.go b/go/vt/withddl/withddl_test.go index 1c2569a9c3c..dd44a325d75 100644 --- a/go/vt/withddl/withddl_test.go +++ b/go/vt/withddl/withddl_test.go @@ -186,6 +186,9 @@ func TestExec(t *testing.T) { wd := New(test.ddls) qr, err := wd.Exec(ctx, test.query, fun.f) + if test.qr != nil { + test.qr.StatusFlags = sqltypes.ServerStatusAutocommit + } checkResult(t, test.qr, test.err, qr, err) for _, query := range test.cleanup { diff --git a/go/vt/worker/result_merger.go b/go/vt/worker/result_merger.go index 0d74c409ce0..01e4f57333d 100644 --- a/go/vt/worker/result_merger.go +++ b/go/vt/worker/result_merger.go @@ 
-173,9 +173,8 @@ func (rm *ResultMerger) Next() (*sqltypes.Result, error) { } result := &sqltypes.Result{ - Fields: rm.fields, - RowsAffected: uint64(len(rm.output)), - Rows: rm.output, + Fields: rm.fields, + Rows: rm.output, } rm.reset() diff --git a/go/vt/worker/result_merger_test.go b/go/vt/worker/result_merger_test.go index acbb6b07fda..b1d4ef501e7 100644 --- a/go/vt/worker/result_merger_test.go +++ b/go/vt/worker/result_merger_test.go @@ -191,9 +191,8 @@ func mergedResults(fields []*querypb.Field, rowsTotal, rowsPerResult int) []*sql // Last row in the Result or last row in total. if id%rowsPerResult == (rowsPerResult-1) || id == (rowsTotal-1) { results = append(results, &sqltypes.Result{ - Fields: fields, - RowsAffected: uint64(len(rows)), - Rows: rows, + Fields: fields, + Rows: rows, }) rows = make([][]sqltypes.Value, 0) } @@ -284,7 +283,6 @@ func TestResultMerger(t *testing.T) { results := mergedResults(singlePk, 3, ResultSizeRows) // Duplicate Row 1. New Rows: 0, 1, 1, 2 results[0].Rows = append(results[0].Rows[:2], results[0].Rows[1:]...) - results[0].RowsAffected = 4 return results }(), }, @@ -299,7 +297,6 @@ func TestResultMerger(t *testing.T) { results := mergedResults(singlePk, 3, ResultSizeRows) // Remove row 1. New Rows: 0, 2 results[0].Rows = append(results[0].Rows[:1], results[0].Rows[2:]...) - results[0].RowsAffected = 2 return results }(), }, diff --git a/go/vt/wrangler/external_cluster.go b/go/vt/wrangler/external_cluster.go new file mode 100644 index 00000000000..e3f93f62d4c --- /dev/null +++ b/go/vt/wrangler/external_cluster.go @@ -0,0 +1,55 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/vt/proto/topodata" +) + +// MountExternalVitessCluster adds a topo record for cluster with specified parameters so that it is available to a Migrate command +func (wr *Wrangler) MountExternalVitessCluster(ctx context.Context, clusterName, topoType, topoServer, topoRoot string) error { + vci, err := wr.TopoServer().GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return err + } + if vci != nil { + return fmt.Errorf("there is already a vitess cluster named %s", clusterName) + } + vc := &topodata.ExternalVitessCluster{ + TopoConfig: &topodata.TopoConfig{ + TopoType: topoType, + Server: topoServer, + Root: topoRoot, + }, + } + return wr.TopoServer().CreateExternalVitessCluster(ctx, clusterName, vc) +} + +// UnmountExternalVitessCluster deletes a mounted cluster from the topo +func (wr *Wrangler) UnmountExternalVitessCluster(ctx context.Context, clusterName string) error { + vci, err := wr.TopoServer().GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return err + } + if vci == nil { + return fmt.Errorf("there is no vitess cluster named %s", clusterName) + } + return wr.TopoServer().DeleteExternalVitessCluster(ctx, clusterName) +} diff --git a/go/vt/wrangler/external_cluster_test.go b/go/vt/wrangler/external_cluster_test.go new file mode 100644 index 00000000000..3be5970a769 --- /dev/null +++ b/go/vt/wrangler/external_cluster_test.go @@ -0,0 +1,67 @@ +package wrangler + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + 
"vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/memorytopo" +) + +func TestVitessCluster(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("zone1") + tmc := newTestWranglerTMClient() + wr := New(logutil.NewConsoleLogger(), ts, tmc) + name, topoType, topoServer, topoRoot := "c1", "x", "y", "z" + + t.Run("Zero clusters to start", func(t *testing.T) { + clusters, err := ts.GetExternalVitessClusters(ctx) + require.NoError(t, err) + require.Equal(t, 0, len(clusters)) + }) + t.Run("Mount first cluster", func(t *testing.T) { + err := wr.MountExternalVitessCluster(ctx, name, topoType, topoServer, topoRoot) + require.NoError(t, err) + vci, err := ts.GetExternalVitessCluster(ctx, name) + require.NoError(t, err) + require.Equal(t, vci.ClusterName, name) + expectedVc := &topodata.ExternalVitessCluster{ + TopoConfig: &topodata.TopoConfig{ + TopoType: topoType, + Server: topoServer, + Root: topoRoot, + }, + } + require.Equal(t, expectedVc, vci.ExternalVitessCluster) + }) + + t.Run("Mount second cluster", func(t *testing.T) { + name2 := "c2" + err := wr.MountExternalVitessCluster(ctx, name2, topoType, topoServer, topoRoot) + require.NoError(t, err) + }) + + t.Run("List clusters should return c1,c2", func(t *testing.T) { + clusters, err := ts.GetExternalVitessClusters(ctx) + require.NoError(t, err) + require.Equal(t, 2, len(clusters)) + require.EqualValues(t, []string{"c1", "c2"}, clusters) + }) + t.Run("Unmount first cluster", func(t *testing.T) { + err := wr.UnmountExternalVitessCluster(ctx, name) + require.NoError(t, err) + vci, err := ts.GetExternalVitessCluster(ctx, name) + require.NoError(t, err) + require.Nil(t, vci) + }) + t.Run("List clusters should return c2", func(t *testing.T) { + clusters, err := ts.GetExternalVitessClusters(ctx) + require.NoError(t, err) + require.Equal(t, 1, len(clusters)) + require.EqualValues(t, []string{"c2"}, clusters) + }) +} diff --git 
a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index e2292972e1f..6ecd1b1d774 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -379,7 +379,7 @@ func (wr *Wrangler) cancelHorizontalResharding(ctx context.Context, keyspace, sh destinationShards[i] = updatedShard - if err := wr.RefreshTabletsByShard(ctx, si, nil, nil); err != nil { + if err := wr.RefreshTabletsByShard(ctx, si, nil); err != nil { return err } } @@ -450,6 +450,8 @@ func (wr *Wrangler) MigrateServedTypes(ctx context.Context, keyspace, shard stri // refresh // TODO(b/26388813): Integrate vtctl WaitForDrain here instead of just sleeping. // Anything that's not a replica will use the RDONLY sleep time. + // Master Migrate performs its own refresh but we will refresh all non master + // tablets after each migration waitForDrainSleep := *waitForDrainSleepRdonly if servedType == topodatapb.TabletType_REPLICA { waitForDrainSleep = *waitForDrainSleepReplica @@ -465,7 +467,7 @@ func (wr *Wrangler) MigrateServedTypes(ctx context.Context, keyspace, shard stri refreshShards = destinationShards } for _, si := range refreshShards { - rec.RecordError(wr.RefreshTabletsByShard(ctx, si, []topodatapb.TabletType{servedType}, cells)) + rec.RecordError(wr.RefreshTabletsByShard(ctx, si, cells)) } return rec.Error() } @@ -813,6 +815,12 @@ func (wr *Wrangler) masterMigrateServedType(ctx context.Context, keyspace string } } + for _, si := range destinationShards { + if err := wr.RefreshTabletsByShard(ctx, si, nil); err != nil { + return err + } + } + event.DispatchUpdate(ev, "finished") return nil } @@ -932,7 +940,7 @@ func (wr *Wrangler) updateShardRecords(ctx context.Context, keyspace string, sha // For 'to' shards, refresh to make them serve. // The 'from' shards will be refreshed after traffic has migrated. 
if !isFrom { - wr.RefreshTabletsByShard(ctx, si, []topodatapb.TabletType{servedType}, cells) + wr.RefreshTabletsByShard(ctx, si, cells) } } return nil @@ -1268,7 +1276,7 @@ func (wr *Wrangler) replicaMigrateServedFrom(ctx context.Context, ki *topo.Keysp // Now refresh the source servers so they reload their // blacklisted table list event.DispatchUpdate(ev, "refreshing sources tablets state so they update their blacklisted tables") - return wr.RefreshTabletsByShard(ctx, sourceShard, []topodatapb.TabletType{servedType}, cells) + return wr.RefreshTabletsByShard(ctx, sourceShard, cells) } // masterMigrateServedFrom handles the master migration. The ordering is @@ -1374,10 +1382,8 @@ func (wr *Wrangler) SetKeyspaceServedFrom(ctx context.Context, keyspace string, return wr.ts.UpdateKeyspace(ctx, ki) } -// RefreshTabletsByShard calls RefreshState on all the tables of a -// given type in a shard. It would work for the master, but the -// discovery wouldn't be very efficient. -func (wr *Wrangler) RefreshTabletsByShard(ctx context.Context, si *topo.ShardInfo, tabletTypes []topodatapb.TabletType, cells []string) error { +// RefreshTabletsByShard calls RefreshState on all the tablets in a given shard. +func (wr *Wrangler) RefreshTabletsByShard(ctx context.Context, si *topo.ShardInfo, cells []string) error { wr.Logger().Infof("RefreshTabletsByShard called on shard %v/%v", si.Keyspace(), si.ShardName()) tabletMap, err := wr.ts.GetTabletMapForShardByCell(ctx, si.Keyspace(), si.ShardName(), cells) switch { @@ -1392,9 +1398,6 @@ func (wr *Wrangler) RefreshTabletsByShard(ctx context.Context, si *topo.ShardInf // ignore errors in this phase wg := sync.WaitGroup{} for _, ti := range tabletMap { - if tabletTypes != nil && !topoproto.IsTypeInList(ti.Type, tabletTypes) { - continue - } if ti.Hostname == "" { // The tablet is not running, we don't have the host // name to connect to, so we just skip this tablet. 
diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index a7afe63810d..c639611e748 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -64,11 +64,21 @@ const ( // MoveTables initiates moving table(s) over to another keyspace func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, targetKeyspace, tableSpecs, - cell, tabletTypes string, allTables bool, excludeTables string) error { + cell, tabletTypes string, allTables bool, excludeTables string, autoStart, stopAfterCopy bool, + externalCluster string) error { //FIXME validate tableSpecs, allTables, excludeTables var tables []string + var externalTopo *topo.Server var err error + if externalCluster != "" { + externalTopo, err = wr.ts.OpenExternalVitessClusterServer(ctx, externalCluster) + if err != nil { + return err + } + wr.sourceTs = externalTopo + log.Infof("Successfully opened external topo: %+v", externalTopo) + } var vschema *vschemapb.Keyspace vschema, err = wr.ts.GetVSchema(ctx, targetKeyspace) if err != nil { @@ -86,9 +96,6 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta if err := json2.Unmarshal([]byte(wrap), ks); err != nil { return err } - if err != nil { - return err - } for table, vtab := range ks.Tables { vschema.Tables[table] = vtab tables = append(tables, table) @@ -97,7 +104,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta if len(strings.TrimSpace(tableSpecs)) > 0 { tables = strings.Split(tableSpecs, ",") } - ksTables, err := wr.getKeyspaceTables(ctx, sourceKeyspace) + ksTables, err := wr.getKeyspaceTables(ctx, sourceKeyspace, wr.sourceTs) if err != nil { return err } @@ -148,44 +155,46 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta } } } - - // Save routing rules before vschema. If we save vschema first, and routing rules - // fails to save, we may generate duplicate table errors. 
- rules, err := wr.getRoutingRules(ctx) - if err != nil { - return err - } - for _, table := range tables { - toSource := []string{sourceKeyspace + "." + table} - rules[table] = toSource - rules[table+"@replica"] = toSource - rules[table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[targetKeyspace+"."+table+"@replica"] = toSource - rules[targetKeyspace+"."+table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[sourceKeyspace+"."+table+"@replica"] = toSource - rules[sourceKeyspace+"."+table+"@rdonly"] = toSource - } - if err := wr.saveRoutingRules(ctx, rules); err != nil { - return err - } - if vschema != nil { - // We added to the vschema. - if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + if externalTopo == nil { + // Save routing rules before vschema. If we save vschema first, and routing rules + // fails to save, we may generate duplicate table errors. + rules, err := wr.getRoutingRules(ctx) + if err != nil { return err } + for _, table := range tables { + toSource := []string{sourceKeyspace + "." + table} + rules[table] = toSource + rules[table+"@replica"] = toSource + rules[table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[targetKeyspace+"."+table+"@replica"] = toSource + rules[targetKeyspace+"."+table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[sourceKeyspace+"."+table+"@replica"] = toSource + rules[sourceKeyspace+"."+table+"@rdonly"] = toSource + } + if err := wr.saveRoutingRules(ctx, rules); err != nil { + return err + } + if vschema != nil { + // We added to the vschema. 
+ if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + return err + } + } } if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { return err } - ms := &vtctldatapb.MaterializeSettings{ - Workflow: workflow, - SourceKeyspace: sourceKeyspace, - TargetKeyspace: targetKeyspace, - Cell: cell, - TabletTypes: tabletTypes, + Workflow: workflow, + SourceKeyspace: sourceKeyspace, + TargetKeyspace: targetKeyspace, + Cell: cell, + TabletTypes: tabletTypes, + StopAfterCopy: stopAfterCopy, + ExternalCluster: externalCluster, } for _, table := range tables { buf := sqlparser.NewTrackedBuffer(nil) @@ -210,19 +219,26 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta return err } - exists, tablets, err := wr.checkIfPreviousJournalExists(ctx, mz, migrationID) - if err != nil { - return err + if externalCluster == "" { + exists, tablets, err := wr.checkIfPreviousJournalExists(ctx, mz, migrationID) + if err != nil { + return err + } + if exists { + wr.Logger().Errorf("Found a previous journal entry for %d", migrationID) + msg := fmt.Sprintf("found an entry from a previous run for migration id %d in _vt.resharding_journal of tablets %s,", + migrationID, strings.Join(tablets, ",")) + msg += fmt.Sprintf("please review and delete it before proceeding and restart the workflow using the Workflow %s.%s start", + workflow, targetKeyspace) + return fmt.Errorf(msg) + } } - if exists { - wr.Logger().Errorf("Found a previous journal entry for %d", migrationID) - msg := fmt.Sprintf("found an entry from a previous run for migration id %d in _vt.resharding_journal of tablets %s,", - migrationID, strings.Join(tablets, ",")) - msg += fmt.Sprintf("please review and delete it before proceeding and restart the workflow using the Workflow %s.%s start", - workflow, targetKeyspace) - return fmt.Errorf(msg) + if autoStart { + return mz.startStreams(ctx) } - return mz.startStreams(ctx) + wr.Logger().Infof("Streams will not be started since -auto_start 
is set to false") + + return nil } func (wr *Wrangler) validateSourceTablesExist(ctx context.Context, sourceKeyspace string, ksTables, tables []string) error { @@ -246,8 +262,8 @@ func (wr *Wrangler) validateSourceTablesExist(ctx context.Context, sourceKeyspac return nil } -func (wr *Wrangler) getKeyspaceTables(ctx context.Context, ks string) ([]string, error) { - shards, err := wr.ts.GetServingShards(ctx, ks) +func (wr *Wrangler) getKeyspaceTables(ctx context.Context, ks string, ts *topo.Server) ([]string, error) { + shards, err := ts.GetServingShards(ctx, ks) if err != nil { return nil, err } @@ -260,7 +276,11 @@ func (wr *Wrangler) getKeyspaceTables(ctx context.Context, ks string) ([]string, } allTables := []string{"/.*/"} - schema, err := wr.GetSchema(ctx, master, allTables, nil, false) + ti, err := ts.GetTablet(ctx, master) + if err != nil { + return nil, err + } + schema, err := wr.tmc.GetSchema(ctx, ti.Tablet, allTables, nil, false) if err != nil { return nil, err } @@ -721,7 +741,10 @@ func (wr *Wrangler) ExternalizeVindex(ctx context.Context, qualifiedVindexName s // Remove the write_only param and save the source vschema. 
delete(sourceVindex.Params, "write_only") - return wr.ts.SaveVSchema(ctx, sourceKeyspace, sourceVSchema) + if err := wr.ts.SaveVSchema(ctx, sourceKeyspace, sourceVSchema); err != nil { + return err + } + return wr.ts.RebuildSrvVSchema(ctx, nil) } // @@ -817,7 +840,7 @@ func (wr *Wrangler) buildMaterializer(ctx context.Context, ms *vtctldatapb.Mater } } - sourceShards, err := wr.ts.GetServingShards(ctx, ms.SourceKeyspace) + sourceShards, err := wr.sourceTs.GetServingShards(ctx, ms.SourceKeyspace) if err != nil { return nil, err } @@ -843,8 +866,11 @@ func (mz *materializer) getSourceTableDDLs(ctx context.Context) (map[string]stri return nil, fmt.Errorf("source shard must have a master for copying schema: %v", mz.sourceShards[0].ShardName()) } - var err error - sourceSchema, err := mz.wr.GetSchema(ctx, sourceMaster, allTables, nil, false) + ti, err := mz.wr.sourceTs.GetTablet(ctx, sourceMaster) + if err != nil { + return nil, err + } + sourceSchema, err := mz.wr.tmc.GetSchema(ctx, ti.Tablet, allTables, nil, false) if err != nil { return nil, err } @@ -978,10 +1004,11 @@ func (mz *materializer) generateInserts(ctx context.Context) (string, error) { for _, source := range mz.sourceShards { bls := &binlogdatapb.BinlogSource{ - Keyspace: mz.ms.SourceKeyspace, - Shard: source.ShardName(), - Filter: &binlogdatapb.Filter{}, - StopAfterCopy: mz.ms.StopAfterCopy, + Keyspace: mz.ms.SourceKeyspace, + Shard: source.ShardName(), + Filter: &binlogdatapb.Filter{}, + StopAfterCopy: mz.ms.StopAfterCopy, + ExternalCluster: mz.ms.ExternalCluster, } for _, ts := range mz.ms.TableSettings { rule := &binlogdatapb.Rule{ @@ -1022,8 +1049,8 @@ func (mz *materializer) generateInserts(ctx context.Context) (string, error) { subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) } vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral([]byte(vindexName))}) - subExprs = append(subExprs, 
&sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral([]byte("{{.keyrange}}"))}) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.keyrange}}")}) sel.Where = &sqlparser.Where{ Type: sqlparser.WhereClause, Expr: &sqlparser.FuncExpr{ diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index 07358266d80..8a70bf549af 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -17,13 +17,12 @@ limitations under the License. package wrangler import ( + "context" "fmt" "sort" "strings" "testing" - "context" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" @@ -63,7 +62,7 @@ func TestMigrateTables(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "") + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "") require.NoError(t, err) vschema, err := env.wr.ts.GetSrvVSchema(ctx, env.cell) require.NoError(t, err) @@ -104,11 +103,11 @@ func TestMissingTables(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt", "", "", false, "") + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt", "", "", false, "", true, false, "") require.EqualError(t, err, "table(s) not found in source keyspace sourceks: tyt") - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt,t2,txt", "", "", false, "") + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt,t2,txt", "", "", false, "", true, false, "") require.EqualError(t, err, "table(s) not found in source keyspace sourceks: 
tyt,txt") - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "") + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "") require.NoError(t, err) } @@ -164,7 +163,7 @@ func TestMoveTablesAllAndExclude(t *testing.T) { env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "", "", "", tcase.allTables, tcase.excludeTables) + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "", "", "", tcase.allTables, tcase.excludeTables, true, false, "") require.NoError(t, err) require.EqualValues(t, tcase.want, targetTables(env)) }) @@ -173,6 +172,37 @@ func TestMoveTablesAllAndExclude(t *testing.T) { } +func TestMoveTablesStopFlags(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + + ctx := context.Background() + var err error + t.Run("StopStartedAndStopAfterCopyFlags", func(t *testing.T) { + env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + defer env.close() + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + // insert expects flag stop_after_copy to be true + insert := `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name\) values .*stop_after_copy:true.*` + + env.tmc.expectVRQuery(200, insert, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) + // -auto_start=false is tested by NOT expecting the update query which sets 
state to RUNNING + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", + "", false, "", false, true, "") + require.NoError(t, err) + env.tmc.verifyQueries(t) + }) +} + func TestMigrateVSchema(t *testing.T) { ms := &vtctldatapb.MaterializeSettings{ Workflow: "workflow", @@ -193,7 +223,7 @@ func TestMigrateVSchema(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", `{"t1":{}}`, "", "", false, "") + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", `{"t1":{}}`, "", "", false, "", true, false, "") require.NoError(t, err) vschema, err := env.wr.ts.GetSrvVSchema(ctx, env.cell) require.NoError(t, err) @@ -1544,6 +1574,8 @@ func TestMaterializerOneToOne(t *testing.T) { CreateDdl: "t4ddl", }, }, + Cell: "zone1", + TabletTypes: "master,rdonly", } env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) defer env.close() @@ -1560,7 +1592,7 @@ func TestMaterializerOneToOne(t *testing.T) { `rules: `+ `rules: `+ `> ', `)+ - `'', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks'`+ + `'', [0-9]*, [0-9]*, 'zone1', 'master,rdonly', [0-9]*, 0, 'Stopped', 'vt_targetks'`+ `\)`+eol, &sqltypes.Result{}, ) diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index d56e3690f5b..db3390b08e4 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -27,28 +27,25 @@ import ( "time" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/utils/pointer" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/topotools/events" - "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" + "vitess.io/vitess/go/vt/vtctl/reparentutil" 
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) const ( - initShardMasterOperation = "InitShardMaster" - plannedReparentShardOperation = "PlannedReparentShard" - emergencyReparentShardOperation = "EmergencyReparentShard" + plannedReparentShardOperation = "PlannedReparentShard" //nolint + emergencyReparentShardOperation = "EmergencyReparentShard" //nolint tabletExternallyReparentedOperation = "TabletExternallyReparented" //nolint ) @@ -149,7 +146,12 @@ func (wr *Wrangler) InitShardMaster(ctx context.Context, keyspace, shard string, ev := &events.Reparent{} // do the work - err = wr.initShardMasterLocked(ctx, ev, keyspace, shard, masterElectTabletAlias, force, waitReplicasTimeout) + err = grpcvtctldserver.NewVtctldServer(wr.ts).InitShardPrimaryLocked(ctx, ev, &vtctldatapb.InitShardPrimaryRequest{ + Keyspace: keyspace, + Shard: shard, + PrimaryElectTabletAlias: masterElectTabletAlias, + Force: force, + }, waitReplicasTimeout, wr.tmc, wr.logger) if err != nil { event.DispatchUpdate(ev, "failed InitShardMaster: "+err.Error()) } else { @@ -158,1098 +160,37 @@ func (wr *Wrangler) InitShardMaster(ctx context.Context, keyspace, shard string, return err } -func (wr *Wrangler) initShardMasterLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, masterElectTabletAlias *topodatapb.TabletAlias, force bool, waitReplicasTimeout time.Duration) error { - shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) - if err != nil { - return err - } - ev.ShardInfo = *shardInfo - - event.DispatchUpdate(ev, "reading tablet map") - tabletMap, err := wr.ts.GetTabletMapForShard(ctx, keyspace, shard) - if err != nil { - return err - } - - // Check the master elect is in tabletMap. 
- masterElectTabletAliasStr := topoproto.TabletAliasString(masterElectTabletAlias) - masterElectTabletInfo, ok := tabletMap[masterElectTabletAliasStr] - if !ok { - return fmt.Errorf("master-elect tablet %v is not in the shard", topoproto.TabletAliasString(masterElectTabletAlias)) - } - ev.NewMaster = *masterElectTabletInfo.Tablet - - // Check the master is the only master is the shard, or -force was used. - _, masterTabletMap := topotools.SortedTabletMap(tabletMap) - if !topoproto.TabletAliasEqual(shardInfo.MasterAlias, masterElectTabletAlias) { - if !force { - return fmt.Errorf("master-elect tablet %v is not the shard master, use -force to proceed anyway", topoproto.TabletAliasString(masterElectTabletAlias)) - } - wr.logger.Warningf("master-elect tablet %v is not the shard master, proceeding anyway as -force was used", topoproto.TabletAliasString(masterElectTabletAlias)) - } - if _, ok := masterTabletMap[masterElectTabletAliasStr]; !ok { - if !force { - return fmt.Errorf("master-elect tablet %v is not a master in the shard, use -force to proceed anyway", topoproto.TabletAliasString(masterElectTabletAlias)) - } - wr.logger.Warningf("master-elect tablet %v is not a master in the shard, proceeding anyway as -force was used", topoproto.TabletAliasString(masterElectTabletAlias)) - } - haveOtherMaster := false - for alias := range masterTabletMap { - if masterElectTabletAliasStr != alias { - haveOtherMaster = true - } - } - if haveOtherMaster { - if !force { - return fmt.Errorf("master-elect tablet %v is not the only master in the shard, use -force to proceed anyway", topoproto.TabletAliasString(masterElectTabletAlias)) - } - wr.logger.Warningf("master-elect tablet %v is not the only master in the shard, proceeding anyway as -force was used", topoproto.TabletAliasString(masterElectTabletAlias)) - } - - // First phase: reset replication on all tablets. If anyone fails, - // we stop. 
It is probably because it is unreachable, and may leave - // an unstable database process in the mix, with a database daemon - // at a wrong replication spot. - - // Create a context for the following RPCs that respects waitReplicasTimeout - resetCtx, resetCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer resetCancel() - - event.DispatchUpdate(ev, "resetting replication on all tablets") - wg := sync.WaitGroup{} - rec := concurrency.AllErrorRecorder{} - for alias, tabletInfo := range tabletMap { - wg.Add(1) - go func(alias string, tabletInfo *topo.TabletInfo) { - defer wg.Done() - wr.logger.Infof("resetting replication on tablet %v", alias) - if err := wr.tmc.ResetReplication(resetCtx, tabletInfo.Tablet); err != nil { - rec.RecordError(fmt.Errorf("tablet %v ResetReplication failed (either fix it, or Scrap it): %v", alias, err)) - } - }(alias, tabletInfo) - } - wg.Wait() - if err := rec.Error(); err != nil { - // if any of the replicas failed - return err - } - - // Check we still have the topology lock. - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return fmt.Errorf("lost topology lock, aborting: %v", err) - } - - // Tell the new master to break its replicas, return its replication - // position - wr.logger.Infof("initializing master on %v", topoproto.TabletAliasString(masterElectTabletAlias)) - event.DispatchUpdate(ev, "initializing master") - rp, err := wr.tmc.InitMaster(ctx, masterElectTabletInfo.Tablet) - if err != nil { - return err - } - - // Check we stil have the topology lock. - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return fmt.Errorf("lost topology lock, aborting: %v", err) - } - - // Create a cancelable context for the following RPCs. - // If error conditions happen, we can cancel all outgoing RPCs. 
- replCtx, replCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer replCancel() - - // Now tell the new master to insert the reparent_journal row, - // and tell everybody else to become a replica of the new master, - // and wait for the row in the reparent_journal table. - // We start all these in parallel, to handle the semi-sync - // case: for the master to be able to commit its row in the - // reparent_journal table, it needs connected replicas. - event.DispatchUpdate(ev, "reparenting all tablets") - now := time.Now().UnixNano() - wgMaster := sync.WaitGroup{} - wgReplicas := sync.WaitGroup{} - var masterErr error - for alias, tabletInfo := range tabletMap { - if alias == masterElectTabletAliasStr { - wgMaster.Add(1) - go func(alias string, tabletInfo *topo.TabletInfo) { - defer wgMaster.Done() - wr.logger.Infof("populating reparent journal on new master %v", alias) - masterErr = wr.tmc.PopulateReparentJournal(replCtx, tabletInfo.Tablet, now, initShardMasterOperation, masterElectTabletAlias, rp) - }(alias, tabletInfo) - } else { - wgReplicas.Add(1) - go func(alias string, tabletInfo *topo.TabletInfo) { - defer wgReplicas.Done() - wr.logger.Infof("initializing replica %v", alias) - if err := wr.tmc.InitReplica(replCtx, tabletInfo.Tablet, masterElectTabletAlias, rp, now); err != nil { - rec.RecordError(fmt.Errorf("tablet %v InitReplica failed: %v", alias, err)) - } - }(alias, tabletInfo) - } - } - - // After the master is done, we can update the shard record - // (note with semi-sync, it also means at least one replica is done). - wgMaster.Wait() - if masterErr != nil { - // The master failed, there is no way the - // replicas will work. So we cancel them all. 
- wr.logger.Warningf("master failed to PopulateReparentJournal, canceling replicas") - replCancel() - wgReplicas.Wait() - return fmt.Errorf("failed to PopulateReparentJournal on master: %v", masterErr) - } - if !topoproto.TabletAliasEqual(shardInfo.MasterAlias, masterElectTabletAlias) { - if _, err := wr.ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { - si.MasterAlias = masterElectTabletAlias - return nil - }); err != nil { - wgReplicas.Wait() - return fmt.Errorf("failed to update shard master record: %v", err) - } - } - - // Wait for the replicas to complete. If some of them fail, we - // don't want to rebuild the shard serving graph (the failure - // will most likely be a timeout, and our context will be - // expired, so the rebuild will fail anyway) - wgReplicas.Wait() - if err := rec.Error(); err != nil { - return err - } - - // Create database if necessary on the master. replicas will get it too through - // replication. Since the user called InitShardMaster, they've told us to - // assume that whatever data is on all the replicas is what they intended. - // If the database doesn't exist, it means the user intends for these tablets - // to begin serving with no data (i.e. first time initialization). - createDB := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", sqlescape.EscapeID(topoproto.TabletDbName(masterElectTabletInfo.Tablet))) - if _, err := wr.tmc.ExecuteFetchAsDba(ctx, masterElectTabletInfo.Tablet, false, []byte(createDB), 1, false, true); err != nil { - return fmt.Errorf("failed to create database: %v", err) - } - // Refresh the state to force the tabletserver to reconnect after db has been created. - if err := wr.tmc.RefreshState(ctx, masterElectTabletInfo.Tablet); err != nil { - log.Warningf("RefreshState failed: %v", err) - } - - return nil -} - // PlannedReparentShard will make the provided tablet the master for the shard, // when both the current and new master are reachable and in good shape. 
func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard string, masterElectTabletAlias, avoidMasterAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration) (err error) { - // lock the shard - lockAction := fmt.Sprintf( - "PlannedReparentShard(%v, avoid_master=%v)", - topoproto.TabletAliasString(masterElectTabletAlias), - topoproto.TabletAliasString(avoidMasterAlias)) - ctx, unlock, lockErr := wr.ts.LockShard(ctx, keyspace, shard, lockAction) - if lockErr != nil { - return lockErr - } - defer unlock(&err) - - // Create reusable Reparent event with available info - ev := &events.Reparent{} - - // Attempt to set avoidMasterAlias if not provided by parameters - if masterElectTabletAlias == nil && avoidMasterAlias == nil { - shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) - if err != nil { - return err - } - avoidMasterAlias = shardInfo.MasterAlias - } + _, err = reparentutil.NewPlannedReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( + ctx, + keyspace, + shard, + reparentutil.PlannedReparentOptions{ + AvoidPrimaryAlias: avoidMasterAlias, + NewPrimaryAlias: masterElectTabletAlias, + WaitReplicasTimeout: waitReplicasTimeout, + }, + ) - // do the work - err = wr.plannedReparentShardLocked(ctx, ev, keyspace, shard, masterElectTabletAlias, avoidMasterAlias, waitReplicasTimeout) - if err != nil { - event.DispatchUpdate(ev, "failed PlannedReparentShard: "+err.Error()) - } else { - event.DispatchUpdate(ev, "finished PlannedReparentShard") - } return err } -func (wr *Wrangler) plannedReparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, masterElectTabletAlias, avoidMasterTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration) error { - shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) - if err != nil { - return err - } - ev.ShardInfo = *shardInfo - - event.DispatchUpdate(ev, "reading tablet map") - tabletMap, err := wr.ts.GetTabletMapForShard(ctx, keyspace, shard) - if err != nil { - 
return err - } - - // Check invariants we're going to depend on. - if topoproto.TabletAliasEqual(masterElectTabletAlias, avoidMasterTabletAlias) { - return fmt.Errorf("master-elect tablet %v is the same as the tablet to avoid", topoproto.TabletAliasString(masterElectTabletAlias)) - } - if masterElectTabletAlias == nil { - if !topoproto.TabletAliasEqual(avoidMasterTabletAlias, shardInfo.MasterAlias) { - event.DispatchUpdate(ev, "current master is different than -avoid_master, nothing to do") - return nil - } - event.DispatchUpdate(ev, "searching for master candidate") - masterElectTabletAlias, err = wr.chooseNewMaster(ctx, shardInfo, tabletMap, avoidMasterTabletAlias, waitReplicasTimeout) - if err != nil { - return err - } - if masterElectTabletAlias == nil { - return fmt.Errorf("cannot find a tablet to reparent to") - } - wr.logger.Infof("elected new master candidate %v", topoproto.TabletAliasString(masterElectTabletAlias)) - event.DispatchUpdate(ev, "elected new master candidate") - } - masterElectTabletAliasStr := topoproto.TabletAliasString(masterElectTabletAlias) - masterElectTabletInfo, ok := tabletMap[masterElectTabletAliasStr] - if !ok { - return fmt.Errorf("master-elect tablet %v is not in the shard", masterElectTabletAliasStr) - } - ev.NewMaster = *masterElectTabletInfo.Tablet - if topoproto.TabletAliasIsZero(shardInfo.MasterAlias) { - return fmt.Errorf("the shard has no master, use EmergencyReparentShard") - } - - // Find the current master (if any) based on the tablet states. We no longer - // trust the shard record for this, because it is updated asynchronously. - currentMaster := wr.findCurrentMaster(tabletMap) - - var reparentJournalPos string - - if currentMaster == nil { - // We don't know who the current master is. Either there is no current - // master at all (no tablet claims to be MASTER), or there is no clear - // winner (multiple MASTER tablets with the same timestamp). - // Check if it's safe to promote the selected master candidate. 
- wr.logger.Infof("No clear winner found for current master term; checking if it's safe to recover by electing %v", masterElectTabletAliasStr) - - // As we contact each tablet, we'll send its replication position here. - type tabletPos struct { - tabletAliasStr string - tablet *topodatapb.Tablet - pos mysql.Position - } - positions := make(chan tabletPos, len(tabletMap)) - - // First stop the world, to ensure no writes are happening anywhere. - // Since we don't trust that we know which tablets might be acting as - // masters, we simply demote everyone. - // - // Unlike the normal, single-master case, we don't try to undo this if - // we bail out. If we're here, it means there is no clear master, so we - // don't know that it's safe to roll back to the previous state. - // Leaving everything read-only is probably safer than whatever weird - // state we were in before. - // - // If any tablets are unreachable, we can't be sure it's safe, because - // one of the unreachable ones might have a replication position farther - // ahead than the candidate master. - wgStopAll := sync.WaitGroup{} - rec := concurrency.AllErrorRecorder{} - - stopAllCtx, stopAllCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) - defer stopAllCancel() - - for tabletAliasStr, tablet := range tabletMap { - wgStopAll.Add(1) - go func(tabletAliasStr string, tablet *topodatapb.Tablet) { - defer wgStopAll.Done() - - // Regardless of what type this tablet thinks it is, we always - // call DemoteMaster to ensure the underlying MySQL is read-only - // and to check its replication position. DemoteMaster is - // idempotent so it's fine to call it on a replica that's - // already read-only. 
- wr.logger.Infof("demote tablet %v", tabletAliasStr) - masterStatus, err := wr.tmc.DemoteMaster(stopAllCtx, tablet) - if err != nil { - rec.RecordError(vterrors.Wrapf(err, "DemoteMaster failed on contested master %v", tabletAliasStr)) - return - } - pos, err := mysql.DecodePosition(masterStatus.Position) - if err != nil { - rec.RecordError(vterrors.Wrapf(err, "can't decode replication position for tablet %v", tabletAliasStr)) - return - } - positions <- tabletPos{ - tabletAliasStr: tabletAliasStr, - tablet: tablet, - pos: pos, - } - }(tabletAliasStr, tablet.Tablet) - } - wgStopAll.Wait() - close(positions) - if rec.HasErrors() { - return vterrors.Wrap(rec.Error(), "failed to demote all tablets") - } - - // Make a map of tablet positions. - tabletPosMap := make(map[string]tabletPos, len(tabletMap)) - for tp := range positions { - tabletPosMap[tp.tabletAliasStr] = tp - } - - // Make sure no tablet has a replication position farther ahead than the - // candidate master. It's up to our caller to choose a suitable - // candidate, and to choose another one if this check fails. - // - // Note that we still allow replication to run during this time, but we - // assume that no new high water mark can appear because we demoted all - // tablets to read-only. - // - // TODO: Consider temporarily replicating from another tablet to catch up. - tp, ok := tabletPosMap[masterElectTabletAliasStr] - if !ok { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "master-elect tablet %v not found in tablet map", masterElectTabletAliasStr) - } - masterElectPos := tp.pos - for _, tp := range tabletPosMap { - // The master elect pos has to be at least as far as every tablet. 
- if !masterElectPos.AtLeast(tp.pos) { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "tablet %v position (%v) contains transactions not found in master-elect %v position (%v)", - tp.tabletAliasStr, tp.pos, masterElectTabletAliasStr, masterElectPos) - } - } - - // Check we still have the topology lock. - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrap(err, "lost topology lock; aborting") - } - - // Promote the selected candidate to master. - promoteCtx, promoteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) - defer promoteCancel() - rp, err := wr.tmc.PromoteReplica(promoteCtx, masterElectTabletInfo.Tablet) - if err != nil { - return vterrors.Wrapf(err, "failed to promote %v to master", masterElectTabletAliasStr) - } - reparentJournalPos = rp - } else if topoproto.TabletAliasEqual(currentMaster.Alias, masterElectTabletAlias) { - // It is possible that a previous attempt to reparent failed to SetReadWrite - // so call it here to make sure underlying mysql is ReadWrite - rwCtx, rwCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) - defer rwCancel() - - if err := wr.tmc.SetReadWrite(rwCtx, masterElectTabletInfo.Tablet); err != nil { - return vterrors.Wrapf(err, "failed to SetReadWrite on current master %v", masterElectTabletAliasStr) - } - // The master is already the one we want according to its tablet record. - refreshCtx, refreshCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) - defer refreshCancel() - - // Get the position so we can try to fix replicas (below). - rp, err := wr.tmc.MasterPosition(refreshCtx, masterElectTabletInfo.Tablet) - if err != nil { - return vterrors.Wrapf(err, "failed to get replication position of current master %v", masterElectTabletAliasStr) - } - reparentJournalPos = rp - } else { - // There is already a master and it's not the one we want. 
- oldMasterTabletInfo := currentMaster - ev.OldMaster = *oldMasterTabletInfo.Tablet - - // Before demoting the old master, first make sure replication is - // working from the old master to the candidate master. If it's not - // working, we can't do a planned reparent because the candidate won't - // catch up. - wr.logger.Infof("Checking replication on master-elect %v", masterElectTabletAliasStr) - - // First we find the position of the current master. Note that this is - // just a snapshot of the position since we let it keep accepting new - // writes until we're sure we're going to proceed. - snapshotCtx, snapshotCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) - defer snapshotCancel() - - snapshotPos, err := wr.tmc.MasterPosition(snapshotCtx, currentMaster.Tablet) - if err != nil { - return vterrors.Wrapf(err, "can't get replication position on current master %v; current master must be healthy to perform planned reparent", currentMaster.AliasString()) - } - - // Now wait for the master-elect to catch up to that snapshot point. - // If it catches up to that point within the waitReplicasTimeout, - // we can be fairly confident it will catch up on everything that's - // happened in the meantime once we demote the master to stop writes. - // - // We do this as an idempotent SetMaster to make sure the replica knows - // who the current master is. - setMasterCtx, setMasterCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer setMasterCancel() - - err = wr.tmc.SetMaster(setMasterCtx, masterElectTabletInfo.Tablet, currentMaster.Alias, 0, snapshotPos, true) - if err != nil { - return vterrors.Wrapf(err, "replication on master-elect %v did not catch up in time; replication must be healthy to perform planned reparent", masterElectTabletAliasStr) - } - - // Check we still have the topology lock. 
- if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrap(err, "lost topology lock; aborting") - } - - // Demote the old master and get its replication position. It's fine if - // the old master was already demoted, since DemoteMaster is idempotent. - wr.logger.Infof("demote current master %v", oldMasterTabletInfo.Alias) - event.DispatchUpdate(ev, "demoting old master") - - demoteCtx, demoteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) - defer demoteCancel() - - masterStatus, err := wr.tmc.DemoteMaster(demoteCtx, oldMasterTabletInfo.Tablet) - if err != nil { - return fmt.Errorf("old master tablet %v DemoteMaster failed: %v", topoproto.TabletAliasString(shardInfo.MasterAlias), err) - } - - waitCtx, waitCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer waitCancel() - - waitErr := wr.tmc.WaitForPosition(waitCtx, masterElectTabletInfo.Tablet, masterStatus.Position) - if waitErr != nil || ctx.Err() == context.DeadlineExceeded { - // If the new master fails to catch up within the timeout, - // we try to roll back to the original master before aborting. - // It is possible that we have used up the original context, or that - // not enough time is left on it before it times out. - // But at this point we really need to be able to Undo so as not to - // leave the cluster in a bad state. - // So we create a fresh context based on context.Background(). 
- undoCtx, undoCancel := context.WithTimeout(context.Background(), *topo.RemoteOperationTimeout) - defer undoCancel() - if undoErr := wr.tmc.UndoDemoteMaster(undoCtx, oldMasterTabletInfo.Tablet); undoErr != nil { - log.Warningf("Encountered error while trying to undo DemoteMaster: %v", undoErr) - } - if waitErr != nil { - return vterrors.Wrapf(err, "master-elect tablet %v failed to catch up with replication", masterElectTabletAliasStr) - } - return vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "PlannedReparent timed out, please try again.") - } - - promoteCtx, promoteCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer promoteCancel() - rp, err := wr.tmc.PromoteReplica(promoteCtx, masterElectTabletInfo.Tablet) - if err != nil { - return vterrors.Wrapf(err, "master-elect tablet %v failed to be upgraded to master - please try again", masterElectTabletAliasStr) - } - - if ctx.Err() == context.DeadlineExceeded { - // PromoteReplica succeeded but the context has expired. PRS needs to be re-run to complete - return vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "PlannedReparent timed out after promoting new master. Please re-run to fixup replicas.") - } - reparentJournalPos = rp - } - - // Check we still have the topology lock. - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrap(err, "lost topology lock, aborting") - } - - // Create a cancelable context for the following RPCs. - // If error conditions happen, we can cancel all outgoing RPCs. - replCtx, replCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer replCancel() - - // Go through all the tablets: - // - new master: populate the reparent journal - // - everybody else: reparent to new master, wait for row - event.DispatchUpdate(ev, "reparenting all tablets") - - // We add a (hopefully) unique record to the reparent journal table on the - // new master so we can check if replicas got it through replication. 
- reparentJournalTimestamp := time.Now().UnixNano() - - // Point all replicas at the new master and check that they receive the - // reparent journal entry, proving they are replicating from the new master. - // We do this concurrently with adding the journal entry (below), because - // if semi-sync is enabled, the update to the journal table can't succeed - // until at least one replica is successfully attached to the new master. - wgReplicas := sync.WaitGroup{} - rec := concurrency.AllErrorRecorder{} - for alias, tabletInfo := range tabletMap { - if alias == masterElectTabletAliasStr { - continue - } - wgReplicas.Add(1) - go func(alias string, tabletInfo *topo.TabletInfo) { - defer wgReplicas.Done() - wr.logger.Infof("setting new master on replica %v", alias) - - // We used to force replica start on the old master, but now that - // we support "resuming" a PRS attempt that failed, we can no - // longer assume that we know who the old master was. - // Instead, we rely on the old master to remember that it needs - // to start replication after being converted to a replica. - forceStartReplication := false - - if err := wr.tmc.SetMaster(replCtx, tabletInfo.Tablet, masterElectTabletAlias, reparentJournalTimestamp, "", forceStartReplication); err != nil { - rec.RecordError(fmt.Errorf("tablet %v SetMaster failed: %v", alias, err)) - return - } - }(alias, tabletInfo) - } - - // Add a reparent journal entry on the new master. - wr.logger.Infof("populating reparent journal on new master %v", masterElectTabletAliasStr) - err = wr.tmc.PopulateReparentJournal(replCtx, masterElectTabletInfo.Tablet, reparentJournalTimestamp, plannedReparentShardOperation, masterElectTabletAlias, reparentJournalPos) - if err != nil { - // The master failed. There's no way the replicas will work, so cancel them all. 
- wr.logger.Warningf("master failed to PopulateReparentJournal, canceling replica reparent attempts") - replCancel() - wgReplicas.Wait() - return fmt.Errorf("failed to PopulateReparentJournal on master: %v", err) - } - - // Wait for the replicas to complete. - wgReplicas.Wait() - if err := rec.Error(); err != nil { - wr.Logger().Errorf2(err, "some replicas failed to reparent; retry PlannedReparentShard with the same new master alias to retry failed replicas") - return err - } - - return nil -} - -// findCurrentMaster returns the current master of a shard, if any. -// -// The tabletMap must be a complete map (not a partial result) for the shard. -// -// The current master is whichever MASTER tablet (if any) has the highest -// MasterTermStartTime, which is the same rule that vtgate uses to route master -// traffic. -// -// The return value is nil if the current master can't be definitively -// determined. This can happen either if no tablet claims to be MASTER, or if -// multiple MASTER tablets claim to have the same timestamp (a tie). -func (wr *Wrangler) findCurrentMaster(tabletMap map[string]*topo.TabletInfo) *topo.TabletInfo { - var currentMaster *topo.TabletInfo - var currentMasterTime time.Time - - for _, tablet := range tabletMap { - // Only look at masters. - if tablet.Type != topodatapb.TabletType_MASTER { - continue - } - // Fill in first master we find. - if currentMaster == nil { - currentMaster = tablet - currentMasterTime = tablet.GetMasterTermStartTime() - continue - } - // If we find any other masters, compare timestamps. - newMasterTime := tablet.GetMasterTermStartTime() - if newMasterTime.After(currentMasterTime) { - currentMaster = tablet - currentMasterTime = newMasterTime - continue - } - if newMasterTime.Equal(currentMasterTime) { - // A tie shouldn't happen unless the upgrade order was violated - // (some vttablets have not yet been upgraded) or if we get really - // unlucky. 
However, if it does happen, we need to be safe and not - // assume we know who the true master is. - wr.logger.Warningf("Multiple masters (%v and %v) are tied for MasterTermStartTime; can't determine the true master.", - topoproto.TabletAliasString(currentMaster.Alias), - topoproto.TabletAliasString(tablet.Alias)) - return nil - } - } - - return currentMaster -} - -// maxReplPosSearch is a struct helping to search for a tablet with the largest replication -// position querying status from all tablets in parallel. -type maxReplPosSearch struct { - wrangler *Wrangler - ctx context.Context - waitReplicasTimeout time.Duration - waitGroup sync.WaitGroup - maxPosLock sync.Mutex - maxPos mysql.Position - maxPosTablet *topodatapb.Tablet -} - -func (maxPosSearch *maxReplPosSearch) processTablet(tablet *topodatapb.Tablet) { - defer maxPosSearch.waitGroup.Done() - maxPosSearch.wrangler.logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias)) - - replicaStatusCtx, cancelReplicaStatus := context.WithTimeout(maxPosSearch.ctx, maxPosSearch.waitReplicasTimeout) - defer cancelReplicaStatus() - - status, err := maxPosSearch.wrangler.tmc.ReplicationStatus(replicaStatusCtx, tablet) - if err != nil { - maxPosSearch.wrangler.logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err) - return - } - replPos, err := mysql.DecodePosition(status.Position) - if err != nil { - maxPosSearch.wrangler.logger.Warningf("cannot decode replica %v position %v: %v", topoproto.TabletAliasString(tablet.Alias), status.Position, err) - return - } - - maxPosSearch.maxPosLock.Lock() - if maxPosSearch.maxPosTablet == nil || !maxPosSearch.maxPos.AtLeast(replPos) { - maxPosSearch.maxPos = replPos - maxPosSearch.maxPosTablet = tablet - } - maxPosSearch.maxPosLock.Unlock() -} - -// chooseNewMaster finds a tablet that is going to become master after reparent. 
The criteria -// for the new master-elect are (preferably) to be in the same cell as the current master, and -// to be different from avoidMasterTabletAlias. The tablet with the largest replication -// position is chosen to minimize the time of catching up with the master. Note that the search -// for largest replication position will race with transactions being executed on the master at -// the same time, so when all tablets are roughly at the same position then the choice of the -// new master-elect will be somewhat unpredictable. -func (wr *Wrangler) chooseNewMaster( - ctx context.Context, - shardInfo *topo.ShardInfo, - tabletMap map[string]*topo.TabletInfo, - avoidMasterTabletAlias *topodatapb.TabletAlias, - waitReplicasTimeout time.Duration) (*topodatapb.TabletAlias, error) { - - if avoidMasterTabletAlias == nil { - return nil, fmt.Errorf("tablet to avoid for reparent is not provided, cannot choose new master") - } - var masterCell string - if shardInfo.MasterAlias != nil { - masterCell = shardInfo.MasterAlias.Cell - } - - maxPosSearch := maxReplPosSearch{ - wrangler: wr, - ctx: ctx, - waitReplicasTimeout: waitReplicasTimeout, - waitGroup: sync.WaitGroup{}, - maxPosLock: sync.Mutex{}, - } - for _, tabletInfo := range tabletMap { - if (masterCell != "" && tabletInfo.Alias.Cell != masterCell) || - topoproto.TabletAliasEqual(tabletInfo.Alias, avoidMasterTabletAlias) || - tabletInfo.Tablet.Type != topodatapb.TabletType_REPLICA { - continue - } - maxPosSearch.waitGroup.Add(1) - go maxPosSearch.processTablet(tabletInfo.Tablet) - } - maxPosSearch.waitGroup.Wait() - - if maxPosSearch.maxPosTablet == nil { - return nil, nil - } - return maxPosSearch.maxPosTablet.Alias, nil -} - // EmergencyReparentShard will make the provided tablet the master for // the shard, when the old master is completely unreachable. 
func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, masterElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.String) (err error) { - // lock the shard - actionMsg := emergencyReparentShardOperation - if masterElectTabletAlias != nil { - actionMsg += fmt.Sprintf("(%v)", topoproto.TabletAliasString(masterElectTabletAlias)) - } - ctx, unlock, lockErr := wr.ts.LockShard(ctx, keyspace, shard, actionMsg) - if lockErr != nil { - return lockErr - } - defer unlock(&err) + _, err = reparentutil.NewEmergencyReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( + ctx, + keyspace, + shard, + reparentutil.EmergencyReparentOptions{ + NewPrimaryAlias: masterElectTabletAlias, + WaitReplicasTimeout: waitReplicasTimeout, + IgnoreReplicas: ignoredTablets, + }, + ) - // Create reusable Reparent event with available info - ev := &events.Reparent{} - - // do the work - err = wr.emergencyReparentShardLocked(ctx, ev, keyspace, shard, masterElectTabletAlias, waitReplicasTimeout, ignoredTablets) - if err != nil { - event.DispatchUpdate(ev, "failed EmergencyReparentShard: "+err.Error()) - } else { - event.DispatchUpdate(ev, "finished EmergencyReparentShard") - } - return err -} - -func (wr *Wrangler) emergencyReparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, masterElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.String) error { - shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard) - if err != nil { - return err - } - ev.ShardInfo = *shardInfo - - event.DispatchUpdate(ev, "reading all tablets") - tabletMap, err := wr.ts.GetTabletMapForShard(ctx, keyspace, shard) - if err != nil { - return vterrors.Wrapf(err, "failed to get tablet map for shard %v in keyspace %v: %v", shard, keyspace, err) - } - - statusMap, masterStatusMap, err := wr.stopReplicationAndBuildStatusMaps(ctx, ev, tabletMap, waitReplicasTimeout, ignoredTablets) - if 
err != nil { - return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err) - } - - // Check we still have the topology lock. - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) - } - - validCandidates, err := wr.findValidReparentCandidates(statusMap, masterStatusMap) - if err != nil { - return err - } - if len(validCandidates) == 0 { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no valid candidates for emergency reparent") - } - - waiterCount := 0 - - errChan := make(chan error) - rec := &concurrency.AllErrorRecorder{} - groupCtx, groupCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer groupCancel() - for candidate := range validCandidates { - status, ok := statusMap[candidate] - if !ok { - wr.logger.Infof("EmergencyReparent candidate %v not in replica status map; this means it was not running replication (because it was formerly MASTER), so skipping WaitForRelayLogsToApply step for this candidate", candidate) - continue - } - - go func(alias string, status *replicationdatapb.StopReplicationStatus) { - var err error - defer func() { errChan <- err }() - err = wr.WaitForRelayLogsToApply(groupCtx, tabletMap[alias], status) - }(candidate, status) - - waiterCount++ - } - - resultCounter := 0 - for waitErr := range errChan { - resultCounter++ - if waitErr != nil { - rec.RecordError(waitErr) - groupCancel() - } - if resultCounter == waiterCount { - break - } - } - if len(rec.Errors) != 0 { - return vterrors.Wrapf(rec.Error(), "could not apply all relay logs within the provided wait_replicas_timeout: %v", rec.Error()) - } - - var winningPosition mysql.Position - var newMasterTabletAliasStr string - for alias, position := range validCandidates { - if winningPosition.IsZero() { - winningPosition = position - newMasterTabletAliasStr = alias - continue - } - if position.AtLeast(winningPosition) { - winningPosition = position - 
newMasterTabletAliasStr = alias - } - } - - if masterElectTabletAlias != nil { - newMasterTabletAliasStr = topoproto.TabletAliasString(masterElectTabletAlias) - masterPos, ok := validCandidates[newMasterTabletAliasStr] - if !ok { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "master elect %v has errant GTIDs", newMasterTabletAliasStr) - } - if !masterPos.AtLeast(winningPosition) { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "master elect: %v at position %v, is not fully caught up. Winning position: %v", newMasterTabletAliasStr, masterPos, winningPosition) - } - } - - // Check we still have the topology lock. - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) - } - - // Promote the masterElect - wr.logger.Infof("promote tablet %v to master", newMasterTabletAliasStr) - event.DispatchUpdate(ev, "promoting replica") - rp, err := wr.tmc.PromoteReplica(ctx, tabletMap[newMasterTabletAliasStr].Tablet) - if err != nil { - return vterrors.Wrapf(err, "master-elect tablet %v failed to be upgraded to master: %v", newMasterTabletAliasStr, err) - } - - // Check we still have the topology lock. - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) - } - - // Create a cancelable context for the following RPCs. - // If error conditions happen, we can cancel all outgoing RPCs. - replCtx, replCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer replCancel() - - replSuccessCtx, replSuccessCancel := context.WithCancel(context.Background()) - allReplicasDoneCtx, allReplicasDoneCancel := context.WithCancel(context.Background()) - - // Reset replication on all replicas to point to the new master, and - // insert test row in the new master. 
- // Go through all the tablets: - // - new master: populate the reparent journal - // - everybody else: reparent to new master, wait for row - event.DispatchUpdate(ev, "reparenting all tablets") - now := time.Now().UnixNano() - replWg := sync.WaitGroup{} - // we will reuse the concurrency.AllErrorRecorder for the actual reparent - // starting here because we've validated above that there were zero errors - // up to this point - - handleMaster := func(alias string, tabletInfo *topo.TabletInfo) error { - wr.logger.Infof("populating reparent journal on new master %v", alias) - return wr.tmc.PopulateReparentJournal(replCtx, tabletInfo.Tablet, now, emergencyReparentShardOperation, tabletMap[newMasterTabletAliasStr].Alias, rp) - } - handleReplica := func(alias string, tabletInfo *topo.TabletInfo) { - defer replWg.Done() - - wr.logger.Infof("setting new master on replica %v", alias) - forceStart := false - if status, ok := statusMap[alias]; ok { - fs, err := replicaWasRunning(status) - if err != nil { - err = vterrors.Wrapf(err, "tablet %v could not determine StopReplicationStatus: %v", alias, err) - rec.RecordError(err) - - return - } - - forceStart = fs - } - err := wr.tmc.SetMaster(replCtx, tabletInfo.Tablet, tabletMap[newMasterTabletAliasStr].Alias, now, "", forceStart) - if err != nil { - err = vterrors.Wrapf(err, "tablet %v SetMaster failed: %v", alias, err) - rec.RecordError(err) - return - } - - // Signal that at least one goroutine succeeded to SetMaster. - replSuccessCancel() - } - - numReplicas := 0 - - for alias, tabletInfo := range tabletMap { - if alias == newMasterTabletAliasStr { - continue - } else if !ignoredTablets.Has(alias) { - replWg.Add(1) - numReplicas++ - go handleReplica(alias, tabletInfo) - } - } - - // Spin up a background goroutine to wait until all replica goroutines - // finished. 
Polling this way allows us to have promoteNewPrimary return - // success as soon as (a) the primary successfully populates its reparent - // journal and (b) at least one replica successfully begins replicating. - // - // If we were to follow the more common pattern of blocking on replWg.Wait() - // in the main body of promoteNewPrimary, we would be bound to the - // time of slowest replica, instead of the time of the fastest successful - // replica, and we want ERS to be fast. - go func() { - replWg.Wait() - allReplicasDoneCancel() - }() - - masterErr := handleMaster(newMasterTabletAliasStr, tabletMap[newMasterTabletAliasStr]) - if masterErr != nil { - wr.logger.Warningf("master failed to PopulateReparentJournal") - replCancel() - return vterrors.Wrapf(masterErr, "failed to PopulateReparentJournal on master: %v", masterErr) - } - - select { - case <-replSuccessCtx.Done(): - // At least one replica was able to SetMaster successfully - return nil - case <-allReplicasDoneCtx.Done(): - // There are certain timing issues between replSuccessCtx.Done firing - // and allReplicasDoneCtx.Done firing, so we check again if truly all - // replicas failed (where `numReplicas` goroutines recorded an error) or - // one or more actually managed to succeed. - errCount := len(rec.Errors) - - switch { - case errCount > numReplicas: - // Technically, rec.Errors should never be greater than numReplicas, - // but it's better to err on the side of caution here, but also - // we're going to be explicit that this is doubly unexpected. - return vterrors.Wrapf(rec.Error(), "received more errors (= %d) than replicas (= %d), which should be impossible: %v", errCount, numReplicas, rec.Error()) - case errCount == numReplicas: - return vterrors.Wrapf(rec.Error(), "%d replica(s) failed: %v", numReplicas, rec.Error()) - default: - return nil - } - } -} - -// waitOnNMinusOneTablets will wait until N-1 tablets have responded via a supplied error channel. 
In that case that N-1 tablets have responded, -// the supplied cancel function will be called, and we will wait until N tablets return their errors, and then return an AllErrorRecorder to the caller. -func waitOnNMinusOneTablets(ctxCancel context.CancelFunc, tabletCount int, errorChannel chan error, acceptableErrCnt int) *concurrency.AllErrorRecorder { - errCounter := 0 - successCounter := 0 - responseCounter := 0 - rec := &concurrency.AllErrorRecorder{} - - for err := range errorChannel { - responseCounter++ - if err != nil { - errCounter++ - rec.RecordError(err) - } else { - successCounter++ - } - if responseCounter == tabletCount { - // We must wait for any cancelled goroutines to return their error. - break - } - if errCounter > acceptableErrCnt || successCounter == tabletCount-1 { - ctxCancel() - } - } - - return rec -} - -// findValidReparentCandidates will find valid candidates for emergency reparent, and if successful, returning them as a list of tablet aliases. -func (wr *Wrangler) findValidReparentCandidates(statusMap map[string]*replicationdatapb.StopReplicationStatus, masterStatusMap map[string]*replicationdatapb.MasterStatus) (map[string]mysql.Position, error) { - // Build out replication status list from proto types. - replicationStatusMap := make(map[string]*mysql.ReplicationStatus, len(statusMap)) - for alias, protoStatus := range statusMap { - status := mysql.ProtoToReplicationStatus(protoStatus.After) - replicationStatusMap[alias] = &status - } - - // Determine if we need to find errant GTIDs. - var gtidBased *bool - for alias, status := range replicationStatusMap { - if gtidBased == nil { - _, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet) - gtidBased = pointer.BoolPtr(ok) - } else if !*gtidBased { - break - } else if status.RelayLogPosition.IsZero() { - // Bail. We have an odd one in the bunch. 
- return nil, vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "encountered tablet %v with no relay log position, when at least one other tablet in the status map has GTID based relay log positions", alias) - } - } - - // Create relevant position list of errant GTID based positions for later comparison. - positionMap := make(map[string]mysql.Position) - for alias, status := range replicationStatusMap { - // Find errantGTIDs and clean them from status map if relevant. - if *gtidBased { - // We need to remove this status from a copy of the list, otherwise the diff will be empty always. - statusList := make([]*mysql.ReplicationStatus, 0, len(replicationStatusMap)-1) - for a, s := range replicationStatusMap { - if a != alias { - statusList = append(statusList, s) - } - } - relayLogGTIDSet, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet) - if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "we got a filled in relay log position, but it's not of type Mysql56GTIDSet, even though we've determined we need to use GTID based assessment") - } - errantGTIDs, err := status.FindErrantGTIDs(statusList) - if err != nil { - // Could not find errant GTIDs when we must. - return nil, err - } - if len(errantGTIDs) != 0 { - // Skip inserting this tablet. It's not a valid candidate. 
- continue - } - - pos := mysql.Position{GTIDSet: relayLogGTIDSet} - positionMap[alias] = pos - } else { - positionMap[alias] = status.Position - } - } - - for alias, masterStatus := range masterStatusMap { - executedPosition, err := mysql.DecodePosition(masterStatus.Position) - if err != nil { - return nil, vterrors.Wrapf(err, "could not decode a master status executed position for tablet %v: %v", alias, err) - } - positionMap[alias] = executedPosition - } - - return positionMap, nil -} - -func (wr *Wrangler) stopReplicationAndBuildStatusMaps(ctx context.Context, ev *events.Reparent, tabletMap map[string]*topo.TabletInfo, waitReplicasTimeout time.Duration, ignoredTablets sets.String) (map[string]*replicationdatapb.StopReplicationStatus, map[string]*replicationdatapb.MasterStatus, error) { - // Stop replication on all replicas, get their current - // replication position - event.DispatchUpdate(ev, "stop replication on all replicas") - statusMap := make(map[string]*replicationdatapb.StopReplicationStatus) - masterStatusMap := make(map[string]*replicationdatapb.MasterStatus) - mu := sync.Mutex{} - - errChan := make(chan error) - groupCtx, groupCancel := context.WithTimeout(ctx, waitReplicasTimeout) - defer groupCancel() - fillStatus := func(alias string, tabletInfo *topo.TabletInfo) { - err := vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "fillStatus did not successfully complete") - defer func() { errChan <- err }() - - wr.logger.Infof("getting replication position from %v", alias) - var stopReplicationStatus *replicationdatapb.StopReplicationStatus - _, stopReplicationStatus, err = wr.tmc.StopReplicationAndGetStatus(groupCtx, tabletInfo.Tablet, replicationdatapb.StopReplicationMode_IOTHREADONLY) - switch err { - case mysql.ErrNotReplica: - var masterStatus *replicationdatapb.MasterStatus - masterStatus, err = wr.tmc.DemoteMaster(groupCtx, tabletInfo.Tablet) - if err != nil { - wr.logger.Warningf("replica %v thinks it's master but we failed to demote it", alias) - err = 
vterrors.Wrapf(err, "replica %v thinks it's master but we failed to demote it: %v", alias, err) - return - } - mu.Lock() - masterStatusMap[alias] = masterStatus - mu.Unlock() - - case nil: - mu.Lock() - statusMap[alias] = stopReplicationStatus - mu.Unlock() - - default: - wr.logger.Warningf("failed to get replication status from %v: %v", alias, err) - err = vterrors.Wrapf(err, "error when getting replication status for alias %v: %v", alias, err) - } - } - - for alias, tabletInfo := range tabletMap { - if !ignoredTablets.Has(alias) { - go fillStatus(alias, tabletInfo) - } - } - - errRecorder := waitOnNMinusOneTablets(groupCancel, len(tabletMap)-ignoredTablets.Len(), errChan, 1) - - if len(errRecorder.Errors) > 1 { - return nil, nil, vterrors.Wrapf(errRecorder.Error(), "encountered more than one error when trying to stop replication and get positions: %v", errRecorder.Error()) - } - return statusMap, masterStatusMap, nil -} - -// WaitForRelayLogsToApply will block execution waiting for the given tablets relay logs to apply, unless the supplied -// context is cancelled, or waitReplicasTimeout is exceeded. -func (wr *Wrangler) WaitForRelayLogsToApply(ctx context.Context, tabletInfo *topo.TabletInfo, status *replicationdatapb.StopReplicationStatus) error { - var err error - if status.After.RelayLogPosition != "" { - err = wr.tmc.WaitForPosition(ctx, tabletInfo.Tablet, status.After.RelayLogPosition) - } else { - err = wr.tmc.WaitForPosition(ctx, tabletInfo.Tablet, status.After.FileRelayLogPosition) - } return err } @@ -1300,14 +241,3 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newMasterAli } return nil } - -// replicaWasRunning returns true if a StopReplicationStatus indicates that the -// replica had running replication threads before being stopped. It returns an -// error if the Before state of replication is nil. 
-func replicaWasRunning(stopReplicationStatus *replicationdatapb.StopReplicationStatus) (bool, error) { - if stopReplicationStatus == nil || stopReplicationStatus.Before == nil { - return false, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not determine Before state of StopReplicationStatus %v", stopReplicationStatus) - } - - return stopReplicationStatus.Before.IoThreadRunning || stopReplicationStatus.Before.SqlThreadRunning, nil -} diff --git a/go/vt/wrangler/resharder.go b/go/vt/wrangler/resharder.go index 5901f10862c..27de39c859b 100644 --- a/go/vt/wrangler/resharder.go +++ b/go/vt/wrangler/resharder.go @@ -17,11 +17,12 @@ limitations under the License. package wrangler import ( + "context" "fmt" "sync" "time" - "context" + "vitess.io/vitess/go/vt/log" "github.com/golang/protobuf/proto" "github.com/pkg/errors" @@ -51,6 +52,7 @@ type resharder struct { refStreams map[string]*refStream cell string //single cell or cellsAlias or comma-separated list of cells/cellsAliases tabletTypes string + stopAfterCopy bool } type refStream struct { @@ -61,14 +63,22 @@ type refStream struct { } // Reshard initiates a resharding workflow. 
-func (wr *Wrangler) Reshard(ctx context.Context, keyspace, workflow string, sources, targets []string, skipSchemaCopy bool, cell, tabletTypes string) error { +func (wr *Wrangler) Reshard(ctx context.Context, keyspace, workflow string, sources, targets []string, + skipSchemaCopy bool, cell, tabletTypes string, autoStart, stopAfterCopy bool) error { if err := wr.validateNewWorkflow(ctx, keyspace, workflow); err != nil { return err } + if err := wr.ts.ValidateSrvKeyspace(ctx, keyspace, cell); err != nil { + err2 := vterrors.Wrapf(err, "SrvKeyspace for keyspace %s is corrupt in cell %s", keyspace, cell) + log.Errorf("%w", err2) + return err2 + } + rs, err := wr.buildResharder(ctx, keyspace, workflow, sources, targets, cell, tabletTypes) if err != nil { return vterrors.Wrap(err, "buildResharder") } + rs.stopAfterCopy = stopAfterCopy if !skipSchemaCopy { if err := rs.copySchema(ctx); err != nil { return vterrors.Wrap(err, "copySchema") @@ -77,10 +87,14 @@ func (wr *Wrangler) Reshard(ctx context.Context, keyspace, workflow string, sour if err := rs.createStreams(ctx); err != nil { return vterrors.Wrap(err, "createStreams") } - if err := rs.startStreams(ctx); err != nil { - return vterrors.Wrap(err, "startStream") - } + if autoStart { + if err := rs.startStreams(ctx); err != nil { + return vterrors.Wrap(err, "startStreams") + } + } else { + wr.Logger().Infof("Streams will not be started since -auto_start is set to false") + } return nil } @@ -301,9 +315,10 @@ func (rs *resharder) createStreams(ctx context.Context) error { }), } bls := &binlogdatapb.BinlogSource{ - Keyspace: rs.keyspace, - Shard: source.ShardName(), - Filter: filter, + Keyspace: rs.keyspace, + Shard: source.ShardName(), + Filter: filter, + StopAfterCopy: rs.stopAfterCopy, } ig.AddRow(rs.workflow, bls, "", rs.cell, rs.tabletTypes) } diff --git a/go/vt/wrangler/resharder_env_test.go b/go/vt/wrangler/resharder_env_test.go index d3aad21f1d1..ed0c20ab638 100644 --- a/go/vt/wrangler/resharder_env_test.go +++ 
b/go/vt/wrangler/resharder_env_test.go @@ -24,6 +24,10 @@ import ( "sync" "testing" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/key" + "context" "vitess.io/vitess/go/sqltypes" @@ -55,7 +59,36 @@ var ( //---------------------------------------------- // testResharderEnv -func newTestResharderEnv(sources, targets []string) *testResharderEnv { +func getPartition(t *testing.T, shards []string) *topodatapb.SrvKeyspace_KeyspacePartition { + partition := &topodatapb.SrvKeyspace_KeyspacePartition{ + ServedType: topodatapb.TabletType_MASTER, + ShardReferences: []*topodatapb.ShardReference{}, + } + for _, shard := range shards { + keyRange, err := key.ParseShardingSpec(shard) + require.NoError(t, err) + require.Equal(t, 1, len(keyRange)) + partition.ShardReferences = append(partition.ShardReferences, &topodatapb.ShardReference{ + Name: shard, + KeyRange: keyRange[0], + }) + } + return partition +} +func initTopo(t *testing.T, topo *topo.Server, keyspace string, sources, targets, cells []string) { + ctx := context.Background() + srvKeyspace := &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{}, + } + srvKeyspace.Partitions = append(srvKeyspace.Partitions, getPartition(t, sources)) + srvKeyspace.Partitions = append(srvKeyspace.Partitions, getPartition(t, targets)) + for _, cell := range cells { + topo.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace) + } + topo.ValidateSrvKeyspace(ctx, keyspace, strings.Join(cells, ",")) +} + +func newTestResharderEnv(t *testing.T, sources, targets []string) *testResharderEnv { env := &testResharderEnv{ keyspace: "ks", workflow: "resharderTest", @@ -67,7 +100,7 @@ func newTestResharderEnv(sources, targets []string) *testResharderEnv { tmc: newTestResharderTMClient(), } env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) - + initTopo(t, env.topoServ, "ks", sources, targets, []string{"cell"}) tabletID := 100 for _, shard := range sources { _ = env.addTablet(tabletID, 
env.keyspace, shard, topodatapb.TabletType_MASTER) diff --git a/go/vt/wrangler/resharder_test.go b/go/vt/wrangler/resharder_test.go index 4197bd037fa..7b7199fee19 100644 --- a/go/vt/wrangler/resharder_test.go +++ b/go/vt/wrangler/resharder_test.go @@ -39,7 +39,7 @@ const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_ const eol = "$" func TestResharderOneToMany(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -73,7 +73,7 @@ func TestResharderOneToMany(t *testing.T) { testCases = append(testCases, newTestCase("", "replica,rdonly")) for _, tc := range testCases { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) schm := &tabletmanagerdatapb.SchemaDefinition{ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ @@ -106,7 +106,7 @@ func TestResharderOneToMany(t *testing.T) { env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, tc.cells, tc.tabletTypes) + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, tc.cells, tc.tabletTypes, true, false) require.NoError(t, err) env.tmc.verifyQueries(t) }) @@ -115,7 +115,7 @@ func TestResharderOneToMany(t *testing.T) { } func TestResharderManyToOne(t *testing.T) { - env := newTestResharderEnv([]string{"-80", "80-"}, []string{"0"}) + env := newTestResharderEnv(t, []string{"-80", "80-"}, []string{"0"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -142,13 +142,13 @@ func 
TestResharderManyToOne(t *testing.T) { env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.NoError(t, err) env.tmc.verifyQueries(t) } func TestResharderManyToMany(t *testing.T) { - env := newTestResharderEnv([]string{"-40", "40-"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"-40", "40-"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -183,7 +183,7 @@ func TestResharderManyToMany(t *testing.T) { env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.NoError(t, err) env.tmc.verifyQueries(t) } @@ -191,7 +191,7 @@ func TestResharderManyToMany(t *testing.T) { // TestResharderOneRefTable tests the case where there's one ref table, but no stream for it. // This means that the table is being updated manually. 
func TestResharderOneRefTable(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -236,14 +236,65 @@ func TestResharderOneRefTable(t *testing.T) { env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) + assert.NoError(t, err) + env.tmc.verifyQueries(t) +} + +// TestReshardStopFlags tests the flags -stop_started and -stop_after_copy +func TestReshardStopFlags(t *testing.T) { + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + schm := &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Name: "t1", + Columns: []string{"c1", "c2"}, + PrimaryKeyColumns: []string{"c1"}, + Fields: sqltypes.MakeTestFields("c1|c2", "int64|int64"), + }}, + } + env.tmc.schema = schm + + vs := &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + }, + } + if err := env.wr.ts.SaveVSchema(context.Background(), env.keyspace, vs); err != nil { + t.Fatal(err) + } + + env.expectValidation() + env.expectNoRefStream() + // inserts into the two shards expects flag stop_after_copy to be true + + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter: rules: > stop_after_copy:true ', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks'\)`+ + eol, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery( + 210, + insertPrefix+ + 
`\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter: rules: > stop_after_copy:true ', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks'\)`+ + eol, + &sqltypes.Result{}, + ) + // -auto_start=false is tested by NOT expecting the update query which sets state to RUNNING + + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", false, true) assert.NoError(t, err) env.tmc.verifyQueries(t) } // TestResharderOneRefStream tests the case where there's one ref table and an associated stream. func TestResharderOneRefStream(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -304,14 +355,14 @@ func TestResharderOneRefStream(t *testing.T) { env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.NoError(t, err) env.tmc.verifyQueries(t) } // TestResharderNoRefStream tests the case where there's a stream, but it's not a reference. 
func TestResharderNoRefStream(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -381,13 +432,13 @@ func TestResharderNoRefStream(t *testing.T) { env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.NoError(t, err) env.tmc.verifyQueries(t) } func TestResharderCopySchema(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -425,13 +476,13 @@ func TestResharderCopySchema(t *testing.T) { env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, false, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, false, "", "", true, false) assert.NoError(t, err) env.tmc.verifyQueries(t) } func TestResharderDupWorkflow(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -456,13 +507,13 @@ func TestResharderDupWorkflow(t *testing.T) { 
env.tmc.expectVRQuery(200, rsSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(100, rsSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.EqualError(t, err, "validateWorkflowName.VReplicationExec: workflow resharderTest already exists in keyspace ks on tablet 210") env.tmc.verifyQueries(t) } func TestResharderServingState(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -481,7 +532,7 @@ func TestResharderServingState(t *testing.T) { env.tmc.expectVRQuery(100, rsSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, rsSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, rsSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, []string{"-80"}, nil, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, []string{"-80"}, nil, true, "", "", true, false) assert.EqualError(t, err, "buildResharder: source shard -80 is not in serving state") env.tmc.expectVRQuery(100, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", env.keyspace, env.workflow), &sqltypes.Result{}) @@ -490,7 +541,7 @@ func TestResharderServingState(t *testing.T) { env.tmc.expectVRQuery(100, rsSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, rsSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, rsSelectFrozenQuery, &sqltypes.Result{}) - err = env.wr.Reshard(context.Background(), env.keyspace, env.workflow, []string{"0"}, []string{"0"}, true, "", "") + err = env.wr.Reshard(context.Background(), env.keyspace, 
env.workflow, []string{"0"}, []string{"0"}, true, "", "", true, false) assert.EqualError(t, err, "buildResharder: target shard 0 is in serving state") env.tmc.expectVRQuery(100, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", env.keyspace, env.workflow), &sqltypes.Result{}) @@ -499,12 +550,12 @@ func TestResharderServingState(t *testing.T) { env.tmc.expectVRQuery(100, rsSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, rsSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, rsSelectFrozenQuery, &sqltypes.Result{}) - err = env.wr.Reshard(context.Background(), env.keyspace, env.workflow, []string{"0"}, []string{"-80"}, true, "", "") + err = env.wr.Reshard(context.Background(), env.keyspace, env.workflow, []string{"0"}, []string{"-80"}, true, "", "", true, false) assert.EqualError(t, err, "buildResharder: ValidateForReshard: source and target keyranges don't match: - vs -80") } func TestResharderTargetAlreadyResharding(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -531,13 +582,13 @@ func TestResharderTargetAlreadyResharding(t *testing.T) { env.tmc.expectVRQuery(200, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s'", env.keyspace), result) env.tmc.expectVRQuery(210, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s'", env.keyspace), &sqltypes.Result{}) env.tmc.expectVRQuery(100, rsSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.EqualError(t, err, "buildResharder: validateTargets: some streams already exist in the target shards, please clean them up and retry the 
command") env.tmc.verifyQueries(t) } func TestResharderUnnamedStream(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -579,13 +630,13 @@ func TestResharderUnnamedStream(t *testing.T) { ) env.tmc.expectVRQuery(100, fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name='vt_%s' and message != 'FROZEN'", env.keyspace), result) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.EqualError(t, err, "buildResharder: readRefStreams: VReplication streams must have named workflows for migration: shard: ks:0") env.tmc.verifyQueries(t) } func TestResharderMismatchedRefStreams(t *testing.T) { - env := newTestResharderEnv([]string{"-80", "80-"}, []string{"0"}) + env := newTestResharderEnv(t, []string{"-80", "80-"}, []string{"0"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -643,7 +694,7 @@ func TestResharderMismatchedRefStreams(t *testing.T) { ) env.tmc.expectVRQuery(110, fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name='vt_%s' and message != 'FROZEN'", env.keyspace), result2) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) want := "buildResharder: readRefStreams: streams are mismatched across source shards" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Reshard err: %v, want %v", err, want) @@ -652,7 +703,7 @@ func TestResharderMismatchedRefStreams(t *testing.T) { } func 
TestResharderTableNotInVSchema(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -683,13 +734,13 @@ func TestResharderTableNotInVSchema(t *testing.T) { ) env.tmc.expectVRQuery(100, fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name='vt_%s' and message != 'FROZEN'", env.keyspace), result) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) assert.EqualError(t, err, "buildResharder: readRefStreams: blsIsReference: table t1 not found in vschema") env.tmc.verifyQueries(t) } func TestResharderMixedTablesOrder1(t *testing.T) { - env := newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -747,7 +798,7 @@ func TestResharderMixedTablesOrder1(t *testing.T) { ) env.tmc.expectVRQuery(100, fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name='vt_%s' and message != 'FROZEN'", env.keyspace), result) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) want := "buildResharder: readRefStreams: blsIsReference: cannot reshard streams with a mix of reference and sharded tables" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Reshard err: %v, want %v", err.Error(), want) @@ -756,7 +807,7 @@ func TestResharderMixedTablesOrder1(t *testing.T) { } func TestResharderMixedTablesOrder2(t *testing.T) { - env := 
newTestResharderEnv([]string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -814,7 +865,7 @@ func TestResharderMixedTablesOrder2(t *testing.T) { ) env.tmc.expectVRQuery(100, fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name='vt_%s' and message != 'FROZEN'", env.keyspace), result) - err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "") + err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", true, false) want := "buildResharder: readRefStreams: blsIsReference: cannot reshard streams with a mix of reference and sharded tables" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Reshard err: %v, want %v", err.Error(), want) diff --git a/go/vt/wrangler/stream_migrater.go b/go/vt/wrangler/stream_migrater.go index b48b9e5c578..b75bb501c0b 100644 --- a/go/vt/wrangler/stream_migrater.go +++ b/go/vt/wrangler/stream_migrater.go @@ -516,7 +516,7 @@ func (sm *streamMigrater) templatizeKeyRange(ctx context.Context, rule *binlogda if strings.Contains(rule.Filter, "{{") { return fmt.Errorf("cannot migrate queries that contain '{{' in their string: %s", rule.Filter) } - val.Val = []byte("{{.}}") + val.Val = "{{.}}" rule.Filter = sqlparser.String(statement) return nil } @@ -526,8 +526,8 @@ func (sm *streamMigrater) templatizeKeyRange(ctx context.Context, rule *binlogda Name: sqlparser.NewColIdent("in_keyrange"), Exprs: sqlparser.SelectExprs{ &sqlparser.AliasedExpr{Expr: &sqlparser.ColName{Name: vtable.ColumnVindexes[0].Columns[0]}}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral([]byte(vtable.ColumnVindexes[0].Type))}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral([]byte("{{.}}"))}, + &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vtable.ColumnVindexes[0].Type)}, + 
&sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.}}")}, }, } sel.AddWhere(inkr) diff --git a/go/vt/wrangler/switcher.go b/go/vt/wrangler/switcher.go index 9aea911adb3..aad7ae46ead 100644 --- a/go/vt/wrangler/switcher.go +++ b/go/vt/wrangler/switcher.go @@ -31,6 +31,10 @@ type switcher struct { wr *Wrangler } +func (r *switcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + return r.ts.addParticipatingTablesToKeyspace(ctx, keyspace, tableSpecs) +} + func (r *switcher) deleteRoutingRules(ctx context.Context) error { return r.ts.deleteRoutingRules(ctx) } diff --git a/go/vt/wrangler/switcher_dry_run.go b/go/vt/wrangler/switcher_dry_run.go index 6f7a505ce6c..c01cd09ebfb 100644 --- a/go/vt/wrangler/switcher_dry_run.go +++ b/go/vt/wrangler/switcher_dry_run.go @@ -37,6 +37,11 @@ type switcherDryRun struct { ts *trafficSwitcher } +func (dr *switcherDryRun) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + dr.drLog.Log("All source tables will be added to the target keyspace vschema") + return nil +} + func (dr *switcherDryRun) deleteRoutingRules(ctx context.Context) error { dr.drLog.Log("Routing rules for participating tables will be deleted") return nil diff --git a/go/vt/wrangler/switcher_interface.go b/go/vt/wrangler/switcher_interface.go index d7c94fa8011..87272e6736b 100644 --- a/go/vt/wrangler/switcher_interface.go +++ b/go/vt/wrangler/switcher_interface.go @@ -49,6 +49,6 @@ type iswitcher interface { removeTargetTables(ctx context.Context) error dropTargetShards(ctx context.Context) error deleteRoutingRules(ctx context.Context) error - + addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error logs() *[]string } diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index b0727ae4f4a..c70fc084f73 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -210,7 +210,7 @@ func (wr *Wrangler) VReplicationExec(ctx 
context.Context, tabletAlias *topodatap return wr.tmc.VReplicationExec(ctx, ti.Tablet, query) } -// VReplicationExec executes a query remotely using the DBA pool +// GenericVExec executes a query remotely using the DBA pool func (wr *Wrangler) GenericVExec(ctx context.Context, tabletAlias *topodatapb.TabletAlias, query, workflow, keyspace string) (*querypb.QueryResult, error) { ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { @@ -230,22 +230,5 @@ func (wr *Wrangler) GenericVExec(ctx context.Context, tabletAlias *topodatapb.Ta // the system is in transition (a reparenting event is in progress and parts of // the topo have not yet been updated). func (wr *Wrangler) isMasterTablet(ctx context.Context, ti *topo.TabletInfo) (bool, error) { - // Tablet record claims to be non-master, we believe it - if ti.Type != topodatapb.TabletType_MASTER { - return false, nil - } - si, err := wr.ts.GetShard(ctx, ti.Keyspace, ti.Shard) - if err != nil { - // strictly speaking it isn't correct to return false here, the tablet status is unknown - return false, err - } - // Tablet record claims to be master, and shard record matches - if topoproto.TabletAliasEqual(si.MasterAlias, ti.Tablet.Alias) { - return true, nil - } - // Shard record has another tablet as master, so check MasterTermStartTime - // If tablet record's MasterTermStartTime is later than the one in the shard record, then tablet is master - tabletMTST := ti.GetMasterTermStartTime() - shardMTST := si.GetMasterTermStartTime() - return tabletMTST.After(shardMTST), nil + return topotools.IsPrimaryTablet(ctx, wr.TopoServer(), ti) } diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index 59f10c4dc26..e84a1043311 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -290,7 +290,7 @@ func TestPlannedReparentNoMaster(t *testing.T) { err := 
vp.Run([]string{"PlannedReparentShard", "-wait_replicas_timeout", "10s", "-keyspace_shard", replica1.Tablet.Keyspace + "/" + replica1.Tablet.Shard, "-new_master", topoproto.TabletAliasString(replica1.Tablet.Alias)}) assert.Error(t, err) - assert.Contains(t, err.Error(), "the shard has no master") + assert.Contains(t, err.Error(), "the shard has no current primary") } // TestPlannedReparentShardWaitForPositionFail simulates a failure of the WaitForPosition call @@ -386,7 +386,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { // run PlannedReparentShard err := vp.Run([]string{"PlannedReparentShard", "-wait_replicas_timeout", "10s", "-keyspace_shard", newMaster.Tablet.Keyspace + "/" + newMaster.Tablet.Shard, "-new_master", topoproto.TabletAliasString(newMaster.Tablet.Alias)}) assert.Error(t, err) - assert.Contains(t, err.Error(), "replication on master-elect cell1-0000000001 did not catch up in time") + assert.Contains(t, err.Error(), "replication on primary-elect cell1-0000000001 did not catch up in time") // now check that DemoteMaster was undone and old master is still master assert.True(t, newMaster.FakeMysqlDaemon.ReadOnly, "newMaster.FakeMysqlDaemon.ReadOnly not set") @@ -486,7 +486,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { // run PlannedReparentShard err := vp.Run([]string{"PlannedReparentShard", "-wait_replicas_timeout", "10s", "-keyspace_shard", newMaster.Tablet.Keyspace + "/" + newMaster.Tablet.Shard, "-new_master", topoproto.TabletAliasString(newMaster.Tablet.Alias)}) assert.Error(t, err) - assert.Contains(t, err.Error(), "replication on master-elect cell1-0000000001 did not catch up in time") + assert.Contains(t, err.Error(), "replication on primary-elect cell1-0000000001 did not catch up in time") // now check that DemoteMaster was undone and old master is still master assert.True(t, newMaster.FakeMysqlDaemon.ReadOnly, "newMaster.FakeMysqlDaemon.ReadOnly not set") diff --git 
a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index 6e2960a17bd..06c92c3f88a 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -26,6 +26,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -114,7 +116,8 @@ type trafficSwitcher struct { sourceKSSchema *vindexes.KeyspaceSchema optCells string //cells option passed to MoveTables/Reshard optTabletTypes string //tabletTypes option passed to MoveTables/Reshard - + externalCluster string + externalTopo *topo.Server } // tsTarget contains the metadata for each migration target. @@ -465,11 +468,19 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflow st } return sw.logs(), nil } - wr.Logger().Infof("switchShardReads: %+v, %+v, %+v", cells, servedTypes, direction) + wr.Logger().Infof("About to switchShardReads: %+v, %+v, %+v", cells, servedTypes, direction) if err := ts.switchShardReads(ctx, cells, servedTypes, direction); err != nil { ts.wr.Logger().Errorf("switchShardReads failed: %v", err) return nil, err } + + wr.Logger().Infof("switchShardReads Completed: %+v, %+v, %+v", cells, servedTypes, direction) + if err := wr.ts.ValidateSrvKeyspace(ctx, targetKeyspace, strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "After switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + targetKeyspace, strings.Join(cells, ",")) + log.Errorf("%w", err2) + return nil, err2 + } return sw.logs(), nil } @@ -696,6 +707,47 @@ func (wr *Wrangler) dropArtifacts(ctx context.Context, sw iswitcher) error { return nil } +// finalizeMigrateWorkflow deletes the streams for the Migrate workflow. 
+// We only cleanup the target for external sources +func (wr *Wrangler) finalizeMigrateWorkflow(ctx context.Context, targetKeyspace, workflow, tableSpecs string, + cancel, keepData, dryRun bool) (*[]string, error) { + ts, err := wr.buildTrafficSwitcher(ctx, targetKeyspace, workflow) + if err != nil { + wr.Logger().Errorf("buildTrafficSwitcher failed: %v", err) + return nil, err + } + var sw iswitcher + if dryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{ts: ts, wr: wr} + } + var tctx context.Context + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.targetKeyspace, "completeMigrateWorkflow") + if lockErr != nil { + ts.wr.Logger().Errorf("Target LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer targetUnlock(&err) + ctx = tctx + if err := sw.dropTargetVReplicationStreams(ctx); err != nil { + return nil, err + } + if !cancel { + sw.addParticipatingTablesToKeyspace(ctx, targetKeyspace, tableSpecs) + if err := ts.wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + } + log.Infof("cancel is %t, keepData %t", cancel, keepData) + if cancel && !keepData { + if err := sw.removeTargetTables(ctx); err != nil { + return nil, err + } + } + return sw.logs(), nil +} + // DropSources cleans up source tables, shards and blacklisted tables after a MoveTables/Reshard is completed func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflow string, removalType TableRemovalType, keepData, force, dryRun bool) (*[]string, error) { ts, err := wr.buildTrafficSwitcher(ctx, targetKeyspace, workflow) @@ -781,12 +833,22 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo optTabletTypes: optTabletTypes, } log.Infof("Migration ID for workflow %s: %d", workflow, ts.id) + sourceTopo := wr.ts // Build the sources for _, target := range targets { for _, bls := range target.sources { if ts.sourceKeyspace == "" { ts.sourceKeyspace = bls.Keyspace + ts.externalCluster 
= bls.ExternalCluster + if ts.externalCluster != "" { + externalTopo, err := wr.ts.OpenExternalVitessClusterServer(ctx, ts.externalCluster) + if err != nil { + return nil, err + } + sourceTopo = externalTopo + ts.externalTopo = externalTopo + } } else if ts.sourceKeyspace != bls.Keyspace { return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", ts.sourceKeyspace, bls.Keyspace) } @@ -810,11 +872,11 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo if _, ok := ts.sources[bls.Shard]; ok { continue } - sourcesi, err := ts.wr.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + sourcesi, err := sourceTopo.GetShard(ctx, bls.Keyspace, bls.Shard) if err != nil { return nil, err } - sourceMaster, err := ts.wr.ts.GetTablet(ctx, sourcesi.MasterAlias) + sourceMaster, err := sourceTopo.GetTablet(ctx, sourcesi.MasterAlias) if err != nil { return nil, err } @@ -824,7 +886,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo } } } - if ts.sourceKeyspace != ts.targetKeyspace { + if ts.sourceKeyspace != ts.targetKeyspace || ts.externalCluster != "" { ts.migrationType = binlogdatapb.MigrationType_TABLES } else { // TODO(sougou): for shard migration, validate that source and target combined @@ -838,7 +900,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo } } } - vs, err := ts.wr.ts.GetVSchema(ctx, ts.sourceKeyspace) + vs, err := sourceTopo.GetVSchema(ctx, ts.sourceKeyspace) if err != nil { return nil, err } @@ -944,11 +1006,15 @@ func hashStreams(targetKeyspace string, targets map[string]*tsTarget) int64 { func (ts *trafficSwitcher) validate(ctx context.Context) error { if ts.migrationType == binlogdatapb.MigrationType_TABLES { + sourceTopo := ts.wr.ts + if ts.externalTopo != nil { + sourceTopo = ts.externalTopo + } // All shards must be present. 
- if err := ts.compareShards(ctx, ts.sourceKeyspace, ts.sourceShards()); err != nil { + if err := ts.compareShards(ctx, ts.sourceKeyspace, ts.sourceShards(), sourceTopo); err != nil { return err } - if err := ts.compareShards(ctx, ts.targetKeyspace, ts.targetShards()); err != nil { + if err := ts.compareShards(ctx, ts.targetKeyspace, ts.targetShards(), ts.wr.ts); err != nil { return err } // Wildcard table names not allowed. @@ -961,12 +1027,12 @@ func (ts *trafficSwitcher) validate(ctx context.Context) error { return nil } -func (ts *trafficSwitcher) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo) error { +func (ts *trafficSwitcher) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo, topo *topo.Server) error { var shards []string for _, si := range sis { shards = append(shards, si.ShardName()) } - topoShards, err := ts.wr.ts.GetShardNames(ctx, keyspace) + topoShards, err := topo.GetShardNames(ctx, keyspace) if err != nil { return err } @@ -1020,6 +1086,12 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, } else { fromShards, toShards = ts.targetShards(), ts.sourceShards() } + if err := ts.wr.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "Before switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + ts.targetKeyspace, strings.Join(cells, ",")) + log.Errorf("%w", err2) + return err2 + } for _, servedType := range servedTypes { if err := ts.wr.updateShardRecords(ctx, ts.sourceKeyspace, fromShards, cells, servedType, true /* isFrom */, false /* clearSourceShards */); err != nil { return err @@ -1032,6 +1104,12 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, return err } } + if err := ts.wr.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "After switching shard reads, found SrvKeyspace for %s is 
corrupt in cell %s", + ts.targetKeyspace, strings.Join(cells, ",")) + log.Errorf("%w", err2) + return err2 + } return nil } @@ -1109,7 +1187,7 @@ func (ts *trafficSwitcher) changeTableSourceWrites(ctx context.Context, access a }); err != nil { return err } - return ts.wr.RefreshTabletsByShard(ctx, source.si, nil, nil) + return ts.wr.RefreshTabletsByShard(ctx, source.si, nil) }) } @@ -1344,7 +1422,7 @@ func (ts *trafficSwitcher) allowTableTargetWrites(ctx context.Context) error { }); err != nil { return err } - return ts.wr.RefreshTabletsByShard(ctx, target.si, nil, nil) + return ts.wr.RefreshTabletsByShard(ctx, target.si, nil) }) } @@ -1374,6 +1452,11 @@ func (ts *trafficSwitcher) changeWriteRoute(ctx context.Context) error { } func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error { + if err := ts.wr.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, ""); err != nil { + err2 := vterrors.Wrapf(err, "Before changing shard routes, found SrvKeyspace for %s is corrupt", ts.targetKeyspace) + log.Errorf("%w", err2) + return err2 + } err := ts.forAllSources(func(source *tsSource) error { _, err := ts.wr.ts.UpdateShardFields(ctx, ts.sourceKeyspace, source.si.ShardName(), func(si *topo.ShardInfo) error { si.IsMasterServing = false @@ -1394,7 +1477,16 @@ func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error { if err != nil { return err } - return ts.wr.ts.MigrateServedType(ctx, ts.targetKeyspace, ts.targetShards(), ts.sourceShards(), topodatapb.TabletType_MASTER, nil) + err = ts.wr.ts.MigrateServedType(ctx, ts.targetKeyspace, ts.targetShards(), ts.sourceShards(), topodatapb.TabletType_MASTER, nil) + if err != nil { + return err + } + if err := ts.wr.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, ""); err != nil { + err2 := vterrors.Wrapf(err, "After changing shard routes, found SrvKeyspace for %s is corrupt", ts.targetKeyspace) + log.Errorf("%w", err2) + return err2 + } + return nil } func (ts *trafficSwitcher) startReverseVReplication(ctx 
context.Context) error { @@ -1488,7 +1580,7 @@ func (ts *trafficSwitcher) dropSourceBlacklistedTables(ctx context.Context) erro }); err != nil { return err } - return ts.wr.RefreshTabletsByShard(ctx, source.si, nil, nil) + return ts.wr.RefreshTabletsByShard(ctx, source.si, nil) }) } @@ -1637,6 +1729,7 @@ func (ts *trafficSwitcher) dropSourceReverseVReplicationStreams(ctx context.Cont } func (ts *trafficSwitcher) removeTargetTables(ctx context.Context) error { + log.Infof("removeTargetTables") err := ts.forAllTargets(func(target *tsTarget) error { for _, tableName := range ts.tables { query := fmt.Sprintf("drop table %s.%s", target.master.DbName(), tableName) @@ -1725,3 +1818,41 @@ func reverseName(workflow string) string { } return workflow + reverse } + +// addParticipatingTablesToKeyspace updates the vschema with the new tables that were created as part of the +// Migrate flow. It is called when the Migrate flow is Completed +func (ts *trafficSwitcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + var err error + var vschema *vschemapb.Keyspace + vschema, err = ts.wr.ts.GetVSchema(ctx, keyspace) + if err != nil { + return err + } + if vschema == nil { + return fmt.Errorf("no vschema found for keyspace %s", keyspace) + } + if vschema.Tables == nil { + vschema.Tables = make(map[string]*vschemapb.Table) + } + if strings.HasPrefix(tableSpecs, "{") { // user defined the vschema snippet, typically for a sharded target + wrap := fmt.Sprintf(`{"tables": %s}`, tableSpecs) + ks := &vschemapb.Keyspace{} + if err := json2.Unmarshal([]byte(wrap), ks); err != nil { + return err + } + if err != nil { + return err + } + for table, vtab := range ks.Tables { + vschema.Tables[table] = vtab + } + } else { + if vschema.Sharded { + return fmt.Errorf("no sharded vschema was provided, so you will need to update the vschema of the target manually for the moved tables") + } + for _, table := range ts.tables { + vschema.Tables[table] = 
&vschemapb.Table{} + } + } + return ts.wr.ts.SaveVSchema(ctx, keyspace, vschema) +} diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index 6f6fcb035ea..c64a9a4a2a1 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -194,7 +194,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflow, sourceC return nil, vterrors.Wrap(err, "buildVDiffPlan") } - if err := df.selectTablets(ctx); err != nil { + if err := df.selectTablets(ctx, ts); err != nil { return nil, vterrors.Wrap(err, "selectTablets") } defer func(ctx context.Context) { @@ -492,7 +492,7 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []int) *e } ob := make([]engine.OrderbyParams, 0, len(comparePKs)) for _, cpk := range comparePKs { - ob = append(ob, engine.OrderbyParams{Col: cpk}) + ob = append(ob, engine.OrderbyParams{Col: cpk, WeightStringCol: -1}) } return &engine.MergeSort{ Primitives: prims, @@ -501,7 +501,7 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []int) *e } // selectTablets selects the tablets that will be used for the diff. 
-func (df *vdiff) selectTablets(ctx context.Context) error { +func (df *vdiff) selectTablets(ctx context.Context, ts *trafficSwitcher) error { var wg sync.WaitGroup var err1, err2 error @@ -510,7 +510,11 @@ func (df *vdiff) selectTablets(ctx context.Context) error { go func() { defer wg.Done() err1 = df.forAll(df.sources, func(shard string, source *shardStreamer) error { - tp, err := discovery.NewTabletPicker(df.ts.wr.ts, []string{df.sourceCell}, df.ts.sourceKeyspace, shard, df.tabletTypesStr) + sourceTopo := df.ts.wr.ts + if ts.externalTopo != nil { + sourceTopo = ts.externalTopo + } + tp, err := discovery.NewTabletPicker(sourceTopo, []string{df.sourceCell}, df.ts.sourceKeyspace, shard, df.tabletTypesStr) if err != nil { return err } diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index 5b7259800a5..35b75ebded2 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -30,19 +30,20 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + vtctldvexec "vitess.io/vitess/go/vt/vtctl/workflow/vexec" // renamed to avoid a collision with the vexec struct in this package "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) const ( - vexecTableQualifier = "_vt" - vreplicationTableName = "vreplication" - schemaMigrationsTableName = "schema_migrations" + vexecTableQualifier = "_vt" + vreplicationTableName = "vreplication" ) // vexec is the construct by which we run a query against backend shards. 
vexec is created by user-facing @@ -81,7 +82,6 @@ func newVExec(ctx context.Context, workflow, keyspace, query string, wr *Wrangle // QueryResultForRowsAffected aggregates results into row-type results (fields + values) func (wr *Wrangler) QueryResultForRowsAffected(results map[*topo.TabletInfo]*sqltypes.Result) *sqltypes.Result { var qr = &sqltypes.Result{} - qr.RowsAffected = uint64(len(results)) qr.Fields = []*querypb.Field{{ Name: "Tablet", Type: sqltypes.VarBinary, @@ -102,7 +102,6 @@ func (wr *Wrangler) QueryResultForRowsAffected(results map[*topo.TabletInfo]*sql // QueryResultForTabletResults aggregates given results into a "rows-affected" type result (no row data) func (wr *Wrangler) QueryResultForTabletResults(results map[*topo.TabletInfo]*sqltypes.Result) *sqltypes.Result { var qr = &sqltypes.Result{} - qr.RowsAffected = uint64(len(results)) defaultFields := []*querypb.Field{{ Name: "Tablet", Type: sqltypes.VarBinary, @@ -511,7 +510,8 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ where = " where state <> 'Stopped'" } query := "select distinct workflow from _vt.vreplication" + where - results, err := wr.runVexec(ctx, "", keyspace, query, false) + vx := vtctldvexec.NewVExec(keyspace, "", wr.ts, wr.tmc) + results, err := vx.QueryContext(ctx, query) if err != nil { return nil, err } diff --git a/go/vt/wrangler/vexec_plan.go b/go/vt/wrangler/vexec_plan.go index 4beb5be4fd9..a279a4145e2 100644 --- a/go/vt/wrangler/vexec_plan.go +++ b/go/vt/wrangler/vexec_plan.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "github.com/olekukonko/tablewriter" @@ -190,7 +191,7 @@ func qualifiedTableName(tableName string) string { // getPlanner returns a specific planner appropriate for the queried table func (vx *vexec) getPlanner(ctx context.Context) error { 
switch vx.tableName { - case qualifiedTableName(schemaMigrationsTableName): + case qualifiedTableName(schema.SchemaMigrationsTableName): vx.planner = newSchemaMigrationsPlanner(vx) case qualifiedTableName(vreplicationTableName): vx.planner = newVReplicationPlanner(vx) @@ -254,7 +255,7 @@ func (vx *vexec) addDefaultWheres(planner vexecPlanner, where *sqlparser.Where) expr := &sqlparser.ComparisonExpr{ Left: &sqlparser.ColName{Name: sqlparser.NewColIdent(plannerParams.dbNameColumn)}, Operator: sqlparser.EqualOp, - Right: sqlparser.NewStrLiteral([]byte(vx.masters[0].DbName())), + Right: sqlparser.NewStrLiteral(vx.masters[0].DbName()), } if newWhere == nil { newWhere = &sqlparser.Where{ @@ -272,7 +273,7 @@ func (vx *vexec) addDefaultWheres(planner vexecPlanner, where *sqlparser.Where) expr := &sqlparser.ComparisonExpr{ Left: &sqlparser.ColName{Name: sqlparser.NewColIdent(plannerParams.workflowColumn)}, Operator: sqlparser.EqualOp, - Right: sqlparser.NewStrLiteral([]byte(vx.workflow)), + Right: sqlparser.NewStrLiteral(vx.workflow), } newWhere.Expr = &sqlparser.AndExpr{ Left: newWhere.Expr, diff --git a/go/vt/wrangler/vexec_test.go b/go/vt/wrangler/vexec_test.go index 1ed3ccdc1d1..43dbde69db4 100644 --- a/go/vt/wrangler/vexec_test.go +++ b/go/vt/wrangler/vexec_test.go @@ -378,7 +378,7 @@ func TestVExecValidations(t *testing.T) { { name: "unsupported query", query: "describe _vt.vreplication", - errorString: "query not supported by vexec: otherread", + errorString: "query not supported by vexec: explain _vt.vreplication", }, } for _, bq := range badQueries { diff --git a/go/vt/wrangler/workflow.go b/go/vt/wrangler/workflow.go index e3db75a3acd..2c2e57be595 100644 --- a/go/vt/wrangler/workflow.go +++ b/go/vt/wrangler/workflow.go @@ -22,6 +22,7 @@ type VReplicationWorkflowType int const ( MoveTablesWorkflow = VReplicationWorkflowType(iota) ReshardWorkflow + MigrateWorkflow ) // Workflow state display strings @@ -54,6 +55,7 @@ func (vrw *VReplicationWorkflow) String() string 
{ // VReplicationWorkflowParams stores args and options passed to a VReplicationWorkflow command type VReplicationWorkflowParams struct { + WorkflowType VReplicationWorkflowType Workflow, TargetKeyspace string Cells, TabletTypes, ExcludeTables string EnableReverseReplication, DryRun bool @@ -68,6 +70,10 @@ type VReplicationWorkflowParams struct { // Reshard specific SourceShards, TargetShards []string SkipSchemaCopy bool + AutoStart, StopAfterCopy bool + + // Migrate specific + ExternalCluster string } // NewVReplicationWorkflow sets up a MoveTables or Reshard workflow based on options provided, deduces the state of the @@ -165,7 +171,7 @@ func (vrw *VReplicationWorkflow) Create(ctx context.Context) error { return fmt.Errorf("workflow has already been created, state is %s", vrw.CachedState()) } switch vrw.workflowType { - case MoveTablesWorkflow: + case MoveTablesWorkflow, MigrateWorkflow: err = vrw.initMoveTables() case ReshardWorkflow: excludeTables := strings.Split(vrw.params.ExcludeTables, ",") @@ -240,7 +246,7 @@ func (vrw *VReplicationWorkflow) GetStreamCount() (int64, int64, []*WorkflowErro return totalStreams, runningStreams, workflowErrors, nil } -// SwitchTraffic switches traffic forward for tablet_types passed +// SwitchTraffic switches traffic in the direction passed for specified tablet_types func (vrw *VReplicationWorkflow) SwitchTraffic(direction TrafficSwitchDirection) (*[]string, error) { var dryRunResults []string var rdDryRunResults, wrDryRunResults *[]string @@ -251,6 +257,9 @@ func (vrw *VReplicationWorkflow) SwitchTraffic(direction TrafficSwitchDirection) if !vrw.Exists() { return nil, fmt.Errorf("workflow has not yet been started") } + if vrw.workflowType == MigrateWorkflow { + return nil, fmt.Errorf("invalid action for Migrate workflow: SwitchTraffic") + } isCopyInProgress, err = vrw.IsCopyInProgress() if err != nil { @@ -289,6 +298,9 @@ func (vrw *VReplicationWorkflow) ReverseTraffic() (*[]string, error) { if !vrw.Exists() { return nil, 
fmt.Errorf("workflow has not yet been started") } + if vrw.workflowType == MigrateWorkflow { + return nil, fmt.Errorf("invalid action for Migrate workflow: ReverseTraffic") + } return vrw.SwitchTraffic(DirectionBackward) } @@ -300,7 +312,15 @@ const ( // Complete cleans up a successful workflow func (vrw *VReplicationWorkflow) Complete() (*[]string, error) { + var dryRunResults *[]string + var err error ws := vrw.ws + + if vrw.workflowType == MigrateWorkflow { + return vrw.wr.finalizeMigrateWorkflow(vrw.ctx, ws.TargetKeyspace, ws.Workflow, vrw.params.Tables, + false, vrw.params.KeepData, vrw.params.DryRun) + } + if !ws.WritesSwitched || len(ws.ReplicaCellsNotSwitched) > 0 || len(ws.RdonlyCellsNotSwitched) > 0 { return nil, fmt.Errorf(ErrWorkflowNotFullySwitched) } @@ -310,10 +330,8 @@ func (vrw *VReplicationWorkflow) Complete() (*[]string, error) { } else { renameTable = DropTable } - var dryRunResults *[]string - var err error - if dryRunResults, err = vrw.wr.DropSources(vrw.ctx, vrw.ws.TargetKeyspace, vrw.ws.Workflow, renameTable, vrw.params.KeepData, - false, vrw.params.DryRun); err != nil { + if dryRunResults, err = vrw.wr.DropSources(vrw.ctx, vrw.ws.TargetKeyspace, vrw.ws.Workflow, renameTable, + false, vrw.params.KeepData, vrw.params.DryRun); err != nil { return nil, err } return dryRunResults, nil @@ -322,6 +340,12 @@ func (vrw *VReplicationWorkflow) Complete() (*[]string, error) { // Cancel deletes all artifacts from a workflow which has not yet been switched func (vrw *VReplicationWorkflow) Cancel() error { ws := vrw.ws + if vrw.workflowType == MigrateWorkflow { + _, err := vrw.wr.finalizeMigrateWorkflow(vrw.ctx, ws.TargetKeyspace, ws.Workflow, "", + true, vrw.params.KeepData, vrw.params.DryRun) + return err + } + if ws.WritesSwitched || len(ws.ReplicaCellsSwitched) > 0 || len(ws.RdonlyCellsSwitched) > 0 { return fmt.Errorf(ErrWorkflowPartiallySwitched) } @@ -377,13 +401,14 @@ func (vrw *VReplicationWorkflow) parseTabletTypes() (hasReplica, hasRdonly, hasM 
func (vrw *VReplicationWorkflow) initMoveTables() error { log.Infof("In VReplicationWorkflow.initMoveTables() for %+v", vrw) return vrw.wr.MoveTables(vrw.ctx, vrw.params.Workflow, vrw.params.SourceKeyspace, vrw.params.TargetKeyspace, - vrw.params.Tables, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.AllTables, vrw.params.ExcludeTables) + vrw.params.Tables, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.AllTables, vrw.params.ExcludeTables, + vrw.params.AutoStart, vrw.params.StopAfterCopy, vrw.params.ExternalCluster) } func (vrw *VReplicationWorkflow) initReshard() error { log.Infof("In VReplicationWorkflow.initReshard() for %+v", vrw) return vrw.wr.Reshard(vrw.ctx, vrw.params.TargetKeyspace, vrw.params.Workflow, vrw.params.SourceShards, - vrw.params.TargetShards, vrw.params.SkipSchemaCopy, vrw.params.Cells, vrw.params.TabletTypes) + vrw.params.TargetShards, vrw.params.SkipSchemaCopy, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.AutoStart, vrw.params.StopAfterCopy) } func (vrw *VReplicationWorkflow) switchReads() (*[]string, error) { diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go index 787f5008fc7..9b9e1cd6fbe 100644 --- a/go/vt/wrangler/wrangler.go +++ b/go/vt/wrangler/wrangler.go @@ -42,19 +42,21 @@ var ( // Multiple go routines can use the same Wrangler at the same time, // provided they want to share the same logger / topo server / lock timeout. type Wrangler struct { - logger logutil.Logger - ts *topo.Server - tmc tmclient.TabletManagerClient - vtctld vtctlservicepb.VtctldServer + logger logutil.Logger + ts *topo.Server + tmc tmclient.TabletManagerClient + vtctld vtctlservicepb.VtctldServer + sourceTs *topo.Server } // New creates a new Wrangler object. 
func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient) *Wrangler { return &Wrangler{ - logger: logger, - ts: ts, - tmc: tmc, - vtctld: grpcvtctldserver.NewVtctldServer(ts), + logger: logger, + ts: ts, + tmc: tmc, + vtctld: grpcvtctldserver.NewVtctldServer(ts), + sourceTs: ts, } } diff --git a/go/vtbench/vtbench.go b/go/vtbench/vtbench.go index 34e030297c4..53a705b581c 100644 --- a/go/vtbench/vtbench.go +++ b/go/vtbench/vtbench.go @@ -231,7 +231,7 @@ func (bt *benchThread) clientLoop(ctx context.Context) { log.Errorf("query error: %v", err) break } else { - b.Rows.Add(int64(result.RowsAffected)) + b.Rows.Add(int64(len(result.Rows))) } } diff --git a/helm/release.sh b/helm/release.sh index 0c225f54e19..ffdd994f301 100755 --- a/helm/release.sh +++ b/helm/release.sh @@ -27,6 +27,11 @@ docker tag vitess/mysqlctld:$vt_base_version-buster vitess/mysqlctld:$vt_base_ve docker push vitess/mysqlctld:$vt_base_version-buster docker push vitess/mysqlctld:$vt_base_version +docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/mysqlctl:$vt_base_version-buster mysqlctl +docker tag vitess/mysqlctl:$vt_base_version-buster vitess/mysqlctl:$vt_base_version +docker push vitess/mysqlctl:$vt_base_version-buster +docker push vitess/mysqlctl:$vt_base_version + docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctl:$vt_base_version-buster vtctl docker tag vitess/vtctl:$vt_base_version-buster vitess/vtctl:$vt_base_version docker push vitess/vtctl:$vt_base_version-buster diff --git a/java/client/pom.xml b/java/client/pom.xml index 188cea73f35..d2251cd577b 100644 --- a/java/client/pom.xml +++ b/java/client/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 9.0.2-SNAPSHOT + 10.0.2 vitess-client diff --git a/java/example/pom.xml b/java/example/pom.xml index ceef8a55a18..d0318cee370 100644 --- a/java/example/pom.xml +++ b/java/example/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 9.0.2-SNAPSHOT + 10.0.2 vitess-example diff --git 
a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml index f53519e6e28..84062137934 100644 --- a/java/grpc-client/pom.xml +++ b/java/grpc-client/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 9.0.2-SNAPSHOT + 10.0.2 vitess-grpc-client diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml index c6c2a629c91..3cfec1ebb03 100644 --- a/java/jdbc/pom.xml +++ b/java/jdbc/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 9.0.2-SNAPSHOT + 10.0.2 vitess-jdbc diff --git a/java/pom.xml b/java/pom.xml index 16c1a4b4915..19009d74e50 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -11,7 +11,7 @@ io.vitess vitess-parent - 9.0.2-SNAPSHOT + 10.0.2 pom Vitess Java Client libraries [Parent] @@ -94,7 +94,7 @@ com.google.guava guava - 26.0-jre + 30.1.1-jre com.google.protobuf diff --git a/misc/git/hooks/visitorgen b/misc/git/hooks/asthelpers similarity index 83% rename from misc/git/hooks/visitorgen rename to misc/git/hooks/asthelpers index 65c04d613db..cd8a9ac547d 100755 --- a/misc/git/hooks/visitorgen +++ b/misc/git/hooks/asthelpers @@ -15,4 +15,4 @@ # this script, which should run before committing code, makes sure that the visitor is re-generated when the ast changes -go run ./go/vt/sqlparser/visitorgen/main -compareOnly=true -input=go/vt/sqlparser/ast.go -output=go/vt/sqlparser/rewriter.go \ No newline at end of file +go run ./go/tools/asthelpergen/main -in ./go/vt/sqlparser -verify=true -iface vitess.io/vitess/go/vt/sqlparser.SQLNode -except "*ColName" \ No newline at end of file diff --git a/misc/git/hooks/goimports b/misc/git/hooks/goimports index 389f3877223..a8859b5e104 100755 --- a/misc/git/hooks/goimports +++ b/misc/git/hooks/goimports @@ -19,7 +19,7 @@ # it has execute permissions. # # This script does not handle file names that contain spaces. 
-gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$') +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$' | grep -v '^go/vt/proto/') [ -z "$gofiles" ] && exit 0 unformatted=$(goimports -local vitess.io/vitess -l=true $gofiles 2>&1 | awk -F: '{print $1}') diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index fc647d19857..777affe1766 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -200,6 +200,10 @@ message BinlogSource { // StopAfterCopy specifies if vreplication should be stopped // after copying is done. bool stop_after_copy = 9; + + // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow + // it is of the type + string external_cluster = 10; } // VEventType enumerates the event types. Many of these types diff --git a/proto/mysqlctl.proto b/proto/mysqlctl.proto index 274f82e74c6..adcd921d004 100644 --- a/proto/mysqlctl.proto +++ b/proto/mysqlctl.proto @@ -54,3 +54,9 @@ service MysqlCtl { rpc ReinitConfig(ReinitConfigRequest) returns (ReinitConfigResponse) {}; rpc RefreshConfig(RefreshConfigRequest) returns (RefreshConfigResponse) {}; } + +// BackupInfo is the read-only attributes of a mysqlctl/backupstorage.BackupHandle. +message BackupInfo { + string name = 1; + string directory = 2; +} diff --git a/proto/query.proto b/proto/query.proto index 9b8d21bc0e0..4b9b2345f72 100644 --- a/proto/query.proto +++ b/proto/query.proto @@ -297,6 +297,24 @@ message ExecuteOptions { // skip_query_plan_cache specifies if the query plan should be cached by vitess. // By default all query plans are cached. bool skip_query_plan_cache = 10; + + enum PlannerVersion { + DEFAULT_PLANNER = 0; + V3 = 1; + Gen4 = 2; + Gen4Greedy = 3; + Gen4Left2Right = 4; + Gen4WithFallback = 5; + } + + // PlannerVersion specifies which planner to use. 
+ // If DEFAULT is chosen, whatever vtgate was started with will be used + PlannerVersion planner_version = 11; + + // has_created_temp_tables signals whether plans created in this session should be cached or not + // if the user has created temp tables, Vitess will not reuse plans created for this session in other sessions. + // The current session can still use other sessions cached plans. + bool has_created_temp_tables = 12; } // Field describes a single column returned by a query diff --git a/proto/topodata.proto b/proto/topodata.proto index f56d89977c1..85cce658e5b 100644 --- a/proto/topodata.proto +++ b/proto/topodata.proto @@ -406,3 +406,18 @@ message CellsAlias { // Cells that map to this alias repeated string cells = 2; } + +message TopoConfig { + string topo_type = 1; + string server = 2; + string root = 3; +} + +message ExternalVitessCluster { + TopoConfig topo_config = 1; +} + +// ExternalClusters +message ExternalClusters { + repeated ExternalVitessCluster vitess_cluster = 1; +} diff --git a/proto/vtadmin.proto b/proto/vtadmin.proto index bf9cc73bfe5..3b8ee1a9112 100644 --- a/proto/vtadmin.proto +++ b/proto/vtadmin.proto @@ -21,20 +21,51 @@ option go_package = "vitess.io/vitess/go/vt/proto/vtadmin"; package vtadmin; +import "tabletmanagerdata.proto"; import "topodata.proto"; +import "vschema.proto"; +import "vtctldata.proto"; /* Services */ // VTAdmin is the Vitess Admin API service. It provides RPCs that operate on // across a range of Vitess clusters. service VTAdmin { + // FindSchema returns a single Schema that matches the provided table name + // across all specified clusters IDs. Not specifying a set of cluster IDs + // causes the search to span all configured clusters. + // + // An error occurs if either no table exists across any of the clusters with + // the specified table name, or if multiple tables exist with that name. + rpc FindSchema(FindSchemaRequest) returns (Schema) {}; + // GetClusters returns all configured clusters. 
+ rpc GetClusters(GetClustersRequest) returns (GetClustersResponse) {}; // GetGates returns all gates across all the specified clusters. rpc GetGates(GetGatesRequest) returns (GetGatesResponse) {}; + // GetKeyspaces returns all keyspaces across the specified clusters. + rpc GetKeyspaces(GetKeyspacesRequest) returns (GetKeyspacesResponse) {}; + // GetSchema returns the schema for the specified (cluster, keyspace, table) + // tuple. + rpc GetSchema(GetSchemaRequest) returns (Schema) {}; + // GetSchemas returns all schemas across the specified clusters. + rpc GetSchemas(GetSchemasRequest) returns (GetSchemasResponse) {}; // GetTablet looks up a tablet by hostname across all clusters and returns // the result. rpc GetTablet(GetTabletRequest) returns (Tablet) {}; // GetTablets returns all tablets across all the specified clusters. rpc GetTablets(GetTabletsRequest) returns (GetTabletsResponse) {}; + // GetVSchema returns a VSchema for the specified keyspace in the specified + // cluster. + rpc GetVSchema(GetVSchemaRequest) returns (VSchema) {}; + // GetVSchemas returns the VSchemas for all specified clusters. + rpc GetVSchemas(GetVSchemasRequest) returns (GetVSchemasResponse) {}; + // GetWorkflow returns a single Workflow for a given cluster, keyspace, and + // workflow name. + rpc GetWorkflow(GetWorkflowRequest) returns (Workflow) {}; + // GetWorkflows returns the Workflows for all specified clusters. + rpc GetWorkflows(GetWorkflowsRequest) returns (GetWorkflowsResponse) {}; + // VTExplain provides information on how Vitess plans to execute a particular query. + rpc VTExplain(VTExplainRequest) returns (VTExplainResponse) {}; } /* Data types */ @@ -45,6 +76,43 @@ message Cluster { string name = 2; } +message ClusterWorkflows { + repeated Workflow workflows = 1; + // Warnings is a list of non-fatal errors encountered when fetching + // workflows for a particular cluster. 
+ repeated string warnings = 2; +} + +// Keyspace represents information about a keyspace in a particular Vitess +// cluster. +message Keyspace { + Cluster cluster = 1; + vtctldata.Keyspace keyspace = 2; + map shards = 3; +} + +message Schema { + Cluster cluster = 1; + string keyspace = 2; + + repeated tabletmanagerdata.TableDefinition table_definitions = 3; + // TableSizes is a mapping of table name to TableSize information. + map table_sizes = 4; + + message ShardTableSize { + uint64 row_count = 1; + uint64 data_length = 2; + } + + // TableSize aggregates table size information across all shards containing + // in the given keyspace and cluster, as well as per-shard size information. + message TableSize { + uint64 row_count = 1; + uint64 data_length = 2; + map by_shard = 3; + } +} + // Tablet groups the topo information of a tablet together with the Vitess // cluster it belongs to. message Tablet { @@ -60,6 +128,20 @@ message Tablet { ServingState state = 3; } +// VSchema represents the vschema for a keyspace in the cluster it belongs to. +message VSchema { + Cluster cluster = 1; + // Name is the name of the keyspace this VSchema is for. + string name = 2; + vschema.Keyspace v_schema = 3; +} + +// Vtctld represents information about a single Vtctld host. +message Vtctld { + string hostname = 1; + Cluster cluster = 2; +} + // VTGate represents information about a single VTGate host. message VTGate { // Hostname is the shortname of the VTGate. 
@@ -76,8 +158,26 @@ message VTGate { repeated string keyspaces = 5; } +message Workflow { + Cluster cluster = 1; + string keyspace = 2; + vtctldata.Workflow workflow = 3; +} + /* Request/Response types */ +message FindSchemaRequest { + string table = 1; + repeated string cluster_ids = 2; + GetSchemaTableSizeOptions table_size_options = 3; +} + +message GetClustersRequest {} + +message GetClustersResponse { + repeated Cluster clusters = 1; +} + message GetGatesRequest { repeated string cluster_ids = 1; } @@ -86,6 +186,34 @@ message GetGatesResponse { repeated VTGate gates = 1; } +message GetKeyspacesRequest { + repeated string cluster_ids = 1; +} + +message GetKeyspacesResponse { + repeated Keyspace keyspaces = 1; +} + +message GetSchemaRequest { + string cluster_id = 1; + string keyspace = 2; + string table = 3; + GetSchemaTableSizeOptions table_size_options = 4; +} + +message GetSchemasRequest { + repeated string cluster_ids = 1; + GetSchemaTableSizeOptions table_size_options = 2; +} + +message GetSchemasResponse { + repeated Schema schemas = 1; +} + +message GetSchemaTableSizeOptions { + bool aggregate_sizes = 1; +} + message GetTabletRequest { string hostname = 1; // ClusterIDs is an optional parameter to narrow the scope of the search, if @@ -101,3 +229,59 @@ message GetTabletsRequest { message GetTabletsResponse { repeated Tablet tablets = 1; } + +message GetVSchemaRequest { + string cluster_id = 1; + string keyspace = 2; +} + +message GetVSchemasRequest { + repeated string cluster_ids = 1; +} + +message GetVSchemasResponse { + repeated VSchema v_schemas = 1; +} + +message GetWorkflowRequest { + string cluster_id = 1; + string keyspace = 2; + string name = 3; + bool active_only = 4; +} + +message GetWorkflowsRequest { + repeated string cluster_ids = 1; + // ActiveOnly specifies whether to return workflows that are currently + // active (running or paused) instead of all workflows. 
+ bool active_only = 2; + // Keyspaces is a list of keyspaces to restrict the workflow search to. Note + // that the keyspaces list applies across all cluster IDs in the request. + // + // If, for example, you have two clusters, each with a keyspace called "foo" + // and want the workflows from "foo" in cluster1 but not from cluster2, you + // must make two requests. + // + // Keyspaces and IgnoreKeyspaces are mutually-exclusive, and Keyspaces takes + // precedence; if Keyspaces is a non-empty list, then IgnoreKeyspaces is + // ignored completely. + repeated string keyspaces = 3; + // IgnoreKeyspaces is a list of keyspaces to skip during the workflow + // search. It has the same semantics as the Keyspaces parameter, so refer to + // that documentation for more details. + repeated string ignore_keyspaces = 4; +} + +message GetWorkflowsResponse { + map workflows_by_cluster = 1; +} + +message VTExplainRequest { + string cluster = 1; + string keyspace = 2; + string sql = 3; +} + +message VTExplainResponse { + string response = 1; +} diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto index 1a7142ea09e..8a6f321b6d8 100644 --- a/proto/vtctldata.proto +++ b/proto/vtctldata.proto @@ -22,8 +22,14 @@ option go_package = "vitess.io/vitess/go/vt/proto/vtctldata"; package vtctldata; +import "binlogdata.proto"; import "logutil.proto"; +import "mysqlctl.proto"; +import "replicationdata.proto"; +import "tabletmanagerdata.proto"; import "topodata.proto"; +import "vschema.proto"; +import "vttime.proto"; // ExecuteVtctlCommandRequest is the payload for ExecuteVtctlCommand. // timeouts are in nanoseconds. @@ -37,11 +43,273 @@ message ExecuteVtctlCommandResponse { logutil.Event event = 1; } +// TableMaterializeSttings contains the settings for one table. +message TableMaterializeSettings { + string target_table = 1; + // source_expression is a select statement. + string source_expression = 2; + // create_ddl contains the DDL to create the target table. 
+ // If empty, the target table must already exist. + // if "copy", the target table DDL is the same as the source table. + string create_ddl = 3; +} + +// MaterializeSettings contains the settings for the Materialize command. +message MaterializeSettings { + // workflow is the name of the workflow. + string workflow = 1; + string source_keyspace = 2; + string target_keyspace = 3; + // stop_after_copy specifies if vreplication should be stopped after copying. + bool stop_after_copy = 4; + repeated TableMaterializeSettings table_settings = 5; + // optional parameters. + string cell = 6; + string tablet_types = 7; + // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow + // it is of the type + string external_cluster = 8; + +} + +/* Data types for VtctldServer */ + +message Keyspace { + string name = 1; + topodata.Keyspace keyspace = 2; +} + +message Shard { + string keyspace = 1; + string name = 2; + topodata.Shard shard = 3; +} + +// TODO: comment the hell out of this. 
+message Workflow { + string name = 1; + ReplicationLocation source = 2; + ReplicationLocation target = 3; + int64 max_v_replication_lag = 4; + map shard_streams = 5; + + message ReplicationLocation { + string keyspace = 1; + repeated string shards = 2; + } + + message ShardStream { + repeated Stream streams = 1; + repeated topodata.Shard.TabletControl tablet_controls = 2; + bool is_primary_serving = 3; + } + + message Stream { + int64 id = 1; + string shard = 2; + topodata.TabletAlias tablet = 3; + binlogdata.BinlogSource binlog_source = 4; + string position = 5; + string stop_position = 6; + string state = 7; + string db_name = 8; + vttime.Time transaction_timestamp = 9; + vttime.Time time_updated = 10; + string message = 11; + repeated CopyState copy_states = 12; + + message CopyState { + string table = 1; + string last_pk = 2; + } + } +} + +/* Request/response types for VtctldServer */ + +message ChangeTabletTypeRequest { + topodata.TabletAlias tablet_alias = 1; + topodata.TabletType db_type = 2; + bool dry_run = 3; +} + +message ChangeTabletTypeResponse { + topodata.Tablet before_tablet = 1; + topodata.Tablet after_tablet = 2; + bool was_dry_run = 3; +} + +message CreateKeyspaceRequest { + // Name is the name of the keyspace. + string name = 1; + // Force proceeds with the request even if the keyspace already exists. + bool force = 2; + // AllowEmptyVSchema allows a keyspace to be created with no vschema. + bool allow_empty_v_schema = 3; + + // ShardingColumnName specifies the column to use for sharding operations. + string sharding_column_name = 4; + // ShardingColumnType specifies the type of the column to use for sharding + // operations. + topodata.KeyspaceIdType sharding_column_type = 5; + + // ServedFroms specifies a set of db_type:keyspace pairs used to serve + // traffic for the keyspace. + repeated topodata.Keyspace.ServedFrom served_froms = 6; + + // Type is the type of the keyspace to create. 
+ topodata.KeyspaceType type = 7; + // BaseKeyspace specifies the base keyspace for SNAPSHOT keyspaces. It is + // required to create a SNAPSHOT keyspace. + string base_keyspace = 8; + // SnapshotTime specifies the snapshot time for this keyspace. It is required + // to create a SNAPSHOT keyspace. + vttime.Time snapshot_time = 9; +} + +message CreateKeyspaceResponse { + // Keyspace is the newly-created keyspace. + Keyspace keyspace = 1; +} + +message CreateShardRequest { + // Keyspace is the name of the keyspace to create the shard in. + string keyspace = 1; + // ShardName is the name of the shard to create. E.g. "-" or "-80". + string shard_name = 2; + // Force treats an attempt to create a shard that already exists as a + // non-error. + bool force = 3; + // IncludeParent creates the parent keyspace as an empty BASE keyspace, if it + // doesn't already exist. + bool include_parent = 4; +} + +message CreateShardResponse { + // Keyspace is the created keyspace. It is set only if IncludeParent was + // specified in the request and the parent keyspace needed to be created. + Keyspace keyspace = 1; + // Shard is the newly-created shard object. + Shard shard = 2; + // ShardAlreadyExists is set if Force was specified in the request and the + // shard already existed. + bool shard_already_exists = 3; +} + +message DeleteKeyspaceRequest { + // Keyspace is the name of the keyspace to delete. + string keyspace = 1; + // Recursive causes all shards in the keyspace to be recursively deleted + // before deleting the keyspace. It is an error to call DeleteKeyspace on a + // non-empty keyspace without also specifying Recursive. + bool recursive = 2; +} + +message DeleteKeyspaceResponse { +} + +message DeleteShardsRequest { + // Shards is the list of shards to delete. The nested topodatapb.Shard field + // is not required for DeleteShard, but the Keyspace and Shard fields are. + repeated Shard shards = 1; + // Recursive also deletes all tablets belonging to the shard(s). 
It is an + // error to call DeleteShard on a non-empty shard without also specifying + // Recursive. + bool recursive = 2; + // EvenIfServing allows a shard to be deleted even if it is serving, which is + // normally an error. Use with caution. + bool even_if_serving = 4; +} + +message DeleteShardsResponse { +} + +message DeleteTabletsRequest { + // TabletAliases is the list of tablets to delete. + repeated topodata.TabletAlias tablet_aliases = 1; + // AllowPrimary allows for the master/primary tablet of a shard to be deleted. + // Use with caution. + bool allow_primary = 2; +} + +message DeleteTabletsResponse { +} + +message EmergencyReparentShardRequest { + // Keyspace is the name of the keyspace to perform the Emergency Reparent in. + string keyspace = 1; + // Shard is the name of the shard to perform the Emergency Reparent in. + string shard = 2; + // Optional alias of a tablet that should become the new shard primary. If + // not specified, the vtctld will select the most up-to-date candidate to + // promote. + topodata.TabletAlias new_primary = 3; + // List of replica aliases to ignore during the Emergency Reparent. The vtctld + // will not attempt to stop replication on these tablets, nor attempt to + // demote any that may think they are the shard primary. + repeated topodata.TabletAlias ignore_replicas = 4; + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in reparenting. + vttime.Duration wait_replicas_timeout = 5; +} + +message EmergencyReparentShardResponse { + // Keyspace is the name of the keyspace the Emergency Reparent took place in. + string keyspace = 1; + // Shard is the name of the shard the Emergency Reparent took place in. + string shard = 2; + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date.
+ topodata.TabletAlias promoted_primary = 3; + repeated logutil.Event events = 4; +} + +message FindAllShardsInKeyspaceRequest { + string keyspace = 1; +} + +message FindAllShardsInKeyspaceResponse { + map shards = 1; +} + +message GetBackupsRequest { + string keyspace = 1; + string shard = 2; +} + +message GetBackupsResponse { + repeated mysqlctl.BackupInfo backups = 1; +} + +message GetCellInfoNamesRequest { +} + +message GetCellInfoNamesResponse { + repeated string names = 1; +} + +message GetCellInfoRequest { + string cell = 1; +} + +message GetCellInfoResponse { + topodata.CellInfo cell_info = 1; +} + +message GetCellsAliasesRequest { +} + +message GetCellsAliasesResponse { + map aliases = 1; +} + message GetKeyspacesRequest { } message GetKeyspacesResponse { - repeated Keyspace keyspaces = 1; + repeated Keyspace keyspaces = 1; } message GetKeyspaceRequest { @@ -52,46 +320,232 @@ message GetKeyspaceResponse { Keyspace keyspace = 1; } -message Keyspace { - string name = 1; - topodata.Keyspace keyspace = 2; +message GetSchemaRequest { + topodata.TabletAlias tablet_alias = 1; + // Tables is a list of tables for which we should gather information. Each is + // either an exact match, or a regular expression of the form /regexp/. + repeated string tables = 2; + // ExcludeTables is a list of tables to exclude from the result. Each is + // either an exact match, or a regular expression of the form /regexp/. + repeated string exclude_tables = 3; + // IncludeViews specifies whether to include views in the result. + bool include_views = 4; + // TableNamesOnly specifies whether to limit the results to just table names, + // rather than full schema information for each table. + bool table_names_only = 5; + // TableSizesOnly specifies whether to limit the results to just table sizes, + // rather than full schema information for each table. It is ignored if + // TableNamesOnly is set to true. 
+ bool table_sizes_only = 6; } -message FindAllShardsInKeyspaceRequest { +message GetSchemaResponse { + tabletmanagerdata.SchemaDefinition schema = 1; +} + +message GetShardRequest { string keyspace = 1; + string shard_name = 2; } -message FindAllShardsInKeyspaceResponse { - map shards = 1; +message GetShardResponse { + Shard shard = 1; } -message Shard { +message GetSrvKeyspacesRequest { string keyspace = 1; - string name = 2; - topodata.Shard shard = 3; + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. + repeated string cells = 2; } -// TableMaterializeSttings contains the settings for one table. -message TableMaterializeSettings { - string target_table = 1; - // source_expression is a select statement. - string source_expression = 2; - // create_ddl contains the DDL to create the target table. - // If empty, the target table must already exist. - // if "copy", the target table DDL is the same as the source table. - string create_ddl = 3; +message GetSrvKeyspacesResponse { + // SrvKeyspaces is a mapping of cell name to SrvKeyspace. + map srv_keyspaces = 1; } -// MaterializeSettings contains the settings for the Materialize command. -message MaterializeSettings { - // workflow is the name of the workflow. - string workflow = 1; - string source_keyspace = 2; - string target_keyspace = 3; - // stop_after_copy specifies if vreplication should be stopped after copying. - bool stop_after_copy = 4; - repeated TableMaterializeSettings table_settings = 5; - // optional parameters. - string cell = 6; - string tablet_types = 7; +message GetSrvVSchemaRequest { + string cell = 1; +} + +message GetSrvVSchemaResponse { + vschema.SrvVSchema srv_v_schema = 1; +} + +message GetTabletRequest { + topodata.TabletAlias tablet_alias = 1; +} + +message GetTabletResponse { + topodata.Tablet tablet = 1; +} + +message GetTabletsRequest { + // Keyspace is the name of the keyspace to return tablets for. 
Omit to return + // all tablets. + string keyspace = 1; + // Shard is the name of the shard to return tablets for. This field is ignored + // if Keyspace is not set. + string shard = 2; + // Cells is an optional set of cells to return tablets for. + repeated string cells = 3; + // Strict specifies how the server should treat failures from individual + // cells. + // + // When false (the default), GetTablets will return data from any cells that + // return successfully, but will fail the request if all cells fail. When + // true, any individual cell can fail the full request. + bool strict = 4; + // TabletAliases is an optional list of tablet aliases to fetch Tablet objects + // for. If specified, Keyspace, Shard, and Cells are ignored, and tablets are + // looked up by their respective aliases' Cells directly. + repeated topodata.TabletAlias tablet_aliases = 5; +} + +message GetTabletsResponse { + repeated topodata.Tablet tablets = 1; +} + +message GetVSchemaRequest { + string keyspace = 1; +} + +message GetVSchemaResponse { + vschema.Keyspace v_schema = 1; +} + +message GetWorkflowsRequest { + string keyspace = 1; + bool active_only = 2; +} + +message GetWorkflowsResponse { + repeated Workflow workflows = 1; +} + +message InitShardPrimaryRequest { + string keyspace = 1; + string shard = 2; + topodata.TabletAlias primary_elect_tablet_alias = 3; + bool force = 4; + vttime.Duration wait_replicas_timeout = 5; +} + +message InitShardPrimaryResponse { + repeated logutil.Event events = 1; +} + +message PlannedReparentShardRequest { + // Keyspace is the name of the keyspace to perform the Planned Reparent in. + string keyspace = 1; + // Shard is the name of the shard to perform the Planned Reparent in. + string shard = 2; + // NewPrimary is the alias of the tablet to promote to shard primary. If not + // specified, the vtctld will select the most up-to-date candidate to promote. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias.
+ topodata.TabletAlias new_primary = 3; + // AvoidPrimary is the alias of the tablet to demote. In other words, + // specifying an AvoidPrimary alias tells the vtctld to promote any replica + // other than this one. A shard whose current primary is not this one is then + // a no-op. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias. + topodata.TabletAlias avoid_primary = 4; + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in replication both before and after the reparent. The timeout is not + // cumulative across both wait periods, meaning that the replicas have + // WaitReplicasTimeout time to catch up before the reparent, and an additional + // WaitReplicasTimeout time to catch up after the reparent. + vttime.Duration wait_replicas_timeout = 5; +} + +message PlannedReparentShardResponse { + // Keyspace is the name of the keyspace the Planned Reparent took place in. + string keyspace = 1; + // Shard is the name of the shard the Planned Reparent took place in. + string shard = 2; + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date. + topodata.TabletAlias promoted_primary = 3; + repeated logutil.Event events = 4; +} + +message RemoveKeyspaceCellRequest { + string keyspace = 1; + string cell = 2; + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + bool force = 3; + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace. + bool recursive = 4; +} + +message RemoveKeyspaceCellResponse { + // (TODO:@amason) Consider including the deleted SrvKeyspace object and any + // deleted Tablet objects here. 
+} + +message RemoveShardCellRequest { + string keyspace = 1; + string shard_name = 2; + string cell = 3; + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + bool force = 4; + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace and shard. + bool recursive = 5; +} + +message RemoveShardCellResponse { + // (TODO:@amason) Consider including the deleted SrvKeyspacePartitions objects + // and any deleted Tablet objects here. +} + +message ReparentTabletRequest { + // Tablet is the alias of the tablet that should be reparented under the + // current shard primary. + topodata.TabletAlias tablet = 1; +} + +message ReparentTabletResponse { + // Keyspace is the name of the keyspace the tablet was reparented in. + string keyspace = 1; + // Shard is the name of the shard the tablet was reparented in. + string shard = 2; + // Primary is the alias of the tablet that the tablet was reparented under. + topodata.TabletAlias primary = 3; +} + +message ShardReplicationPositionsRequest { + string keyspace = 1; + string shard = 2; +} + +message ShardReplicationPositionsResponse { + // ReplicationStatuses is a mapping of tablet alias string to replication + // status for that tablet. + map replication_statuses = 1; + // TabletMap is the set of tablets whose replication statuses were queried, + // keyed by tablet alias. + map tablet_map = 2; +} + +message TabletExternallyReparentedRequest { + // Tablet is the alias of the tablet that was promoted externally and should + // be updated to the shard primary in the topo. 
+ topodata.TabletAlias tablet = 1; +} + +message TabletExternallyReparentedResponse { + string keyspace = 1; + string shard = 2; + topodata.TabletAlias new_primary = 3; + topodata.TabletAlias old_primary = 4; } diff --git a/proto/vtctlservice.proto b/proto/vtctlservice.proto index ebd82714774..c0c13c40f2c 100644 --- a/proto/vtctlservice.proto +++ b/proto/vtctlservice.proto @@ -31,10 +31,105 @@ service Vtctl { // Service Vtctld exposes gRPC endpoints for each vt command. service Vtctld { - // FindAllShardsInKeyspace returns a map of shard names to shard references for a given keyspace. + // ChangeTabletType changes the db type for the specified tablet, if possible. + // This is used primarily to arrange replicas, and it will not convert a + // primary. For that, use InitShardPrimary. + // + // NOTE: This command automatically updates the serving graph. + rpc ChangeTabletType(vtctldata.ChangeTabletTypeRequest) returns (vtctldata.ChangeTabletTypeResponse) {}; + // CreateKeyspace creates the specified keyspace in the topology. For a + // SNAPSHOT keyspace, the request must specify the name of a base keyspace, + // as well as a snapshot time. + rpc CreateKeyspace(vtctldata.CreateKeyspaceRequest) returns (vtctldata.CreateKeyspaceResponse) {}; + // CreateShard creates the specified shard in the topology. + rpc CreateShard(vtctldata.CreateShardRequest) returns (vtctldata.CreateShardResponse) {}; + // DeleteKeyspace deletes the specified keyspace from the topology. In + // recursive mode, it also recursively deletes all shards in the keyspace. + // Otherwise, the keyspace must be empty (have no shards), or DeleteKeyspace + // returns an error. + rpc DeleteKeyspace(vtctldata.DeleteKeyspaceRequest) returns (vtctldata.DeleteKeyspaceResponse) {}; + // DeleteShards deletes the specified shards from the topology. In recursive + // mode, it also deletes all tablets belonging to the shard. 
Otherwise, the + // shard must be empty (have no tablets) or DeleteShards returns an error for + // that shard. + rpc DeleteShards(vtctldata.DeleteShardsRequest) returns (vtctldata.DeleteShardsResponse) {}; + // DeleteTablets deletes one or more tablets from the topology. + rpc DeleteTablets(vtctldata.DeleteTabletsRequest) returns (vtctldata.DeleteTabletsResponse) {}; + // EmergencyReparentShard reparents the shard to the new primary. It assumes + // the old primary is dead or otherwise not responding. + rpc EmergencyReparentShard(vtctldata.EmergencyReparentShardRequest) returns (vtctldata.EmergencyReparentShardResponse) {}; + // FindAllShardsInKeyspace returns a map of shard names to shard references + // for a given keyspace. rpc FindAllShardsInKeyspace(vtctldata.FindAllShardsInKeyspaceRequest) returns (vtctldata.FindAllShardsInKeyspaceResponse) {}; + // GetBackups returns all the backups for a shard. + rpc GetBackups(vtctldata.GetBackupsRequest) returns (vtctldata.GetBackupsResponse) {}; + // GetCellInfoNames returns all the cells for which we have a CellInfo object, + // meaning we have a topology service registered. + rpc GetCellInfoNames(vtctldata.GetCellInfoNamesRequest) returns (vtctldata.GetCellInfoNamesResponse) {}; + // GetCellInfo returns the information for a cell. + rpc GetCellInfo(vtctldata.GetCellInfoRequest) returns (vtctldata.GetCellInfoResponse) {}; + // GetCellsAliases returns a mapping of cell alias to cells identified by that + // alias. + rpc GetCellsAliases(vtctldata.GetCellsAliasesRequest) returns (vtctldata.GetCellsAliasesResponse) {}; // GetKeyspace reads the given keyspace from the topo and returns it. rpc GetKeyspace(vtctldata.GetKeyspaceRequest) returns (vtctldata.GetKeyspaceResponse) {}; // GetKeyspaces returns the keyspace struct of all keyspaces in the topo. 
rpc GetKeyspaces(vtctldata.GetKeyspacesRequest) returns (vtctldata.GetKeyspacesResponse) {}; + // GetSchema returns the schema for a tablet, or just the schema for the + // specified tables in that tablet. + rpc GetSchema(vtctldata.GetSchemaRequest) returns (vtctldata.GetSchemaResponse) {}; + // GetShard returns information about a shard in the topology. + rpc GetShard(vtctldata.GetShardRequest) returns (vtctldata.GetShardResponse) {}; + // GetSrvKeyspaces returns the SrvKeyspaces for a keyspace in one or more + // cells. + rpc GetSrvKeyspaces (vtctldata.GetSrvKeyspacesRequest) returns (vtctldata.GetSrvKeyspacesResponse) {}; + // GetSrvVSchema returns the SrvVSchema for a cell. + rpc GetSrvVSchema(vtctldata.GetSrvVSchemaRequest) returns (vtctldata.GetSrvVSchemaResponse) {}; + // GetTablet returns information about a tablet. + rpc GetTablet(vtctldata.GetTabletRequest) returns (vtctldata.GetTabletResponse) {}; + // GetTablets returns tablets, optionally filtered by keyspace and shard. + rpc GetTablets(vtctldata.GetTabletsRequest) returns (vtctldata.GetTabletsResponse) {}; + // GetVSchema returns the vschema for a keyspace. + rpc GetVSchema(vtctldata.GetVSchemaRequest) returns (vtctldata.GetVSchemaResponse) {}; + // GetWorkflows returns a list of workflows for the given keyspace. + rpc GetWorkflows(vtctldata.GetWorkflowsRequest) returns (vtctldata.GetWorkflowsResponse) {}; + // InitShardPrimary sets the initial primary for a shard. Will make all other + // tablets in the shard replicas of the provided primary. + // + // WARNING: This could cause data loss on an already replicating shard. + // PlannedReparentShard or EmergencyReparentShard should be used in those + // cases instead. + rpc InitShardPrimary(vtctldata.InitShardPrimaryRequest) returns (vtctldata.InitShardPrimaryResponse) {}; + // PlannedReparentShard reparents the shard to the new primary, or away from + // an old primary. Both the old and new primaries need to be reachable and + // running.
+ // + // **NOTE**: The vtctld will not consider any replicas outside the cell the + // current shard primary is in for promotion unless NewPrimary is explicitly + // provided in the request. + rpc PlannedReparentShard(vtctldata.PlannedReparentShardRequest) returns (vtctldata.PlannedReparentShardResponse) {}; + // RemoveKeyspaceCell removes the specified cell from the Cells list for all + // shards in the specified keyspace, as well as from the SrvKeyspace for that + // keyspace in that cell. + rpc RemoveKeyspaceCell(vtctldata.RemoveKeyspaceCellRequest) returns (vtctldata.RemoveKeyspaceCellResponse) {}; + // RemoveShardCell removes the specified cell from the specified shard's Cells + // list. + rpc RemoveShardCell(vtctldata.RemoveShardCellRequest) returns (vtctldata.RemoveShardCellResponse) {}; + // ReparentTablet reparents a tablet to the current primary in the shard. This + // only works if the current replica position matches the last known reparent + // action. + rpc ReparentTablet(vtctldata.ReparentTabletRequest) returns (vtctldata.ReparentTabletResponse) {}; + // ShardReplicationPositions returns the replication position of each tablet + // in a shard. This RPC makes a best-effort to return partial results. For + // example, if one tablet in the shard graph is unreachable, then + // ShardReplicationPositions will return non-error, and include valid results + // for the reachable tablets. + rpc ShardReplicationPositions(vtctldata.ShardReplicationPositionsRequest) returns (vtctldata.ShardReplicationPositionsResponse) {}; + // TabletExternallyReparented changes metadata in the topology server to + // acknowledge a shard primary change performed by an external tool (e.g. + // orchestrator). + // + // See the Reparenting guide for more information: + // https://vitess.io/docs/user-guides/configuration-advanced/reparenting/#external-reparenting. 
+ rpc TabletExternallyReparented(vtctldata.TabletExternallyReparentedRequest) returns (vtctldata.TabletExternallyReparentedResponse) {}; } diff --git a/proto/vtgate.proto b/proto/vtgate.proto index 43723c5bcd5..e2714899d79 100644 --- a/proto/vtgate.proto +++ b/proto/vtgate.proto @@ -21,7 +21,7 @@ option go_package = "vitess.io/vitess/go/vt/proto/vtgate"; package vtgate; -option java_package="io.vitess.proto"; +option java_package = "io.vitess.proto"; import "binlogdata.proto"; import "query.proto"; @@ -114,7 +114,7 @@ message Session { // user_defined_variables contains all the @variables defined for this session map user_defined_variables = 13; - + // system_variables keeps track of all session variables set for this connection // TODO: systay should we keep this so we can apply it ordered? map system_variables = 14; @@ -143,6 +143,9 @@ message Session { // Session UUID string SessionUUID = 22; + + // enable_system_settings defines if we can use reserved connections. + bool enable_system_settings = 23; } // ReadAfterWrite contains information regarding gtid set and timeout @@ -267,6 +270,10 @@ message ResolveTransactionRequest { message ResolveTransactionResponse { } +message VStreamFlags { + bool minimize_skew = 1; +} + // VStreamRequest is the payload for VStream. message VStreamRequest { vtrpc.CallerID caller_id = 1; @@ -278,6 +285,7 @@ message VStreamRequest { // position is of the form 'ks1:0@MySQL56/|ks2:-80@MySQL56/'. binlogdata.VGtid vgtid = 3; binlogdata.Filter filter = 4; + VStreamFlags flags = 5; } // VStreamResponse is streamed by VStream. 
diff --git a/proto/vttime.proto b/proto/vttime.proto index 5224fcb9d12..33bc59348b7 100644 --- a/proto/vttime.proto +++ b/proto/vttime.proto @@ -28,3 +28,7 @@ message Time { int32 nanoseconds = 2; } +message Duration { + int64 seconds = 1; + int32 nanos = 2; +} diff --git a/resources/bin/gh-ost b/resources/bin/gh-ost index 299ebfb846b..d54349be8da 100644 Binary files a/resources/bin/gh-ost and b/resources/bin/gh-ost differ diff --git a/test.go b/test.go index 1e324a97d92..16fd50fc14f 100755 --- a/test.go +++ b/test.go @@ -83,7 +83,7 @@ var ( pull = flag.Bool("pull", true, "re-pull the bootstrap image, in case it's been updated") docker = flag.Bool("docker", true, "run tests with Docker") useDockerCache = flag.Bool("use_docker_cache", false, "if true, create a temporary Docker image to cache the source code and the binaries generated by 'make build'. Used for execution on Travis CI.") - shard = flag.Int("shard", -1, "if N>=0, run the tests whose Shard field matches N") + shard = flag.String("shard", "", "if non-empty, run the tests whose Shard field matches value") tag = flag.String("tag", "", "if provided, only run tests with the given tag. Can't be combined with -shard or explicit test list") exclude = flag.String("exclude", "", "if provided, exclude tests containing any of the given tags (comma delimited)") keepData = flag.Bool("keep-data", false, "don't delete the per-test VTDATAROOT subfolders") @@ -123,7 +123,7 @@ type Test struct { Manual bool // Shard is used to split tests among workers. - Shard int + Shard string // RetryMax is the maximum number of times a test will be retried. // If 0, flag *retryMax is used. @@ -705,7 +705,7 @@ func getTestsSorted(names []string, testMap map[string]*Test) []*Test { func selectedTests(args []string, config *Config) []*Test { var tests []*Test excludedTests := strings.Split(*exclude, ",") - if *shard >= 0 { + if *shard != "" { // Run the tests in a given shard. // This can be combined with positional args. 
var names []string @@ -738,7 +738,7 @@ func selectedTests(args []string, config *Config) []*Test { tests = append(tests, t) } } - if len(args) == 0 && *shard < 0 { + if len(args) == 0 && *shard == "" { // Run all tests. var names []string for name, t := range config.Tests { diff --git a/test/README.md b/test/README.md new file mode 100644 index 00000000000..a1e0afe8782 --- /dev/null +++ b/test/README.md @@ -0,0 +1,44 @@ +##Github CI Workflows + +This document has a short outline of how tests are run in CI, how to add new tests and where these are configured. + +### Adding a new test + +Unit tests are run by the unit test runner, one per platform, currently percona56, mysql57, mysql80, mariadb101, mariadb102, mariadb103. +The workflow first installs the required database server before calling `make unit_test`. + +To add a new end-to-end (e2e) test (also called _cluster end to end_ tests): +* Add a new object to test/config.json +* If you are creating a new test _shard_: + * update `clusterList` in `ci_workflow_gen.go` + * `make generate_ci_workflows` +* If you are adding a new database platform, update the `templates\unit_test.tpl` to add + the platform specific packages and update `unitTestDatabases` + + +### Vitess test runner +The `.github/workflows` directory contains one yaml file per workflow. e2e tests are run using the `test.go` script +in the repository root. + +This script invokes the vitess e2e test framework using a json configuration file `test/config.json` which has one object per test. +Each test is of the form: + +```javascript +"vtgate": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vtgate"], + "Command": [], + "Manual": false, + "Shard": 17, + "RetryMax": 0, + "Tags": [] + }, +``` +The important parameters here are Args which define the arguments to `go test` and the Shard which says +which Test VM should run this test. All tests which have a common Shard value are run in the same test vm. 
+ +### Known Issue + +* Each VM does not seem to be able to create a lot of vttablets. For this reason we have had to split a few VReplication +e2e tests across Shards. We need to identify and if possible fix this limitation so that we can reduce the number of test Shards + \ No newline at end of file diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go new file mode 100644 index 00000000000..02c6f7f8a54 --- /dev/null +++ b/test/ci_workflow_gen.go @@ -0,0 +1,190 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "strings" + "text/template" +) + +const ( + workflowConfigDir = "../.github/workflows" + + unitTestTemplate = "templates/unit_test.tpl" + unitTestDatabases = "percona56, mysql57, mysql80, mariadb101, mariadb102, mariadb103" + + clusterTestTemplate = "templates/cluster_endtoend_test.tpl" +) + +var ( + clusterList = []string{ + "11", + "12", + "13", + "14", + "15", + "16", + "17", + "18", + "19", + "20", + "21", + "22", + "23", + "24", + "26", + "27", + "vreplication_basic", + "vreplication_multicell", + "vreplication_cellalias", + "vreplication_v2", + "onlineddl_ghost", + "onlineddl_vrepl", + "onlineddl_vrepl_stress", + "vreplication_migrate", + "onlineddl_revert", + "onlineddl_declarative", + "tabletmanager_throttler", + "tabletmanager_throttler_custom_config", + } + // TODO: currently some percona tools including xtrabackup are installed on all clusters, we can possibly optimize + // this by only installing them in the required clusters + clustersRequiringXtraBackup = clusterList + clustersRequiringMakeTools = []string{ + "18", + "24", + } +) + +type unitTest struct { + Name, Platform string +} + +type clusterTest struct { + Name, Shard string + MakeTools, InstallXtraBackup bool +} + +func mergeBlankLines(buf *bytes.Buffer) string { + var out []string + in := strings.Split(buf.String(), "\n") + lastWasBlank := false + for _, line := range in { + if strings.TrimSpace(line) == "" { + if lastWasBlank { + continue + } + lastWasBlank = true + } else { + lastWasBlank = false + } + + out = append(out, line) + } + return strings.Join(out, "\n") +} + +func main() { + generateUnitTestWorkflows() + generateClusterWorkflows() +} + +func canonnizeList(list []string) []string { + var output []string + for _, item := range list { + item = strings.TrimSpace(item) + if item != "" { + output = append(output, item) + } + } + return output +} +func parseList(csvList string) []string { + var list []string + for _, item 
:= range strings.Split(csvList, ",") { + list = append(list, strings.TrimSpace(item)) + } + return list +} + +func generateClusterWorkflows() { + clusters := canonnizeList(clusterList) + for _, cluster := range clusters { + test := &clusterTest{ + Name: fmt.Sprintf("Cluster (%s)", cluster), + Shard: cluster, + } + makeToolClusters := canonnizeList(clustersRequiringMakeTools) + for _, makeToolCluster := range makeToolClusters { + if makeToolCluster == cluster { + test.MakeTools = true + break + } + } + xtraBackupClusters := canonnizeList(clustersRequiringXtraBackup) + for _, xtraBackupCluster := range xtraBackupClusters { + if xtraBackupCluster == cluster { + test.InstallXtraBackup = true + break + } + } + + path := fmt.Sprintf("%s/cluster_endtoend_%s.yml", workflowConfigDir, cluster) + generateWorkflowFile(clusterTestTemplate, path, test) + } +} + +func generateUnitTestWorkflows() { + platforms := parseList(unitTestDatabases) + for _, platform := range platforms { + test := &unitTest{ + Name: fmt.Sprintf("Unit Test (%s)", platform), + Platform: platform, + } + path := fmt.Sprintf("%s/unit_test_%s.yml", workflowConfigDir, platform) + generateWorkflowFile(unitTestTemplate, path, test) + } +} + +func generateWorkflowFile(templateFile, path string, test interface{}) { + tpl, err := template.ParseFiles(templateFile) + if err != nil { + fmt.Printf("Error: %s\n", err) + return + } + + buf := &bytes.Buffer{} + err = tpl.Execute(buf, test) + if err != nil { + fmt.Printf("Error: %s\n", err) + return + } + + f, err := os.Create(path) + if err != nil { + log.Println("Error creating file: ", err) + return + } + f.WriteString("# DO NOT MODIFY: THIS FILE IS GENERATED USING \"make generate_ci_workflows\"\n\n") + f.WriteString(mergeBlankLines(buf)) + fmt.Printf("Generated %s\n", path) + +} diff --git a/test/config.json b/test/config.json index 95fba07112b..b33c0dca1ca 100644 --- a/test/config.json +++ b/test/config.json @@ -1,17 +1,17 @@ { "Tests": { - "java": { - "File": "", - 
"Args": [], - "Command": [ - "make", - "java_test" - ], - "Manual": false, - "Shard": 10, - "RetryMax": 0, - "Tags": [] - }, + "java": { + "File": "", + "Args": [], + "Command": [ + "make", + "java_test" + ], + "Manual": false, + "Shard": "10", + "RetryMax": 0, + "Tags": [] + }, "client_test": { "File": "", "Args": [], @@ -19,7 +19,7 @@ "test/client_test.sh" ], "Manual": false, - "Shard": 25, + "Shard": "25", "RetryMax": 0, "Tags": [] }, @@ -31,7 +31,7 @@ "e2e_test_race" ], "Manual": false, - "Shard": -1, + "Shard": "", "RetryMax": 0, "Tags": [] }, @@ -42,7 +42,7 @@ "tools/unit_test_runner.sh" ], "Manual": false, - "Shard": -1, + "Shard": "", "RetryMax": 0, "Tags": [] }, @@ -54,7 +54,7 @@ "unit_test_race" ], "Manual": false, - "Shard": 5, + "Shard": "5", "RetryMax": 0, "Tags": [] }, @@ -63,7 +63,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/backup/vtctlbackup"], "Command": [], "Manual": false, - "Shard": 11, + "Shard": "11", "RetryMax": 0, "Tags": [] }, @@ -72,7 +72,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/backup/mysqlctld"], "Command": [], "Manual": false, - "Shard": 21, + "Shard": "21", "RetryMax": 0, "Tags": [] }, @@ -81,7 +81,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/backup/vtbackup"], "Command": [], "Manual": false, - "Shard": 19, + "Shard": "19", "RetryMax": 0, "Tags": [] }, @@ -90,7 +90,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/backup/transform"], "Command": [], "Manual": false, - "Shard": 19, + "Shard": "19", "RetryMax": 0, "Tags": [] }, @@ -99,7 +99,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/backup/transform/mysqlctld"], "Command": [], "Manual": false, - "Shard": 21, + "Shard": "21", "RetryMax": 0, "Tags": [] }, @@ -108,7 +108,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/backup/xtrabackup"], "Command": [], "Manual": false, - "Shard": 20, + "Shard": "20", "RetryMax": 0, "Tags": [] }, @@ -117,7 +117,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/backup/xtrabackupstream"], "Command": [], "Manual": false, - "Shard": 20, + 
"Shard": "20", "RetryMax": 0, "Tags": [] }, @@ -126,7 +126,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/cellalias"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", "RetryMax": 0, "Tags": [] }, @@ -135,7 +135,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/preparestmt"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", "RetryMax": 0, "Tags": [] }, @@ -144,7 +144,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/mysqlserver"], "Command": [], "Manual": false, - "Shard": 24, + "Shard": "24", "RetryMax": 0, "Tags": [] }, @@ -153,7 +153,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/messaging"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", "RetryMax": 0, "Tags": [] }, @@ -162,7 +162,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/clustertest"], "Command": [], "Manual": false, - "Shard": 11, + "Shard": "11", "RetryMax": 0, "Tags": [] }, @@ -171,7 +171,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/encryption/encryptedreplication"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", "RetryMax": 0, "Tags": [] }, @@ -180,7 +180,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/encryption/encryptedtransport"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", "RetryMax": 0, "Tags": [] }, @@ -189,7 +189,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/initialsharding/v3"], "Command": [], "Manual": false, - "Shard": 13, + "Shard": "13", "RetryMax": 0, "Tags": [] }, @@ -198,7 +198,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/initialsharding/bytes"], "Command": [], "Manual": false, - "Shard": 13, + "Shard": "13", "RetryMax": 0, "Tags": [] }, @@ -207,7 +207,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/initialsharding/multi"], "Command": [], "Manual": false, - "Shard": -1, + "Shard": "", "RetryMax": 0, "Tags": [] }, @@ -216,7 +216,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/keyspace"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", 
"RetryMax": 0, "Tags": [ "site_test" @@ -227,7 +227,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/mergesharding/int"], "Command": [], "Manual": false, - "Shard": 22, + "Shard": "22", "RetryMax": 0, "Tags": [ "worker_test" @@ -238,7 +238,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/mergesharding/string"], "Command": [], "Manual": false, - "Shard": 22, + "Shard": "22", "RetryMax": 0, "Tags": [ "worker_test" @@ -249,7 +249,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/mysqlctl"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", "RetryMax": 0, "Tags": [ "site_test" @@ -260,18 +260,54 @@ "Args": ["vitess.io/vitess/go/test/endtoend/mysqlctld"], "Command": [], "Manual": false, - "Shard": 12, + "Shard": "12", "RetryMax": 0, "Tags": [ "site_test" ] }, - "onlineddl": { + "onlineddl_ghost": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/ghost"], + "Command": [], + "Manual": false, + "Shard": "onlineddl_ghost", + "RetryMax": 0, + "Tags": [] + }, + "onlineddl_vrepl": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl"], + "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl"], "Command": [], "Manual": false, - "Shard": 26, + "Shard": "onlineddl_vrepl", + "RetryMax": 0, + "Tags": [] + }, + "onlineddl_vrepl_stress": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl_stress"], + "Command": [], + "Manual": false, + "Shard": "onlineddl_vrepl_stress", + "RetryMax": 0, + "Tags": [] + }, + "onlineddl_revert": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/revert"], + "Command": [], + "Manual": false, + "Shard": "onlineddl_revert", + "RetryMax": 0, + "Tags": [] + }, + "onlineddl_declarative": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/declarative"], + "Command": [], + "Manual": false, + "Shard": "onlineddl_declarative", "RetryMax": 0, "Tags": [] }, @@ -280,7 +316,7 @@ 
"Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitr"], "Command": [], "Manual": false, - "Shard": 10, + "Shard": "10", "RetryMax": 0, "Tags": [ "site_test" @@ -291,7 +327,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitrtls"], "Command": [], "Manual": false, - "Shard": 26, + "Shard": "26", "RetryMax": 0, "Tags": [ "site_test" @@ -302,7 +338,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/recovery/unshardedrecovery"], "Command": [], "Manual": false, - "Shard": 11, + "Shard": "11", "RetryMax": 0, "Tags": [] }, @@ -311,7 +347,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/reparent"], "Command": [], "Manual": false, - "Shard": 14, + "Shard": "14", "RetryMax": 0, "Tags": [] }, @@ -320,7 +356,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/resharding/v3"], "Command": [], "Manual": false, - "Shard": 15, + "Shard": "15", "RetryMax": 0, "Tags": [ "worker_test" @@ -331,7 +367,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/resharding/string"], "Command": [], "Manual": false, - "Shard": 15, + "Shard": "15", "RetryMax": 0, "Tags": [ "worker_test" @@ -342,7 +378,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharded"], "Command": [], "Manual": false, - "Shard": 11, + "Shard": "11", "RetryMax": 0, "Tags": [] }, @@ -351,7 +387,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/recovery/shardedrecovery"], "Command": [], "Manual": false, - "Shard": 16, + "Shard": "16", "RetryMax": 0, "Tags": [] }, @@ -360,7 +396,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/tabletgateway/buffer"], "Command": [], "Manual": false, - "Shard": 13, + "Shard": "13", "RetryMax": 0, "Tags": [] }, @@ -369,7 +405,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/tabletgateway/cellalias"], "Command": [], "Manual": false, - "Shard": 13, + "Shard": "13", "RetryMax": 0, "Tags": [] }, @@ -378,7 +414,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/tabletgateway"], "Command": [], "Manual": false, - "Shard": 15, + "Shard": "15", "RetryMax": 0, "Tags": [] }, @@ -387,7 
+423,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager"], "Command": [], "Manual": false, - "Shard": 18, + "Shard": "18", "RetryMax": 0, "Tags": [ "site_test" @@ -400,7 +436,7 @@ ], "Command": [], "Manual": false, - "Shard": 18, + "Shard": "18", "RetryMax": 0, "Tags": [ "site_test" @@ -411,7 +447,18 @@ "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager/throttler"], "Command": [], "Manual": false, - "Shard": 18, + "Shard": "tabletmanager_throttler", + "RetryMax": 0, + "Tags": [ + "site_test" + ] + }, + "tabletmanager_throttler_custom_config": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager/throttler_custom_config"], + "Command": [], + "Manual": false, + "Shard": "tabletmanager_throttler_custom_config", "RetryMax": 0, "Tags": [ "site_test" @@ -422,7 +469,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager/tablegc"], "Command": [], "Manual": false, - "Shard": 27, + "Shard": "27", "RetryMax": 0, "Tags": [ "site_test" @@ -435,7 +482,7 @@ ], "Command": [], "Manual": false, - "Shard": 25, + "Shard": "25", "RetryMax": 0, "Tags": [ "site_test" @@ -446,7 +493,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/versionupgrade", "-keep-data", "-force-vtdataroot", "/tmp/vtdataroot/vtroot_10901", "-force-port-start", "11900", "-force-base-tablet-uid", "1190"], "Command": [], "Manual": false, - "Shard": 28, + "Shard": "28", "RetryMax": 0, "Tags": [] }, @@ -455,7 +502,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/sharding/verticalsplit"], "Command": [], "Manual": false, - "Shard": 16, + "Shard": "16", "RetryMax": 0, "Tags": [ "worker_test" @@ -466,7 +513,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -475,7 +522,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/buffer"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -484,7 +531,7 @@ "Args": 
["vitess.io/vitess/go/test/endtoend/vtgate/concurrentdml"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -493,7 +540,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schema"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -502,7 +549,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/sequence"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -511,7 +558,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/reservedconn"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -520,7 +567,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/reservedconn/reconnect1"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -529,7 +576,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/reservedconn/reconnect2"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -538,7 +585,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/transaction"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -547,7 +594,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/unsharded"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -556,7 +603,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/vschema"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -565,7 +612,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/readafterwrite"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -574,7 +621,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/topotest/zk2", "--topo-flavor=zk2"], "Command": [], "Manual": false, - "Shard": 25, + "Shard": "25", "RetryMax": 0, "Tags": [] }, @@ -583,7 +630,7 @@ "Args": 
["vitess.io/vitess/go/test/endtoend/topotest/consul", "--topo-flavor=consul"], "Command": [], "Manual": false, - "Shard": 18, + "Shard": "18", "RetryMax": 0, "Tags": [] }, @@ -592,7 +639,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/topotest/etcd2"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -601,7 +648,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtctldweb"], "Command": [], "Manual": false, - "Shard": 10, + "Shard": "10", "RetryMax": 0, "Tags": [] }, @@ -610,7 +657,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vtcombo"], "Command": [], "Manual": false, - "Shard": 25, + "Shard": "25", "RetryMax": 0, "Tags": [] }, @@ -619,7 +666,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/recovery/xtrabackup"], "Command": [], "Manual": false, - "Shard": 17, + "Shard": "17", "RetryMax": 0, "Tags": [] }, @@ -628,7 +675,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/worker"], "Command": [], "Manual": false, - "Shard": 23, + "Shard": "23", "RetryMax": 0, "Tags": [ "worker_test" @@ -639,8 +686,8 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "MultiCell"], "Command": [], "Manual": false, - "Shard": 22, - "RetryMax": 3, + "Shard": "vreplication_multicell", + "RetryMax": 0, "Tags": [] }, "vreplication_cellalias": { @@ -648,8 +695,8 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "CellAlias"], "Command": [], "Manual": false, - "Shard": 23, - "RetryMax": 3, + "Shard": "vreplication_cellalias", + "RetryMax": 0, "Tags": [] }, "vreplication_basic": { @@ -657,8 +704,8 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestBasicVreplicationWorkflow"], "Command": [], "Manual": false, - "Shard": 24, - "RetryMax": 3, + "Shard": "vreplication_basic", + "RetryMax": 0, "Tags": [] }, "orchestrator": { @@ -666,7 +713,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/orchestrator"], "Command": [], "Manual": false, - "Shard": 22, + "Shard": "22", "RetryMax": 0, "Tags": [] }, 
@@ -675,7 +722,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vault"], "Command": [], "Manual": false, - "Shard": 23, + "Shard": "23", "RetryMax": 0, "Tags": [] }, @@ -684,8 +731,17 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestBasicV2Workflows"], "Command": [], "Manual": false, - "Shard": 21, - "RetryMax": 3, + "Shard": "vreplication_v2", + "RetryMax": 0, + "Tags": [] + }, + "vreplication_migrate": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMigrate"], + "Command": [], + "Manual": false, + "Shard": "vreplication_migrate", + "RetryMax": 0, "Tags": [] } } diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl new file mode 100644 index 00000000000..7a6548a71a0 --- /dev/null +++ b/test/templates/cluster_endtoend_test.tpl @@ -0,0 +1,60 @@ +name: {{.Name}} +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on {{.Name}} + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + {{if .InstallXtraBackup}} + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + {{end}} + + {{if .MakeTools}} + + - name: Installing zookeeper and consul + run: | + make tools + + {{end}} + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard {{.Shard}} diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl new file mode 100644 index 00000000000..37a20971d54 --- /dev/null +++ b/test/templates/unit_test.tpl @@ -0,0 +1,127 @@ +name: {{.Name}} +on: [push, pull_request] +jobs: + + test: + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + {{if (eq .Platform "mysql57")}} + + # mysql57 + sudo apt-get install -y mysql-server mysql-client + + {{else}} + + # !mysql57 + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + {{if (eq .Platform "percona56")}} + + # percona56 + sudo rm -rf /var/lib/mysql + sudo apt install -y gnupg2 + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y percona-server-server-5.6 percona-server-client-5.6 + + {{end}} + + {{if (eq .Platform "mysql80")}} + + # mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + {{end}} + + {{if (eq .Platform "mariadb101")}} + + # mariadb101 + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.1/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + + {{end}} + + {{if (eq .Platform "mariadb102")}} + + # mariadb102 + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys 
--keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.2/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + + {{end}} + + {{if (eq .Platform "mariadb103")}} + + # mariadb103 + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.3/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + + {{end}} + + {{end}} {{/*outer if*/}} + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: Run test + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/tools.go b/tools.go new file mode 100644 index 00000000000..71b7b7613c5 --- /dev/null +++ b/tools.go @@ -0,0 +1,14 @@ +//+build tools + +package tools + +// Package tools is used to import go modules that we use for tooling as dependencies. 
+// For more information, please refer to: https://github.com/go-modules-by-example/index/blob/master/010_tools/README.md + +import ( + _ "github.com/gogo/protobuf/protoc-gen-gofast" + _ "k8s.io/code-generator/cmd/client-gen" + _ "k8s.io/code-generator/cmd/deepcopy-gen" + _ "k8s.io/code-generator/cmd/informer-gen" + _ "k8s.io/code-generator/cmd/lister-gen" +) diff --git a/tools/check_make_parser.sh b/tools/check_make_parser.sh index 8920762108a..4da1c2a9395 100755 --- a/tools/check_make_parser.sh +++ b/tools/check_make_parser.sh @@ -19,11 +19,12 @@ if ! cd go/vt/sqlparser/ ; then fi mv $CUR $TMP -output=$(go run golang.org/x/tools/cmd/goyacc -o $CUR sql.y) +output=$(go run ./goyacc -fast-append -o $CUR sql.y) +expectedOutput=" +conflicts: 2 shift/reduce" -if [ -n "$output" ]; then - echo "Expected empty output from goyacc, got:" - echo $output +if [[ "$output" != "$expectedOutput" ]]; then + echo -e "Expected output from goyacc:$expectedOutput\ngot:$output" mv $TMP $CUR exit 1 fi diff --git a/tools/check_make_sizegen.sh b/tools/check_make_sizegen.sh new file mode 100755 index 00000000000..edcff23a5e3 --- /dev/null +++ b/tools/check_make_sizegen.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Validate that the current version of the generated cache_size files match the output +# generated by sizegen. +# +# This is used in Travis to verify that the currently committed version was +# generated with the proper cache_size files. + +source build.env + +TMP="/tmp/cached_size.$$.go" +ALL_FILES=$(find . -name "cached_size.go") + +set +e + +for SRC in $ALL_FILES +do + TMP="/tmp/"$(echo "$SRC" | sed 's/\//_/g' | sed "s/cached_size.go/cached_size_$$.go/g") + mv "$SRC" "$TMP" +done + +make sizegen + +STATUS=0 + +for SRC in $ALL_FILES +do + TMP="/tmp/"$(echo "$SRC" | sed 's/\//_/g' | sed "s/cached_size.go/cached_size_$$.go/g") + + if [ ! -f "$SRC" ]; then + mv "$TMP" "$SRC" + continue + fi + + if ! 
diff -q "$SRC" "$TMP" > /dev/null ; then + echo "ERROR: Regenerated file for $SRC does not match the current version:" + diff -u "$SRC" "$TMP" + + echo + echo "Please re-run 'make sizegen' to generate." + STATUS=1 + fi + mv "$TMP" "$SRC" +done + +exit $STATUS + diff --git a/tools/e2e_test_runner.sh b/tools/e2e_test_runner.sh index c581957a366..dc2edbf0e59 100755 --- a/tools/e2e_test_runner.sh +++ b/tools/e2e_test_runner.sh @@ -45,7 +45,7 @@ all_except_flaky_and_cluster_tests=$(echo "$packages_with_tests" | grep -vE ".+ flaky_tests=$(echo "$packages_with_tests" | grep -E ".+ .+_flaky_test\.go" | grep -vE "go/test/endtoend" | cut -d" " -f1) # Run non-flaky tests. -echo "$all_except_flaky_and_cluster_tests" | xargs go test $VT_GO_PARALLEL +echo "$all_except_flaky_and_cluster_tests" | xargs go test -count=1 $VT_GO_PARALLEL if [ $? -ne 0 ]; then echo "ERROR: Go unit tests failed. See above for errors." echo diff --git a/tools/make-release-packages.sh b/tools/make-release-packages.sh index ba31cb4df77..d33eb9d3b6b 100755 --- a/tools/make-release-packages.sh +++ b/tools/make-release-packages.sh @@ -35,7 +35,7 @@ mkdir -p releases # Copy a subset of binaries from issue #5421 mkdir -p "${RELEASE_DIR}/bin" -for binary in vttestserver mysqlctl mysqlctld query_analyzer topo2topo vtaclcheck vtbackup vtbench vtclient vtcombo vtctl vtctlclient vtctld vtexplain vtgate vttablet vtworker vtworkerclient zk zkctl zkctld; do +for binary in vttestserver mysqlctl mysqlctld query_analyzer topo2topo vtaclcheck vtbackup vtbench vtclient vtcombo vtctl vtctldclient vtctlclient vtctld vtexplain vtgate vttablet vtorc vtworker vtworkerclient zk zkctl zkctld; do cp "bin/$binary" "${RELEASE_DIR}/bin/" done; diff --git a/tools/rowlog/rowlog.go b/tools/rowlog/rowlog.go index f90a6a48a44..e4458005f4d 100644 --- a/tools/rowlog/rowlog.go +++ b/tools/rowlog/rowlog.go @@ -12,6 +12,7 @@ import ( "strings" "sync" "time" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" "vitess.io/vitess/go/mysql" 
"vitess.io/vitess/go/sqltypes" @@ -155,7 +156,7 @@ func startStreaming(ctx context.Context, vtgate, vtctld, keyspace, tablet, table log.Fatal(err) } defer conn.Close() - reader, err := conn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter) + reader, err := conn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter, &vtgatepb.VStreamFlags{}) var fields []*query.Field var gtid string var plan *TablePlan diff --git a/web/orchestrator/public/js/orchestrator.js b/web/orchestrator/public/js/orchestrator.js index 62316744a36..b00f11f8869 100644 --- a/web/orchestrator/public/js/orchestrator.js +++ b/web/orchestrator/public/js/orchestrator.js @@ -76,6 +76,19 @@ function isCompactDisplay() { return ($.cookie("compact-display") == "true"); } +// origin: https://vanillajstoolkit.com/ +/** + * Sanitize and encode all HTML in a user-submitted string + * https://portswigger.net/web-security/cross-site-scripting/preventing + * @param {String} str The user-submitted string + * @return {String} str The sanitized string + */ +function sanitizeHTML (str) { + return str.replace(/[^\w-_. ]/gi, function (c) { + return '&#' + c.charCodeAt(0) + ';'; + }); +} + function anonymizeInstanceId(instanceId) { var tokens = instanceId.split("__"); return "instance-" + md5(tokens[1]).substring(0, 4) + ":" + tokens[2]; @@ -1133,7 +1146,7 @@ $(document).ready(function() { $("[data-nav-page=user-id]").css('display', 'inline-block'); $("[data-nav-page=user-id] a").html(" " + getUserId()); } - var orchestratorMsg = getParameterByName("orchestrator-msg") + var orchestratorMsg = sanitizeHTML(getParameterByName("orchestrator-msg")) if (orchestratorMsg) { addInfo(orchestratorMsg) diff --git a/web/vtadmin/README.md b/web/vtadmin/README.md index 0f024212d1f..6f70884c4df 100644 --- a/web/vtadmin/README.md +++ b/web/vtadmin/README.md @@ -11,7 +11,7 @@ In this section, we'll get vtadmin-web, vtadmin-api, and Vitess all running loca ./docker/local/run.sh ``` -1. 
Create an empty vtgate credentials file to avoid the gRPC dialer bug mentioned in https://github.com/vitessio/vitess/pull/7187. Location and filename don't matter since you'll be passing this in as a flag; I put mine at ` /Users/sarabee/id1-grpc_vtgate_credentials.json`: +1. Create an empty vtgate credentials file to avoid the gRPC dialer bug mentioned in https://github.com/vitessio/vitess/pull/7187. Location and filename don't matter since you'll be passing this in as a flag; I put mine at ` /Users/sarabee/vtadmin-creds.json`: ```json { @@ -28,14 +28,14 @@ In this section, we'll get vtadmin-web, vtadmin-api, and Vitess all running loca { "host": { "hostname": "127.0.0.1:15991" - }, - "tags": ["pool:pool1", "cell:zone1", "extra:tag"] - }, + } + } + ], + "vtctlds": [ { "host": { - "hostname": "127.0.0.1:15992" - }, - "tags": ["dead-dove-do-not-eat"] + "hostname": "127.0.0.1:15999" + } } ] } @@ -47,9 +47,10 @@ In this section, we'll get vtadmin-web, vtadmin-api, and Vitess all running loca make build ./bin/vtadmin \ - --addr ":15999" \ - --cluster-defaults "vtsql-credentials-path-tmpl=/Users/sarabee/id1-grpc_vtgate_credentials.json" \ - --cluster "name=cluster1,id=id1,discovery=staticFile,discovery-staticFile-path=/Users/sarabee/vtadmin-cluster1.json,vtsql-discovery-tags=cell:zone1" + --addr ":14200" \ + --cluster-defaults "vtctld-credentials-path-tmpl=/Users/sarabee/vtadmin-creds.json,vtsql-credentials-path-tmpl=/Users/sarabee/vtadmin-creds.json" \ + --cluster "name=cluster1,id=id1,discovery=staticFile,discovery-staticFile-path=/Users/sarabee/vtadmin-cluster1.json" \ + --http-origin=http://localhost:3000 ``` 1. Finally! Start up vtadmin-web on [http://localhost:3000](http://localhost:3000), pointed at the vtadmin-api server you started in the last step. 
@@ -57,7 +58,7 @@ In this section, we'll get vtadmin-web, vtadmin-api, and Vitess all running loca ```bash cd web/vtadmin npm install - REACT_APP_VTADMIN_API_ADDRESS="http://127.0.0.1:15999" npm start + REACT_APP_VTADMIN_API_ADDRESS="http://127.0.0.1:14200" npm start ``` # Developer guide diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index cb25ffa0647..3e44e8a070c 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -2080,9 +2080,10 @@ } }, "@testing-library/dom": { - "version": "7.29.0", - "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-7.29.0.tgz", - "integrity": "sha512-0hhuJSmw/zLc6ewR9cVm84TehuTd7tbqBX9pRNSp8znJ9gTmSgesdbiGZtt8R6dL+2rgaPFp9Yjr7IU1HWm49w==", + "version": "7.29.4", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-7.29.4.tgz", + "integrity": "sha512-CtrJRiSYEfbtNGtEsd78mk1n1v2TUbeABlNIcOCJdDfkN5/JTOwQEbbQpoSRxGqzcWPgStMvJ4mNolSuBRv1NA==", + "dev": true, "requires": { "@babel/code-frame": "^7.10.4", "@babel/runtime": "^7.12.5", @@ -2098,6 +2099,7 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, "requires": { "color-convert": "^2.0.1" } @@ -2106,6 +2108,7 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, "requires": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2115,6 +2118,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, "requires": { "color-name": "~1.1.4" } @@ -2122,17 +2126,20 @@ "color-name": { "version": "1.1.4", 
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true }, "has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true }, "supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, "requires": { "has-flag": "^4.0.0" } @@ -2140,9 +2147,10 @@ } }, "@testing-library/jest-dom": { - "version": "5.11.6", - "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.11.6.tgz", - "integrity": "sha512-cVZyUNRWwUKI0++yepYpYX7uhrP398I+tGz4zOlLVlUYnZS+Svuxv4fwLeCIy7TnBYKXUaOlQr3vopxL8ZfEnA==", + "version": "5.11.9", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.11.9.tgz", + "integrity": "sha512-Mn2gnA9d1wStlAIT2NU8J15LNob0YFBVjs2aEQ3j8rsfRQo+lAs7/ui1i2TGaJjapLmuNPLTsrm+nPjmZDwpcQ==", + "dev": true, "requires": { "@babel/runtime": "^7.9.2", "@types/testing-library__jest-dom": "^5.9.1", @@ -2158,6 +2166,7 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, "requires": { "color-convert": "^2.0.1" } @@ -2166,6 +2175,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", "integrity": 
"sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, "requires": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2175,6 +2185,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, "requires": { "color-name": "~1.1.4" } @@ -2182,12 +2193,14 @@ "color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true }, "css": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/css/-/css-3.0.0.tgz", "integrity": "sha512-DG9pFfwOrzc+hawpmqX/dHYHJG+Bsdb0klhyi1sDneOgGOXy9wQIC8hzyVp1e4NRYDBdxcylvywPkkXCHAzTyQ==", + "dev": true, "requires": { "inherits": "^2.0.4", "source-map": "^0.6.1", @@ -2197,17 +2210,20 @@ "has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true }, "source-map-resolve": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.6.0.tgz", 
"integrity": "sha512-KXBr9d/fO/bWo97NXsPIAW1bFSBOuCnjbNTBMO7N59hsv5i9yzRDfcYwwt0l04+VqnKC+EwzvJZIP/qkuMgR/w==", + "dev": true, "requires": { "atob": "^2.1.2", "decode-uri-component": "^0.2.0" @@ -2217,6 +2233,7 @@ "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, "requires": { "has-flag": "^4.0.0" } @@ -2224,14 +2241,29 @@ } }, "@testing-library/react": { - "version": "11.2.2", - "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-11.2.2.tgz", - "integrity": "sha512-jaxm0hwUjv+hzC+UFEywic7buDC9JQ1q3cDsrWVSDAPmLotfA6E6kUHlYm/zOeGCac6g48DR36tFHxl7Zb+N5A==", + "version": "11.2.5", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-11.2.5.tgz", + "integrity": "sha512-yEx7oIa/UWLe2F2dqK0FtMF9sJWNXD+2PPtp39BvE0Kh9MJ9Kl0HrZAgEuhUJR+Lx8Di6Xz+rKwSdEPY2UV8ZQ==", + "dev": true, "requires": { "@babel/runtime": "^7.12.5", "@testing-library/dom": "^7.28.1" } }, + "@testing-library/react-hooks": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@testing-library/react-hooks/-/react-hooks-5.0.3.tgz", + "integrity": "sha512-UrnnRc5II7LMH14xsYNm/WRch/67cBafmrSQcyFh0v+UUmSf1uzfB7zn5jQXSettGwOSxJwdQUN7PgkT0w22Lg==", + "dev": true, + "requires": { + "@babel/runtime": "^7.12.5", + "@types/react": ">=16.9.0", + "@types/react-dom": ">=16.9.0", + "@types/react-test-renderer": ">=16.9.0", + "filter-console": "^0.1.1", + "react-error-boundary": "^3.1.0" + } + }, "@testing-library/user-event": { "version": "12.6.0", "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-12.6.0.tgz", @@ -2246,9 +2278,10 @@ "integrity": "sha512-/+CRPXpBDpo2RK9C68N3b2cOvO0Cf5B9aPijHsoDQTHivnGSObdOF2BRQOYjojWTDy6nQvMjmqRXIxH55VjxxA==" }, "@types/aria-query": { - "version": "4.2.0", - "resolved": 
"https://registry.npmjs.org/@types/aria-query/-/aria-query-4.2.0.tgz", - "integrity": "sha512-iIgQNzCm0v7QMhhe4Jjn9uRh+I6GoPmt03CbEtwx3ao8/EfoQcmgtqH4vQ5Db/lxiIGaWDv6nwvunuh0RyX0+A==" + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-4.2.1.tgz", + "integrity": "sha512-S6oPal772qJZHoRZLFc/XoZW2gFvwXusYUmXPXkgxJLuEk2vOt7jc4Yo6z/vtI0EBkbPBVrJJ0B+prLIKiWqHg==", + "dev": true }, "@types/babel__core": { "version": "7.1.12", @@ -2287,6 +2320,11 @@ "@babel/types": "^7.3.0" } }, + "@types/classnames": { + "version": "2.2.11", + "resolved": "https://registry.npmjs.org/@types/classnames/-/classnames-2.2.11.tgz", + "integrity": "sha512-2koNhpWm3DgWRp5tpkiJ8JGc1xTn2q0l+jUNUE7oMKXUf5NpI9AIdC4kbjGNFBdHtcxBD18LAksoudAVhFKCjw==" + }, "@types/cookie": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.0.tgz", @@ -2324,6 +2362,11 @@ "@types/node": "*" } }, + "@types/history": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.8.tgz", + "integrity": "sha512-S78QIYirQcUoo6UJZx9CSP0O2ix9IaeAXwQi26Rhr/+mg7qqPy8TzaxHSUut7eGjL8WmLccT7/MXf304WjqHcA==" + }, "@types/html-minifier-terser": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-5.1.1.tgz", @@ -2369,6 +2412,21 @@ "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=" }, + "@types/lodash": { + "version": "4.14.168", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.168.tgz", + "integrity": "sha512-oVfRvqHV/V6D1yifJbVRU3TMp8OT6o6BG+U9MkwuJ3U8/CsDHvalRpsxBqivn71ztOFZBTfJMvETbqHiaNSj7Q==", + "dev": true + }, + "@types/lodash-es": { + "version": "4.17.4", + "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.4.tgz", + "integrity": "sha512-BBz79DCJbD2CVYZH67MBeHZRX++HF+5p8Mo5MzjZi64Wac39S3diedJYHZtScbRVf4DjZyN6LzA0SB0zy+HSSQ==", + "dev": true, + 
"requires": { + "@types/lodash": "*" + } + }, "@types/long": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", @@ -2442,6 +2500,34 @@ "@types/react": "^16" } }, + "@types/react-router": { + "version": "5.1.11", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.11.tgz", + "integrity": "sha512-ofHbZMlp0Y2baOHgsWBQ4K3AttxY61bDMkwTiBOkPg7U6C/3UwwB5WaIx28JmSVi/eX3uFEMRo61BV22fDQIvg==", + "requires": { + "@types/history": "*", + "@types/react": "*" + } + }, + "@types/react-router-dom": { + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.1.7.tgz", + "integrity": "sha512-D5mHD6TbdV/DNHYsnwBTv+y73ei+mMjrkGrla86HthE4/PVvL1J94Bu3qABU+COXzpL23T1EZapVVpwHuBXiUg==", + "requires": { + "@types/history": "*", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "@types/react-test-renderer": { + "version": "17.0.0", + "resolved": "https://registry.npmjs.org/@types/react-test-renderer/-/react-test-renderer-17.0.0.tgz", + "integrity": "sha512-nvw+F81OmyzpyIE1S0xWpLonLUZCMewslPuA8BtjSKc5XEbn8zEQBXS7KuOLHTNnSOEM2Pum50gHOoZ62tqTRg==", + "dev": true, + "requires": { + "@types/react": "*" + } + }, "@types/resolve": { "version": "0.0.8", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-0.0.8.tgz", @@ -2469,6 +2555,7 @@ "version": "5.9.5", "resolved": "https://registry.npmjs.org/@types/testing-library__jest-dom/-/testing-library__jest-dom-5.9.5.tgz", "integrity": "sha512-ggn3ws+yRbOHog9GxnXiEZ/35Mow6YtPZpd7Z5mKDeZS/o7zx3yAle0ov/wjhVB5QT4N2Dt+GNoGCdqkBGCajQ==", + "dev": true, "requires": { "@types/jest": "*" } @@ -4229,6 +4316,11 @@ } } }, + "classnames": { + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.2.6.tgz", + "integrity": "sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q==" + }, "clean-css": { "version": "4.2.3", "resolved": 
"https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz", @@ -4429,6 +4521,11 @@ } } }, + "compute-scroll-into-view": { + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-1.0.16.tgz", + "integrity": "sha512-a85LHKY81oQnikatZYA90pufpZ6sQx++BoCxOEMsjpZx+ZnaKGQnCyCehTRr/1p9GBIAHTjcU9k71kSYWloLiQ==" + }, "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -4817,7 +4914,8 @@ "css.escape": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", - "integrity": "sha1-QuJ9T6BK4y+TGktNQZH6nN3ul8s=" + "integrity": "sha1-QuJ9T6BK4y+TGktNQZH6nN3ul8s=", + "dev": true }, "cssdb": { "version": "4.4.0", @@ -5332,7 +5430,8 @@ "dom-accessibility-api": { "version": "0.5.4", "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.4.tgz", - "integrity": "sha512-TvrjBckDy2c6v6RLxPv5QXOnU+SmF9nBII5621Ve5fu6Z/BDrENurBEvlC1f44lKEUVqOpK4w9E5Idc5/EgkLQ==" + "integrity": "sha512-TvrjBckDy2c6v6RLxPv5QXOnU+SmF9nBII5621Ve5fu6Z/BDrENurBEvlC1f44lKEUVqOpK4w9E5Idc5/EgkLQ==", + "dev": true }, "dom-converter": { "version": "0.2.0", @@ -5434,6 +5533,24 @@ "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-5.1.0.tgz", "integrity": "sha512-YXQl1DSa4/PQyRfgrv6aoNjhasp/p4qs9FjJ4q4cQk+8m4r6k4ZSiEyytKG8f8W9gi8WsQtIObNmKd+tMzNTmA==" }, + "downshift": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-6.1.0.tgz", + "integrity": "sha512-MnEJERij+1pTVAsOPsH3q9MJGNIZuu2sT90uxOCEOZYH6sEzkVGtUcTBVDRQkE8y96zpB7uEbRn24aE9VpHnZg==", + "requires": { + "@babel/runtime": "^7.12.5", + "compute-scroll-into-view": "^1.0.16", + "prop-types": "^15.7.2", + "react-is": "^17.0.1" + }, + "dependencies": { + "react-is": { + "version": "17.0.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.1.tgz", + "integrity": 
"sha512-NAnt2iGDXohE5LI7uBnLnqvLQMtzhkiAOLXTmv+qnF9Ky7xAPcX8Up/xWIhxvLVGJvuLiNc4xQLtuqDRzb4fSA==" + } + } + }, "duplexer": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", @@ -6716,6 +6833,17 @@ "to-regex-range": "^5.0.1" } }, + "filter-console": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/filter-console/-/filter-console-0.1.1.tgz", + "integrity": "sha512-zrXoV1Uaz52DqPs+qEwNJWJFAWZpYJ47UNmpN9q4j+/EYsz85uV0DC9k8tRND5kYmoVzL0W+Y75q4Rg8sRJCdg==", + "dev": true + }, + "filter-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-1.1.0.tgz", + "integrity": "sha1-mzERErxsYSehbgFsbF1/GeCAXFs=" + }, "finalhandler": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", @@ -7481,6 +7609,14 @@ "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" }, + "history": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/history/-/history-5.0.0.tgz", + "integrity": "sha512-3NyRMKIiFSJmIPdq7FxkNMJkQ7ZEtVblOQ38VtKaA0zZMW1Eo6Q6W8oDKEflr1kNNTItSnk4JMCO1deeSgbLLg==", + "requires": { + "@babel/runtime": "^7.7.6" + } + }, "hmac-drbg": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", @@ -7491,6 +7627,14 @@ "minimalistic-crypto-utils": "^1.0.1" } }, + "hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "requires": { + "react-is": "^16.7.0" + } + }, "hoopy": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/hoopy/-/hoopy-0.1.4.tgz", @@ -10158,6 +10302,11 @@ "resolved": 
"https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==" }, + "lodash-es": { + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.20.tgz", + "integrity": "sha512-JD1COMZsq8maT6mnuz1UMV0jvYD0E0aUsSOdrr1/nAG3dhqQXwRRgeW0cSqH1U43INKcqxaiVIQNOUDld7gRDA==" + }, "lodash._reinterpolate": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", @@ -10315,7 +10464,8 @@ "lz-string": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.4.4.tgz", - "integrity": "sha1-wNjq82BZ9wV5bh40SBHPTEmNOiY=" + "integrity": "sha1-wNjq82BZ9wV5bh40SBHPTEmNOiY=", + "dev": true }, "magic-string": { "version": "0.25.7", @@ -10677,7 +10827,17 @@ "min-indent": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", - "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==" + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true + }, + "mini-create-react-context": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/mini-create-react-context/-/mini-create-react-context-0.4.1.tgz", + "integrity": "sha512-YWCYEmd5CQeHGSAKrYvXgmzzkrvssZcuuQDDeqkT+PziKGMgE+0MCCtcKbROzocGBG1meBLl2FotlRwf4gAzbQ==", + "requires": { + "@babel/runtime": "^7.12.1", + "tiny-warning": "^1.0.3" + } }, "mini-css-extract-plugin": { "version": "0.11.3", @@ -11406,6 +11566,22 @@ "prepend-http": "^1.0.0", "query-string": "^4.1.0", "sort-keys": "^1.0.0" + }, + "dependencies": { + "query-string": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/query-string/-/query-string-4.3.4.tgz", + "integrity": "sha1-u7aTucqRXCMlFbIosaArYJBD2+s=", + "requires": { + "object-assign": "^4.1.0", + 
"strict-uri-encode": "^1.0.0" + } + }, + "strict-uri-encode": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", + "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" + } } }, "npm-run-path": { @@ -13360,12 +13536,14 @@ "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" }, "query-string": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-4.3.4.tgz", - "integrity": "sha1-u7aTucqRXCMlFbIosaArYJBD2+s=", + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/query-string/-/query-string-6.14.0.tgz", + "integrity": "sha512-In3o+lUxlgejoVJgwEdYtdxrmlL0cQWJXj0+kkI7RWVo7hg5AhFtybeKlC9Dpgbr8eOC4ydpEh8017WwyfzqVQ==", "requires": { - "object-assign": "^4.1.0", - "strict-uri-encode": "^1.0.0" + "decode-uri-component": "^0.2.0", + "filter-obj": "^1.1.0", + "split-on-first": "^1.0.0", + "strict-uri-encode": "^2.0.0" } }, "querystring": { @@ -13562,6 +13740,15 @@ "scheduler": "^0.20.1" } }, + "react-error-boundary": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/react-error-boundary/-/react-error-boundary-3.1.0.tgz", + "integrity": "sha512-lmPrdi5SLRJR+AeJkqdkGlW/CRkAUvZnETahK58J4xb5wpbfDngasEGu+w0T1iXEhVrYBJZeW+c4V1hILCnMWQ==", + "dev": true, + "requires": { + "@babel/runtime": "^7.12.5" + } + }, "react-error-overlay": { "version": "6.0.8", "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.8.tgz", @@ -13586,6 +13773,80 @@ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.8.3.tgz", "integrity": "sha512-X8jZHc7nCMjaCqoU+V2I0cOhNW+QMBwSUkeXnTi8IPe6zaRWfn60ZzvFDZqWPfmSJfjub7dDW1SP0jaHWLu/hg==" }, + "react-router": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.2.0.tgz", + "integrity": "sha512-smz1DUuFHRKdcJC0jobGo8cVbhO3x50tCL4icacOlcwDOEQPq4TMqwx3sY1TP+DvtTgz4nm3thuo7A+BK2U0Dw==", + 
"requires": { + "@babel/runtime": "^7.1.2", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "mini-create-react-context": "^0.4.0", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "dependencies": { + "history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "requires": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, + "path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "requires": { + "isarray": "0.0.1" + } + } + } + }, + "react-router-dom": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.2.0.tgz", + "integrity": "sha512-gxAmfylo2QUjcwxI63RhQ5G85Qqt4voZpUXSEqCwykV0baaOTQDR1f0PmY8AELqIyVc0NEZUj0Gov5lNGcXgsA==", + "requires": { + "@babel/runtime": "^7.1.2", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.2.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "dependencies": { + "history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "requires": { + "@babel/runtime": "^7.1.2", + "loose-envify": 
"^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + } + } + }, "react-scripts": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/react-scripts/-/react-scripts-4.0.1.tgz", @@ -13761,6 +14022,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, "requires": { "indent-string": "^4.0.0", "strip-indent": "^3.0.0" @@ -14103,6 +14365,11 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==" }, + "resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, "resolve-url": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", @@ -15251,6 +15518,11 @@ "integrity": "sha512-1klA3Gi5PD1Wv9Q0wUoOQN1IWAuPu0D1U03ThXTr0cJ20+/iq2tHSDnK7Kk/0LXJ1ztUB2/1Os0wKmfyNgUQfg==", "dev": true }, + "split-on-first": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/split-on-first/-/split-on-first-1.1.0.tgz", + "integrity": "sha512-43ZssAJaMusuKWL8sKUBQXHWOpq8d6CfN/u1p4gUzfJkM05C8rxTmYrkIPTXapZpORA6LkkzcUulJ8FqA7Uudw==" + }, "split-string": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", @@ -15458,9 +15730,9 @@ "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" }, "strict-uri-encode": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": 
"sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-2.0.0.tgz", + "integrity": "sha1-ucczDHBChi9rFC3CdLvMWGbONUY=" }, "string-length": { "version": "4.0.1", @@ -15586,6 +15858,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, "requires": { "min-indent": "^1.0.0" } @@ -16250,6 +16523,16 @@ "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=" }, + "tiny-invariant": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.1.0.tgz", + "integrity": "sha512-ytxQvrb1cPc9WBEI/HSeYYoGD0kWnGEOR8RY6KomWLBVhqz0RgTwVO9dLrGz7dC+nN9llyI7OKAgRq8Vq4ZBSw==" + }, + "tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, "tmpl": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.4.tgz", @@ -16796,6 +17079,11 @@ "spdx-expression-parse": "^3.0.0" } }, + "value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, "vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", diff --git a/web/vtadmin/package.json b/web/vtadmin/package.json index 7766bf30968..12ec97abd53 100644 --- a/web/vtadmin/package.json +++ b/web/vtadmin/package.json @@ -7,17 +7,23 @@ "npm": ">=6.14.9" }, "dependencies": { - "@testing-library/jest-dom": "^5.11.6", - "@testing-library/react": "^11.2.2", "@testing-library/user-event": 
"^12.6.0", + "@types/classnames": "^2.2.11", "@types/jest": "^26.0.19", "@types/node": "^12.19.9", "@types/react": "^16.14.2", "@types/react-dom": "^16.9.10", + "@types/react-router-dom": "^5.1.7", + "classnames": "^2.2.6", + "downshift": "^6.1.0", + "history": "^5.0.0", + "lodash-es": "^4.17.20", "node-sass": "^4.14.1", + "query-string": "^6.14.0", "react": "^17.0.1", "react-dom": "^17.0.1", "react-query": "^3.5.9", + "react-router-dom": "^5.2.0", "react-scripts": "4.0.1", "typescript": "^4.1.3", "web-vitals": "^0.2.4" @@ -54,7 +60,16 @@ "last 1 safari version" ] }, + "jest": { + "transformIgnorePatterns": [ + "/!node_modules\\/lodash-es/" + ] + }, "devDependencies": { + "@testing-library/jest-dom": "^5.11.9", + "@testing-library/react": "^11.2.5", + "@testing-library/react-hooks": "^5.0.3", + "@types/lodash-es": "^4.17.4", "msw": "^0.24.4", "prettier": "^2.2.1", "protobufjs": "^6.10.2", diff --git a/web/vtadmin/src/api/http.test.ts b/web/vtadmin/src/api/http.test.ts index cfd319156a6..af46c15b26c 100644 --- a/web/vtadmin/src/api/http.test.ts +++ b/web/vtadmin/src/api/http.test.ts @@ -39,21 +39,45 @@ import { HTTP_RESPONSE_NOT_OK_ERROR, MALFORMED_HTTP_RESPONSE_ERROR } from './htt // means our fake is more robust than it would be otherwise. Since we are using // the exact same protos in our fake as in our real vtadmin-api server, we're guaranteed // to have type parity. -process.env.REACT_APP_VTADMIN_API_ADDRESS = ''; const server = setupServer(); +// mockServerJson configures an HttpOkResponse containing the given `json` +// for all requests made against the given `endpoint`. const mockServerJson = (endpoint: string, json: object) => { server.use(rest.get(endpoint, (req, res, ctx) => res(ctx.json(json)))); }; -// Enable API mocking before tests. -beforeAll(() => server.listen()); +// Since vtadmin uses process.env variables quite a bit, we need to +// do a bit of a dance to clear them out between test runs. 
+const ORIGINAL_PROCESS_ENV = process.env; +const TEST_PROCESS_ENV = { + ...process.env, + REACT_APP_VTADMIN_API_ADDRESS: '', +}; + +beforeAll(() => { + process.env = { ...TEST_PROCESS_ENV }; + + // Enable API mocking before tests. + server.listen(); +}); -// Reset any runtime request handlers we may add during the tests. -afterEach(() => server.resetHandlers()); +afterEach(() => { + // Reset the process.env to clear out any changes made in the tests. + process.env = { ...TEST_PROCESS_ENV }; -// Disable API mocking after the tests are done. -afterAll(() => server.close()); + jest.restoreAllMocks(); + + // Reset any runtime request handlers we may add during the tests. + server.resetHandlers(); +}); + +afterAll(() => { + process.env = { ...ORIGINAL_PROCESS_ENV }; + + // Disable API mocking after the tests are done. + server.close(); +}); describe('api/http', () => { describe('vtfetch', () => { @@ -66,13 +90,22 @@ describe('api/http', () => { expect(result).toEqual(response); }); - it('parses and returns JSON, given an HttpErrorResponse response', async () => { + it('throws an error if response.ok is false', async () => { const endpoint = `/api/tablets`; const response = { ok: false }; mockServerJson(endpoint, response); - const result = await api.vtfetch(endpoint); - expect(result).toEqual(response); + expect.assertions(3); + + try { + await api.fetchTablets(); + } catch (e) { + /* eslint-disable jest/no-conditional-expect */ + expect(e.name).toEqual(HTTP_RESPONSE_NOT_OK_ERROR); + expect(e.message).toEqual(endpoint); + expect(e.response).toEqual(response); + /* eslint-enable jest/no-conditional-expect */ + } }); it('throws an error on malformed JSON', async () => { @@ -105,72 +138,79 @@ describe('api/http', () => { /* eslint-enable jest/no-conditional-expect */ } }); - }); - describe('fetchTablets', () => { - it('returns a list of Tablets, given a successful response', async () => { - const t0 = pb.Tablet.create({ tablet: { hostname: 't0' } }); - const t1 = 
pb.Tablet.create({ tablet: { hostname: 't1' } }); - const t2 = pb.Tablet.create({ tablet: { hostname: 't2' } }); - const tablets = [t0, t1, t2]; - - mockServerJson(`/api/tablets`, { - ok: true, - result: { - tablets: tablets.map((t) => t.toJSON()), - }, + describe('credentials', () => { + it('uses the REACT_APP_FETCH_CREDENTIALS env variable if specified', async () => { + process.env.REACT_APP_FETCH_CREDENTIALS = 'include'; + + jest.spyOn(global, 'fetch'); + + const endpoint = `/api/tablets`; + const response = { ok: true, result: null }; + mockServerJson(endpoint, response); + + await api.vtfetch(endpoint); + expect(global.fetch).toHaveBeenCalledTimes(1); + expect(global.fetch).toHaveBeenCalledWith(endpoint, { credentials: 'include' }); + + jest.restoreAllMocks(); }); - const result = await api.fetchTablets(); - expect(result).toEqual(tablets); - }); + it('uses the fetch default `credentials` property by default', async () => { + jest.spyOn(global, 'fetch'); - it('throws an error if response.ok is false', async () => { - const response = { ok: false }; - mockServerJson('/api/tablets', response); + const endpoint = `/api/tablets`; + const response = { ok: true, result: null }; + mockServerJson(endpoint, response); - expect.assertions(3); + await api.vtfetch(endpoint); + expect(global.fetch).toHaveBeenCalledTimes(1); + expect(global.fetch).toHaveBeenCalledWith(endpoint, { credentials: undefined }); - try { - await api.fetchTablets(); - } catch (e) { - /* eslint-disable jest/no-conditional-expect */ - expect(e.name).toEqual(HTTP_RESPONSE_NOT_OK_ERROR); - expect(e.message).toEqual('/api/tablets'); - expect(e.response).toEqual(response); - /* eslint-enable jest/no-conditional-expect */ - } - }); + jest.restoreAllMocks(); + }); - it('throws an error if result.tablets is not an array', async () => { - mockServerJson('/api/tablets', { ok: true, result: { tablets: null } }); + it('throws an error if an invalid value used for `credentials`', async () => { + (process as 
any).env.REACT_APP_FETCH_CREDENTIALS = 'nope'; - expect.assertions(1); + jest.spyOn(global, 'fetch'); - try { - await api.fetchTablets(); - } catch (e) { - /* eslint-disable jest/no-conditional-expect */ - expect(e.message).toMatch('expected tablets to be an array'); - /* eslint-enable jest/no-conditional-expect */ - } - }); + const endpoint = `/api/tablets`; + const response = { ok: true, result: null }; + mockServerJson(endpoint, response); + + try { + await api.vtfetch(endpoint); + } catch (e) { + /* eslint-disable jest/no-conditional-expect */ + expect(e.message).toEqual( + 'Invalid fetch credentials property: nope. Must be undefined or one of omit, same-origin, include' + ); + expect(global.fetch).toHaveBeenCalledTimes(0); + /* eslint-enable jest/no-conditional-expect */ + } - it('throws an error if JSON cannot be unmarshalled into Tablet objects', async () => { - mockServerJson(`/api/tablets`, { - ok: true, - result: { - tablets: [{ cluster: 'this should be an object, not a string' }], - }, + jest.restoreAllMocks(); }); + }); + }); + + describe('vtfetchEntities', () => { + it('throws an error if result.tablets is not an array', async () => { + const endpoint = '/api/foos'; + mockServerJson(endpoint, { ok: true, result: { foos: null } }); expect.assertions(1); try { - await api.fetchTablets(); + await api.vtfetchEntities({ + endpoint, + extract: (res) => res.result.foos, + transform: (e) => null, // doesn't matter + }); } catch (e) { /* eslint-disable jest/no-conditional-expect */ - expect(e.message).toEqual('cluster.object expected'); + expect(e.message).toMatch('expected entities to be an array, got null'); /* eslint-enable jest/no-conditional-expect */ } }); diff --git a/web/vtadmin/src/api/http.ts b/web/vtadmin/src/api/http.ts index dceb9ae3e98..aa3200a72c2 100644 --- a/web/vtadmin/src/api/http.ts +++ b/web/vtadmin/src/api/http.ts @@ -25,9 +25,10 @@ interface HttpErrorResponse { ok: false; } -type HttpResponse = HttpOkResponse | HttpErrorResponse; - export 
const MALFORMED_HTTP_RESPONSE_ERROR = 'MalformedHttpResponseError'; + +// MalformedHttpResponseError is thrown when the JSON response envelope +// is an unexpected shape. class MalformedHttpResponseError extends Error { responseJson: object; @@ -39,6 +40,9 @@ class MalformedHttpResponseError extends Error { } export const HTTP_RESPONSE_NOT_OK_ERROR = 'HttpResponseNotOkError'; + +// HttpResponseNotOkError is thrown when `ok` is false in +// the JSON response envelope. class HttpResponseNotOkError extends Error { response: HttpErrorResponse | null; @@ -57,31 +61,141 @@ class HttpResponseNotOkError extends Error { // // Note that this only validates the HttpResponse envelope; it does not // do any type checking or validation on the result. -export const vtfetch = async (endpoint: string): Promise => { - const url = `${process.env.REACT_APP_VTADMIN_API_ADDRESS}${endpoint}`; - const response = await fetch(url); +export const vtfetch = async (endpoint: string): Promise => { + const { REACT_APP_VTADMIN_API_ADDRESS } = process.env; + + const url = `${REACT_APP_VTADMIN_API_ADDRESS}${endpoint}`; + const opts = vtfetchOpts(); + + const response = await global.fetch(url, opts); + const json = await response.json(); if (!('ok' in json)) throw new MalformedHttpResponseError('invalid http envelope', json); - return json as HttpResponse; + // Throw "not ok" responses so that react-query correctly interprets them as errors. 
+ // See https://react-query.tanstack.com/guides/query-functions#handling-and-throwing-errors + if (!json.ok) throw new HttpResponseNotOkError(endpoint, json); + + return json as HttpOkResponse; }; -export const fetchTablets = async () => { - const endpoint = '/api/tablets'; - const res = await vtfetch(endpoint); +export const vtfetchOpts = (): RequestInit => { + const credentials = process.env.REACT_APP_FETCH_CREDENTIALS; + if (credentials && credentials !== 'omit' && credentials !== 'same-origin' && credentials !== 'include') { + throw Error( + `Invalid fetch credentials property: ${credentials}. Must be undefined or one of omit, same-origin, include` + ); + } + return { credentials }; +}; - // Throw "not ok" responses so that react-query correctly interprets them as errors. - // See https://react-query.tanstack.com/guides/query-functions#handling-and-throwing-errors - if (!res.ok) throw new HttpResponseNotOkError(endpoint, res); +// vtfetchEntities is a helper function for querying vtadmin-api endpoints +// that return a list of protobuf entities. +export const vtfetchEntities = async (opts: { + endpoint: string; + // Extract the list of entities from the response. We can't (strictly) + // guarantee type safety for API responses, hence the `any` return type. + extract: (res: HttpOkResponse) => any; + // Transform an individual entity in the array to its (proto)typed form. + // This will almost always be a `.verify` followed by a `.create`, + // but because of how protobufjs structures its generated types, + // writing this in a generic way is... unpleasant, and difficult to read. 
+ transform: (e: object) => T; +}): Promise => { + const res = await vtfetch(opts.endpoint); + + const entities = opts.extract(res); + if (!Array.isArray(entities)) { + throw Error(`expected entities to be an array, got ${entities}`); + } + + return entities.map(opts.transform); +}; + +export const fetchClusters = async () => + vtfetchEntities({ + endpoint: '/api/clusters', + extract: (res) => res.result.clusters, + transform: (e) => { + const err = pb.Cluster.verify(e); + if (err) throw Error(err); + return pb.Cluster.create(e); + }, + }); - const tablets = res.result?.tablets; - if (!Array.isArray(tablets)) throw Error(`expected tablets to be an array, got ${tablets}`); +export const fetchGates = async () => + vtfetchEntities({ + endpoint: '/api/gates', + extract: (res) => res.result.gates, + transform: (e) => { + const err = pb.VTGate.verify(e); + if (err) throw Error(err); + return pb.VTGate.create(e); + }, + }); - return tablets.map((t: any) => { - const err = pb.Tablet.verify(t); - if (err) throw Error(err); +export const fetchKeyspaces = async () => + vtfetchEntities({ + endpoint: '/api/keyspaces', + extract: (res) => res.result.keyspaces, + transform: (e) => { + const err = pb.Keyspace.verify(e); + if (err) throw Error(err); + return pb.Keyspace.create(e); + }, + }); - return pb.Tablet.create(t); +export const fetchSchemas = async () => + vtfetchEntities({ + endpoint: '/api/schemas', + extract: (res) => res.result.schemas, + transform: (e) => { + const err = pb.Schema.verify(e); + if (err) throw Error(err); + return pb.Schema.create(e); + }, }); + +export interface FetchSchemaParams { + clusterID: string; + keyspace: string; + table: string; +} + +export const fetchSchema = async ({ clusterID, keyspace, table }: FetchSchemaParams) => { + const { result } = await vtfetch(`/api/schema/${clusterID}/${keyspace}/${table}`); + + const err = pb.Schema.verify(result); + if (err) throw Error(err); + + return pb.Schema.create(result); +}; + +export const fetchTablets 
= async () => + vtfetchEntities({ + endpoint: '/api/tablets', + extract: (res) => res.result.tablets, + transform: (e) => { + const err = pb.Tablet.verify(e); + if (err) throw Error(err); + return pb.Tablet.create(e); + }, + }); + +export const fetchWorkflows = async () => { + const { result } = await vtfetch(`/api/workflows`); + + const err = pb.GetWorkflowsResponse.verify(result); + if (err) throw Error(err); + + return pb.GetWorkflowsResponse.create(result); +}; + +export const fetchWorkflow = async (params: { clusterID: string; keyspace: string; name: string }) => { + const { result } = await vtfetch(`/api/workflow/${params.clusterID}/${params.keyspace}/${params.name}`); + + const err = pb.Workflow.verify(result); + if (err) throw Error(err); + + return pb.Workflow.create(result); }; diff --git a/web/vtadmin/src/components/App.module.scss b/web/vtadmin/src/components/App.module.scss index 87de655a8f8..e978807d116 100644 --- a/web/vtadmin/src/components/App.module.scss +++ b/web/vtadmin/src/components/App.module.scss @@ -13,10 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - .container { - margin: 0 auto; - max-width: 1200px; + display: grid; + grid-template-areas: 'nav content'; + grid-template-rows: auto; + grid-template-columns: 260px auto; + height: 100vh; + overflow: hidden; + position: relative; + width: 100vw; +} + +.navContainer { + grid-area: nav; + height: 100vh; +} + +.mainContainer { + grid-area: content; + overflow: auto; padding: 24px; - text-align: center; } diff --git a/web/vtadmin/src/components/App.tsx b/web/vtadmin/src/components/App.tsx index e6438e67daa..11f521528a1 100644 --- a/web/vtadmin/src/components/App.tsx +++ b/web/vtadmin/src/components/App.tsx @@ -1,5 +1,5 @@ /** - * Copyright 2020 The Vitess Authors. + * Copyright 2021 The Vitess Authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,33 +14,75 @@ * limitations under the License. */ import * as React from 'react'; +import { BrowserRouter as Router, Redirect, Route, Switch } from 'react-router-dom'; import style from './App.module.scss'; -import logo from '../img/vitess-icon-color.svg'; -import { TabletList } from './TabletList'; -import { useTablets } from '../hooks/api'; +import { Tablets } from './routes/Tablets'; +import { Debug } from './routes/Debug'; +import { NavRail } from './NavRail'; +import { Error404 } from './routes/Error404'; +import { Clusters } from './routes/Clusters'; +import { Gates } from './routes/Gates'; +import { Keyspaces } from './routes/Keyspaces'; +import { Schemas } from './routes/Schemas'; +import { Schema } from './routes/Schema'; +import { Workflows } from './routes/Workflows'; +import { Workflow } from './routes/Workflow'; export const App = () => { - const { data, error, isError, isSuccess } = useTablets(); - - // Placeholder UI :D - let content =
Loading...
; - if (isError) { - content = ( -
- {error?.name}: {error?.message} -
- ); - } else if (isSuccess) { - content = ; - } - return ( -
- logo -

VTAdmin

+ +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + - {content} -
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ ); }; diff --git a/web/vtadmin/src/components/Button.module.scss b/web/vtadmin/src/components/Button.module.scss new file mode 100644 index 00000000000..064667d8719 --- /dev/null +++ b/web/vtadmin/src/components/Button.module.scss @@ -0,0 +1,82 @@ +.button { + background: var(--colorPrimary); + border: solid 2px var(--colorPrimary); + border-radius: 6px; + box-sizing: border-box; + color: var(--textColorInverted); + cursor: pointer; + font-family: var(--fontFamilyPrimary); + font-size: var(--fontSizeDefault); + font-weight: 500; + height: var(--inputHeightMedium); + line-height: 1; + outline: none; + padding: 0 12px; + user-select: none; + transition: all 0.1s ease-in-out; + white-space: nowrap; + width: min-content; + + .icon { + display: inline-block; + fill: var(--textColorInverted); + height: 1.4em; + margin-right: 0.4em; + vertical-align: middle; + width: 1.4em; + } +} + +.button:active { + background: var(--colorPrimary200); + border-color: var(--colorPrimary200); + box-shadow: none !important; +} + +.button:disabled { + background: var(--colorDisabled); + border-color: var(--colorDisabled); + cursor: not-allowed; +} + +.button:not(:disabled):hover, +.button:not(:disabled):focus { + box-shadow: var(--boxShadowHover); +} + +.button.secondary { + background: transparent; + border: solid 2px var(--colorPrimary); + color: var(--colorPrimary); + + .icon { + fill: var(--colorPrimary); + } + + &:active { + background: var(--backgroundSecondaryHighlight); + } + + &:disabled { + background: transparent; + border-color: var(--colorDisabled); + color: var(--colorDisabled); + } + + &:disabled .icon { + fill: var(--colorDisabled); + } +} + +.button.sizeLarge { + font-size: var(--fontSizeLarge); + height: var(--inputHeightLarge); + padding: 0 16px; +} + +.button.sizeSmall { + border-width: 1px; + font-size: var(--fontSizeSmall); + height: var(--inputHeightSmall); + padding: 0 6px; +} diff --git a/web/vtadmin/src/components/Button.tsx 
b/web/vtadmin/src/components/Button.tsx new file mode 100644 index 00000000000..643fc99bcd2 --- /dev/null +++ b/web/vtadmin/src/components/Button.tsx @@ -0,0 +1,36 @@ +import * as React from 'react'; +import cx from 'classnames'; + +import style from './Button.module.scss'; +import { Icon, Icons } from './Icon'; + +interface Props extends React.DetailedHTMLProps, HTMLButtonElement> { + className?: string; + icon?: Icons; + secondary?: boolean; + size?: 'large' | 'medium' | 'small'; +} + +export const Button: React.FunctionComponent = ({ + children, + className, + icon, + secondary, + size = 'medium', + type = 'button', + ...props +}) => { + const buttonClass = cx(className, style.button, { + [style.secondary]: !!secondary, + [style.sizeLarge]: size === 'large', + [style.sizeSmall]: size === 'small', + [style.withIcon]: !!icon, + }); + + return ( + + ); +}; diff --git a/web/vtadmin/src/components/Code.module.scss b/web/vtadmin/src/components/Code.module.scss new file mode 100644 index 00000000000..772c6647eec --- /dev/null +++ b/web/vtadmin/src/components/Code.module.scss @@ -0,0 +1,56 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +.table { + margin: 8px 0; +} + +.table tr { + border: none; +} + +.table td { + border: none; + line-height: 1.6; + margin: 0; +} + +.lineNumber { + box-sizing: border-box; + color: var(--textColorSecondary); + font-family: var(--fontFamilyMonospace); + font-size: var(--fontSizeDefault); + line-height: 2.4rem; + min-width: 5rem; + padding: 0 1.2rem; + text-align: right; + user-select: none; + vertical-align: top; + white-space: nowrap; + width: 1%; +} + +.lineNumber::before { + content: attr(data-line-number); +} + +.code { + font-family: var(--fontFamilyMonospace); + font-size: var(--fontSizeDefault); + line-height: 2.4rem; + padding: 0 1.2rem; + tab-size: 8; + white-space: pre; +} diff --git a/web/vtadmin/src/components/Code.tsx b/web/vtadmin/src/components/Code.tsx new file mode 100644 index 00000000000..d547bfbf32d --- /dev/null +++ b/web/vtadmin/src/components/Code.tsx @@ -0,0 +1,44 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import * as React from 'react'; + +import style from './Code.module.scss'; + +interface Props { + code?: string | null | undefined; +} + +export const Code = ({ code }: Props) => { + if (typeof code !== 'string') return null; + + const codeLines = code.split('\n'); + return ( + + + {codeLines.map((line, idx) => { + return ( + + + + ); + })} + +
+ + {line} +
+ ); +}; diff --git a/web/vtadmin/src/components/Icon.tsx b/web/vtadmin/src/components/Icon.tsx new file mode 100644 index 00000000000..9ddef9aca7b --- /dev/null +++ b/web/vtadmin/src/components/Icon.tsx @@ -0,0 +1,82 @@ +import * as icons from '../icons'; + +interface Props { + className?: string; + icon: Icons; +} + +// All icons are from the VTAdmin Figma icon library: +// https://www.figma.com/file/By3SoETBRHpOirv3Ctfxdq/Designs +export const Icon = ({ icon, ...props }: Props) => { + const componentName = icon.charAt(0).toUpperCase() + icon.slice(1); + + const IconComponent = (icons as any)[componentName]; + if (!IconComponent) { + console.warn(`Invalid icon: ${icon}`); + return null; + } + + return ; +}; + +export enum Icons { + add = 'add', + alertFail = 'alertFail', + archive = 'archive', + archiveAuto = 'archiveAuto', + archiveForever = 'archiveForever', + archiveRestore = 'archiveRestore', + arrowDown = 'arrowDown', + arrowLeft = 'arrowLeft', + arrowRight = 'arrowRight', + arrowUp = 'arrowUp', + avatar = 'avatar', + boxChecked = 'boxChecked', + boxEmpty = 'boxEmpty', + boxIndeterminate = 'boxIndeterminate', + bug = 'bug', + chart = 'chart', + checkSuccess = 'checkSuccess', + chevronDown = 'chevronDown', + chevronLeft = 'chevronLeft', + chevronRight = 'chevronRight', + chevronUp = 'chevronUp', + circleAdd = 'circleAdd', + circleDelete = 'circleDelete', + circleRemove = 'circleRemove', + circleWorkflow = 'circleWorkflow', + clear = 'clear', + code = 'code', + copy = 'copy', + delete = 'delete', + document = 'document', + download = 'download', + dropDown = 'dropDown', + dropUp = 'dropUp', + ellipsis = 'ellipsis', + gear = 'gear', + history = 'history', + info = 'info', + keyG = 'keyG', + keyK = 'keyK', + keyR = 'keyR', + keyS = 'keyS', + keyT = 'keyT', + keyboard = 'keyboard', + link = 'link', + pageFirst = 'pageFirst', + pageLast = 'pageLast', + pause = 'pause', + question = 'question', + radioEmpty = 'radioEmpty', + radioSelected = 'radioSelected', + 
refresh = 'refresh', + remove = 'remove', + retry = 'retry', + runQuery = 'runQuery', + search = 'search', + sort = 'sort', + spinnerLoading = 'spinnerLoading', + start = 'start', + wrench = 'wrench', +} diff --git a/web/vtadmin/src/components/NavRail.module.scss b/web/vtadmin/src/components/NavRail.module.scss new file mode 100644 index 00000000000..d2dae8ec8c6 --- /dev/null +++ b/web/vtadmin/src/components/NavRail.module.scss @@ -0,0 +1,117 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +$navRailHoverTransition: background-color 0.1s ease-in-out; + +.container { + background: var(--backgroundSecondary); + border-right: solid 1px var(--backgroundSecondaryHighlight); + display: flex; + flex-direction: column; + justify-content: space-between; + height: 100vh; + overflow-y: auto; +} + +.logoContainer { + display: block; + padding: 24px; + text-align: center; +} + +.footerContainer { + margin: auto 0 24px 0; +} + +.navLinks { + margin: 24px 0 32px 0; +} + +.navList { + list-style-type: none; + margin: 0; + padding: 0; + + &::after { + content: ''; + background-color: var(--colorScaffoldingHighlight); + display: block; + height: 1px; + margin: 20px 24px; + max-width: 100%; + } +} + +.navList:last-child::after, +.footerContainer .navList::after { + content: none; +} + +a.navLink { + border-left: solid 4px transparent; + color: var(--textColorPrimary); + display: flex; + flex-wrap: nowrap; + font-size: 1.6rem; + font-weight: 500; + outline: none; + padding: 13px 24px; + text-decoration: none; + transition: $navRailHoverTransition; + + &.navLinkActive { + border-color: var(--colorPrimary); + color: var(--colorPrimary); + } + + &:focus, + &:hover { + background: var(--backgroundSecondaryHighlight); + color: var(--colorPrimary); + } +} + +.badge { + background-color: var(--backgroundSecondaryHighlight); + border-radius: 20px; + color: var(--textColorSecondary); + display: inline-block; + font-size: 1.4rem; + line-height: 1.9rem; + margin-left: auto; + padding: 2px 8px; + transition: $navRailHoverTransition; +} + +a.navLinkActive .badge, +a.navLink:focus .badge, +a.navLink:hover .badge { + background-color: var(--backgroundPrimaryHighlight); + color: var(--colorPrimary); +} + +.icon { + fill: var(--colorScaffoldingForeground); + height: 2rem; + margin-right: 1.2rem; + transition: $navRailHoverTransition; +} + +a.navLinkActive .icon, +a.navLink:focus .icon, +a.navLink:hover .icon { + fill: var(--colorPrimary); +} diff --git 
a/web/vtadmin/src/components/NavRail.tsx b/web/vtadmin/src/components/NavRail.tsx new file mode 100644 index 00000000000..9863ec5d547 --- /dev/null +++ b/web/vtadmin/src/components/NavRail.tsx @@ -0,0 +1,99 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import * as React from 'react'; +import { Link, NavLink } from 'react-router-dom'; + +import style from './NavRail.module.scss'; +import logo from '../img/vitess-icon-color.svg'; +import { useClusters, useGates, useKeyspaces, useTableDefinitions, useTablets, useWorkflows } from '../hooks/api'; +import { Icon, Icons } from './Icon'; + +export const NavRail = () => { + const { data: clusters = [] } = useClusters(); + const { data: keyspaces = [] } = useKeyspaces(); + const { data: gates = [] } = useGates(); + const { data: schemas = [] } = useTableDefinitions(); + const { data: tablets = [] } = useTablets(); + const { data: workflows = [] } = useWorkflows(); + + return ( +
+ + Vitess logo + + +
+
    +
  • + +
  • +
  • + +
  • +
+ +
    +
  • + {/* FIXME replace this with a C when we have one */} + +
  • +
  • + +
  • +
  • + +
  • +
  • + +
  • +
  • + +
  • +
+ +
    +
  • + +
  • +
  • + +
  • +
+
+ +
+
    +
  • + +
  • +
  • + +
  • +
+
+
+ ); +}; + +const NavRailLink = ({ count, icon, text, to }: { count?: number; icon: Icons; text: string; to: string }) => { + return ( + + + {text} + {typeof count === 'number' &&
{count}
} +
+ ); +}; diff --git a/web/vtadmin/src/components/TabletList.tsx b/web/vtadmin/src/components/TabletList.tsx deleted file mode 100644 index 67d92676321..00000000000 --- a/web/vtadmin/src/components/TabletList.tsx +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2020 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -import { vtadmin as pb, topodata } from '../proto/vtadmin'; - -interface Props { - tablets: pb.Tablet[]; -} - -const SERVING_STATES = Object.keys(pb.Tablet.ServingState); -const TABLET_TYPES = Object.keys(topodata.TabletType); - -export const TabletList = ({ tablets }: Props) => { - return ( - - - - - - - - - - - {tablets.map((t, i) => ( - - - - - - - ))} - -
ClusterHostnameTypeState
{t.cluster?.name} - {t.tablet?.hostname} - {t.tablet?.type && TABLET_TYPES[t.tablet?.type]}{SERVING_STATES[t.state]}
- ); -}; diff --git a/web/vtadmin/src/components/TextInput.module.scss b/web/vtadmin/src/components/TextInput.module.scss new file mode 100644 index 00000000000..74a061ae610 --- /dev/null +++ b/web/vtadmin/src/components/TextInput.module.scss @@ -0,0 +1,106 @@ +.inputContainer { + display: block; + position: relative; +} + +$iconSizeMedium: 1.6rem; +$iconSizeLarge: 2.2rem; +$iconOffsetHorizontal: 3.2rem; +$iconOffsetHorizontalLarge: 4.2rem; +$iconPositionHorizontal: 1.2rem; +$iconPositionHorizontalLarge: 1.6rem; + +.input { + background: var(--backgroundPrimary); + border: solid 2px var(--colorDisabled); + border-radius: 6px; + box-sizing: border-box; + color: var(--textColorPrimary); + display: block; + font-family: var(--fontFamilyPrimary); + font-size: var(--fontSizeBody); + height: var(--inputHeightMedium); + line-height: var(--inputHeightMedium); + padding: 0 12px; + transition: all 0.1s ease-in-out; + width: 100%; + + &:disabled { + background: var(--backgroundSecondary); + border-color: var(--backgroundSecondaryHighlight); + cursor: not-allowed; + } + + &.large { + font-size: var(--fontSizeLarge); + height: var(--inputHeightLarge); + line-height: var(--inputHeightLarge); + padding: 0 16px; + } + + &.withIconLeft { + padding-left: $iconOffsetHorizontal; + + &.large { + padding-left: $iconOffsetHorizontalLarge; + } + } + + &.withIconRight { + padding-right: $iconOffsetHorizontal; + + &.large { + padding-right: $iconOffsetHorizontalLarge; + } + } +} + +.icon { + fill: var(--grey600); + height: $iconSizeMedium; + margin-top: -($iconSizeMedium / 2); + position: absolute; + top: 50%; + width: $iconSizeMedium; +} + +.iconLeft { + /* stylelint-disable-next-line */ + @extend .icon; + + left: $iconPositionHorizontal; +} + +.iconRight { + /* stylelint-disable-next-line */ + @extend .icon; + + right: $iconPositionHorizontal; +} + +.large ~ .icon { + height: $iconSizeLarge; + margin-top: -($iconSizeLarge / 2); + width: $iconSizeLarge; +} + +.large ~ .iconLeft { + left: 
$iconPositionHorizontalLarge; +} + +.large ~ .iconRight { + right: $iconPositionHorizontalLarge; +} + +.input:focus { + border-color: var(--colorPrimary); + outline: none; +} + +.input:focus ~ .icon { + fill: var(--colorPrimary); +} + +.input:disabled ~ .icon { + fill: var(--colorDisabled); +} diff --git a/web/vtadmin/src/components/TextInput.tsx b/web/vtadmin/src/components/TextInput.tsx new file mode 100644 index 00000000000..4a530cc38df --- /dev/null +++ b/web/vtadmin/src/components/TextInput.tsx @@ -0,0 +1,37 @@ +import * as React from 'react'; +import cx from 'classnames'; + +import { Icon, Icons } from './Icon'; + +import style from './TextInput.module.scss'; + +type NativeInputProps = Omit< + React.DetailedHTMLProps, HTMLInputElement>, + // Omit the props that we explicitly want to overwrite + 'size' +>; +interface Props extends NativeInputProps { + // className is applied to the , not the parent container. + className?: string; + iconLeft?: Icons; + iconRight?: Icons; + size?: 'large'; +} + +export const TextInput = ({ className, iconLeft, iconRight, size, ...props }: Props) => { + const inputClass = cx(style.input, { + [style.large]: size === 'large', + [style.withIconLeft]: !!iconLeft, + [style.withIconRight]: !!iconRight, + }); + + // Order of elements matters: the comes before the icons so that + // we can use CSS adjacency selectors like `input:focus ~ .icon`. + return ( +
+ + {iconLeft && } + {iconRight && } +
+ ); +}; diff --git a/web/vtadmin/src/components/dataTable/DataTable.tsx b/web/vtadmin/src/components/dataTable/DataTable.tsx new file mode 100644 index 00000000000..51e19f1b917 --- /dev/null +++ b/web/vtadmin/src/components/dataTable/DataTable.tsx @@ -0,0 +1,75 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import * as React from 'react'; +import { useLocation } from 'react-router-dom'; + +import { useURLPagination } from '../../hooks/useURLPagination'; +import { useURLQuery } from '../../hooks/useURLQuery'; +import { stringify } from '../../util/queryString'; +import { PaginationNav } from './PaginationNav'; + +interface Props { + columns: string[]; + data: T[]; + pageSize?: number; + renderRows: (rows: T[]) => JSX.Element[]; +} + +// Generally, page sizes of ~100 rows are fine in terms of performance, +// but anything over ~50 feels unwieldy in terms of UX. 
+const DEFAULT_PAGE_SIZE = 50; + +export const DataTable = ({ columns, data, pageSize = DEFAULT_PAGE_SIZE, renderRows }: Props) => { + const { pathname } = useLocation(); + const urlQuery = useURLQuery(); + + const totalPages = Math.ceil(data.length / pageSize); + const { page } = useURLPagination({ totalPages }); + + const startIndex = (page - 1) * pageSize; + const endIndex = startIndex + pageSize; + const dataPage = data.slice(startIndex, endIndex); + + const startRow = startIndex + 1; + const lastRow = Math.min(data.length, startIndex + pageSize); + + const formatPageLink = (p: number) => ({ + pathname, + search: stringify({ ...urlQuery.query, page: p === 1 ? undefined : p }), + }); + + return ( +
+ + + + {columns.map((col, cdx) => ( + + ))} + + + {renderRows(dataPage)} +
{col}
+ + + {!!data.length && ( +

+ Showing {startRow} {lastRow > startRow ? `- ${lastRow}` : null} of {data.length} +

+ )} +
+ ); +}; diff --git a/web/vtadmin/src/components/dataTable/PaginationNav.module.scss b/web/vtadmin/src/components/dataTable/PaginationNav.module.scss new file mode 100644 index 00000000000..68231d7dcc0 --- /dev/null +++ b/web/vtadmin/src/components/dataTable/PaginationNav.module.scss @@ -0,0 +1,51 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +.links { + display: flex; + list-style-type: none; + margin: 0; + padding: 0; +} + +.placeholder, +a.link { + border: solid 1px var(--backgroundPrimaryHighlight); + border-radius: 6px; + color: var(--textColorSecondary); + display: block; + line-height: 36px; + margin-right: 8px; + text-align: center; + width: 36px; +} + +a.link { + cursor: pointer; + text-decoration: none; + + &:hover { + border-color: var(--colorPrimary); + } + + &.activeLink { + border-color: var(--colorPrimary); + color: var(--colorPrimary); + } +} + +.placeholder::before { + content: '...'; +} diff --git a/web/vtadmin/src/components/dataTable/PaginationNav.test.tsx b/web/vtadmin/src/components/dataTable/PaginationNav.test.tsx new file mode 100644 index 00000000000..3092c4f1617 --- /dev/null +++ b/web/vtadmin/src/components/dataTable/PaginationNav.test.tsx @@ -0,0 +1,99 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { render, screen, within } from '@testing-library/react'; +import { MemoryRouter } from 'react-router-dom'; +import { PaginationNav, Props } from './PaginationNav'; + +const formatLink = (page: number) => ({ + pathname: '/test', + search: `?hello=world&page=${page}`, +}); + +describe('PaginationNav', () => { + const tests: { + name: string; + props: Props; + expected: null | Array; + }[] = [ + { + name: 'renders without breaks', + props: { currentPage: 1, formatLink, maxVisible: 3, totalPages: 2 }, + expected: [1, 2], + }, + { + name: 'renders breaks on the right', + props: { currentPage: 1, formatLink, maxVisible: 5, totalPages: 11 }, + expected: [1, 2, 3, null, 11], + }, + { + name: 'renders breaks on the left', + props: { currentPage: 11, formatLink, maxVisible: 5, totalPages: 11 }, + expected: [1, null, 9, 10, 11], + }, + { + name: 'renders breaks in the middle', + props: { currentPage: 6, formatLink, maxVisible: 5, totalPages: 11 }, + expected: [1, null, 6, null, 11], + }, + { + name: 'renders widths according to the minWidth prop', + props: { currentPage: 6, formatLink, maxVisible: 9, minWidth: 2, totalPages: 100 }, + expected: [1, 2, null, 5, 6, 7, null, 99, 100], + }, + { + name: 'does not render if totalPages == 0', + props: { currentPage: 1, formatLink, totalPages: 0 }, + expected: null, + }, + { + name: 'renders even if page > totalPages', + props: { currentPage: 100000, formatLink, maxVisible: 5, totalPages: 11 }, + expected: [1, null, 9, 10, 11], + }, + ]; + + test.each(tests.map(Object.values))('%s', (name: string, props: 
Props, expected: Array) => { + render(, { wrapper: MemoryRouter }); + + const nav = screen.queryByRole('navigation'); + if (expected === null) { + expect(nav).toBeNull(); + return; + } + + const lis = screen.getAllByRole('listitem'); + expect(lis).toHaveLength(expected.length); + + lis.forEach((li, idx) => { + const e = expected[idx]; + const link = within(li).queryByRole('link'); + + if (e === null) { + // Placeholders don't render links + expect(link).toBeNull(); + } else { + expect(link).toHaveAttribute('href', `/test?hello=world&page=${e}`); + expect(link).toHaveTextContent(`${e}`); + + if (e === props.currentPage) { + expect(link).toHaveClass('activeLink'); + } else { + expect(link).not.toHaveClass('activeLink'); + } + } + }); + }); +}); diff --git a/web/vtadmin/src/components/dataTable/PaginationNav.tsx b/web/vtadmin/src/components/dataTable/PaginationNav.tsx new file mode 100644 index 00000000000..a96857d225f --- /dev/null +++ b/web/vtadmin/src/components/dataTable/PaginationNav.tsx @@ -0,0 +1,119 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import cx from 'classnames'; +import * as React from 'react'; +import { Link, LinkProps } from 'react-router-dom'; + +import style from './PaginationNav.module.scss'; + +export interface Props { + currentPage: number; + formatLink: (page: number) => LinkProps['to']; + // The maximum number of pagination elements to show. 
Note that this includes any placeholders. + // It's recommended for this value to be >= 5 to handle the case where there are + // breaks on either side of the list. + maxVisible?: number; + // The minimum number of pagination elements to show at the beginning/end of a sequence, + // adjacent to any sequence breaks. + minWidth?: number; + // The total number of pages + totalPages: number; +} + +const DEFAULT_MAX_VISIBLE = 8; +const DEFAULT_MIN_WIDTH = 1; + +// This assumes we always want to 1-index our pages, where "page 1" is the first page. +// If we find a need for zero-indexed pagination, we can make this configurable. +const FIRST_PAGE = 1; + +// PageSpecifiers with a numeric value are links. `null` is used +// to signify a break in the sequence. +type PageSpecifier = number | null; + +export const PaginationNav = ({ + currentPage, + formatLink, + maxVisible = DEFAULT_MAX_VISIBLE, + minWidth = DEFAULT_MIN_WIDTH, + totalPages, +}: Props) => { + if (totalPages <= 1) { + return null; + } + + // This rather magical solution is borrowed, with gratitude, from StackOverflow + // https://stackoverflow.com/a/46385144 + const leftWidth = (maxVisible - minWidth * 2 - 3) >> 1; + const rightWidth = (maxVisible - minWidth * 2 - 2) >> 1; + + let numbers: PageSpecifier[] = []; + if (totalPages <= maxVisible) { + // No breaks in list + numbers = range(FIRST_PAGE, totalPages); + } else if (currentPage <= maxVisible - minWidth - 1 - rightWidth) { + // No break on left side of page + numbers = range(FIRST_PAGE, maxVisible - minWidth - 1).concat( + null, + range(totalPages - minWidth + 1, totalPages) + ); + } else if (currentPage >= totalPages - minWidth - 1 - rightWidth) { + // No break on right of page + numbers = range(FIRST_PAGE, minWidth).concat( + null, + range(totalPages - minWidth - 1 - rightWidth - leftWidth, totalPages) + ); + } else { + // Breaks on both sides + numbers = range(FIRST_PAGE, minWidth).concat( + null, + range(currentPage - leftWidth, currentPage + 
rightWidth), + null, + range(totalPages - minWidth + 1, totalPages) + ); + } + + return ( + + ); +}; + +// lodash-es has a `range` function but it doesn't play nice +// with the PageSpecifier[] return type (since it's a mixed array +// of numbers and nulls). +const range = (start: number, end: number): PageSpecifier[] => { + if (isNaN(start) || isNaN(end)) return []; + return Array.from(Array(end - start + 1), (_, i) => i + start); +}; diff --git a/web/vtadmin/src/components/inputs/Label.module.scss b/web/vtadmin/src/components/inputs/Label.module.scss new file mode 100644 index 00000000000..517162d3983 --- /dev/null +++ b/web/vtadmin/src/components/inputs/Label.module.scss @@ -0,0 +1,19 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +.label { + font-weight: 700; + line-height: 3.2rem; +} diff --git a/web/vtadmin/src/components/inputs/Label.tsx b/web/vtadmin/src/components/inputs/Label.tsx new file mode 100644 index 00000000000..5bba38da553 --- /dev/null +++ b/web/vtadmin/src/components/inputs/Label.tsx @@ -0,0 +1,33 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import * as React from 'react'; + +import style from './Label.module.scss'; + +type NativeLabelProps = React.DetailedHTMLProps, HTMLLabelElement>; + +interface Props extends NativeLabelProps { + label: string; +} + +export const Label: React.FunctionComponent = ({ children, label, ...props }) => { + return ( + + ); +}; diff --git a/web/vtadmin/src/components/inputs/Select.module.scss b/web/vtadmin/src/components/inputs/Select.module.scss new file mode 100644 index 00000000000..d5fd9ca72e2 --- /dev/null +++ b/web/vtadmin/src/components/inputs/Select.module.scss @@ -0,0 +1,151 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +.container { + display: inline-block; + position: relative; +} + +.toggle { + background: var(--backgroundPrimary); + border: solid 2px var(--colorDisabled); + border-radius: 6px; + box-sizing: border-box; + color: var(--textColorPrimary); + cursor: pointer; + display: block; + font-family: var(--fontFamilyPrimary); + font-size: var(--fontSizeBody); + height: var(--inputHeightMedium); + min-width: 16rem; + padding: 0 40px 0 12px; + position: relative; + text-align: left; + transition: all 0.1s ease-in-out; + white-space: nowrap; +} + +.placeholder .toggle { + color: var(--textColorSecondary); +} + +.open .toggle, +.toggle:active, +.toggle:focus { + border-color: var(--colorPrimary); + outline: none; +} + +.toggle:disabled { + background: var(--backgroundSecondary); + border-color: var(--backgroundSecondaryHighlight); + color: var(--textColorSecondary); + cursor: not-allowed; +} + +.large .toggle { + font-size: var(--fontSizeLarge); + height: var(--inputHeightLarge); + min-width: 24rem; + padding: 0 16px; +} + +.chevron { + height: 20px; + position: absolute; + top: calc(50% - 10px); + right: 4px; +} + +.dropdown { + background: var(--backgroundPrimary); + border: solid 2px var(--colorDisabled); + border-radius: 6px; + box-sizing: border-box; + margin: 4px 0 0 0; + height: min-content; + max-height: 420px; + overflow: auto; + outline: none; + padding: 8px 0; + min-width: 100%; + position: absolute; + z-index: 1000; +} + +.menu { + list-style-type: none; + margin: 0; + outline: none; + padding: 0; +} + +.menu li { + line-height: 32px; + padding: 4px 12px; + + &:hover { + background: var(--backgroundPrimaryHighlight); + cursor: pointer; + } + + &.active { + background: var(--backgroundPrimaryHighlight); + } +} + +.large .menu li { + font-size: var(--fontSizeLarge); + min-width: 24rem; + padding: 8px 16px; +} + +.clear { + background: none; + border: none; + box-sizing: border-box; + color: var(--textColorSecondary); + cursor: pointer; + display: block; + 
font-family: var(--fontFamilyPrimary); + font-size: var(--fontSizeBody); + min-width: 16rem; + padding: 4px 12px; + position: relative; + text-align: left; + transition: all 0.1s ease-in-out; + white-space: nowrap; + width: 100%; + + &:hover, + &:active, + &:focus { + background: var(--backgroundPrimaryHighlight); + } +} + +.large .clear { + font-size: var(--fontSizeLarge); + padding: 8px 16px; +} + +.emptyContainer { + outline: none; + padding: 8px 12px; +} + +.emptyPlaceholder { + color: var(--textColorSecondary); +} diff --git a/web/vtadmin/src/components/inputs/Select.tsx b/web/vtadmin/src/components/inputs/Select.tsx new file mode 100644 index 00000000000..4572d83612a --- /dev/null +++ b/web/vtadmin/src/components/inputs/Select.tsx @@ -0,0 +1,148 @@ +/** + * Copyright 2021 The Vitess Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import cx from 'classnames'; +import { useSelect, UseSelectStateChange } from 'downshift'; +import * as React from 'react'; + +import { Label } from './Label'; +import style from './Select.module.scss'; +import { Icon, Icons } from '../Icon'; + +interface Props { + disabled?: boolean; + items: T[]; + itemToString?: (item: T | null) => string; + label: string; + onChange: (selectedItem: T | null | undefined) => void; + placeholder: string; + emptyPlaceholder?: string | (() => JSX.Element | string); + renderItem?: (item: T) => JSX.Element | string; + selectedItem: T | null; + size?: 'large'; +} + +/** + * Select performs exactly the same as the native HTML setTheme(t)} + type="radio" + value={t} + /> + {t} + +
+ ))} +
+ + +
+

Icons

+
+ {Object.values(Icons).map((i) => ( + + ))} +
+
+ +
+

Select

+
+
+ setFormData({ ...formData, selectFruitNameDefault: fruitName })} + placeholder="Choose a fruit name" + selectedItem={formData.selectFruitNameDefault || null} + /> + setFormData({ ...formData, selectFruitDefault: fruit })} + placeholder="Choose a fruit" + renderItem={(fruit) => `${fruit.emoji} ${fruit.name}`} + selectedItem={formData.selectFruitDefault || null} + /> +
+
+ setFormData({ ...formData, selectFruitNameLarge: fruitName })} + placeholder="Choose a fruit name" + size="large" + selectedItem={formData.selectFruitNameLarge || null} + /> +